From c03ac02680688d71bdc8f9ada9ca95bd4390ea80 Mon Sep 17 00:00:00 2001 From: Jason Del Ponte <961963+jasdel@users.noreply.github.com> Date: Thu, 19 Aug 2021 15:56:27 -0700 Subject: [PATCH] API Model update (#1385) Updates SDK's API clients with latest models. --- .../41575353444b40ffbf474f4155544f00.json | 8 + .../b2097c2cde554c7bbc5e18b1d993b80e.json | 32 + .../aws-models/apigateway.2015-07-09.json | 26 +- .../aws-models/apigatewayv2.2018-11-29.json | 19 +- .../aws-models/appflow.2020-08-23.json | 526 +- .../applicationautoscaling.2016-02-06.json | 162 +- .../aws-models/cloud9.2017-09-23.json | 33 + .../aws-models/clouddirectory.2017-01-11.json | 46 +- .../aws-models/cloudwatchlogs.2014-03-28.json | 48 +- .../aws-models/codebuild.2016-10-06.json | 2 +- .../aws-models/configservice.2014-11-12.json | 68 +- .../aws-models/costexplorer.2017-10-25.json | 955 +- .../customerprofiles.2020-08-15.json | 74 +- .../aws-models/databrew.2017-07-25.json | 4 + .../directoryservice.2015-04-16.json | 454 +- .../aws-models/ec2.2016-11-15.json | 93 +- .../aws-models/elasticache.2015-02-02.json | 133 +- .../aws-models/emr.2009-03-31.json | 48 +- .../aws-models/iotsitewise.2019-12-02.json | 21 +- .../aws-models/lambda.2015-03-31.json | 6 +- .../aws-models/licensemanager.2018-08-01.json | 4 + .../aws-models/memorydb.2021-01-01.json | 5130 +++++++ .../aws-models/quicksight.2018-04-01.json | 106 +- .../aws-models/route53.2013-04-01.json | 2 +- .../route53resolver.2018-04-01.json | 68 +- .../sdk-codegen/aws-models/s3.2006-03-01.json | 274 +- .../aws-models/sagemaker.2017-07-24.json | 344 +- .../sagemakerruntime.2017-05-13.json | 139 +- .../aws/go/codegen/endpoint-prefix.json | 1 + service/apigateway/api_op_CreateDomainName.go | 18 +- service/apigateway/api_op_GetDomainName.go | 13 +- service/apigateway/api_op_UpdateDomainName.go | 13 +- service/apigateway/deserializers.go | 36 + service/apigateway/serializers.go | 5 + service/apigateway/types/enums.go | 10 +- 
service/apigateway/types/types.go | 13 +- service/apigatewayv2/deserializers.go | 9 + service/apigatewayv2/serializers.go | 5 + service/apigatewayv2/types/enums.go | 8 +- service/apigatewayv2/types/types.go | 13 +- .../appflow/api_op_CreateConnectorProfile.go | 15 +- .../appflow/api_op_DescribeConnectorEntity.go | 2 +- .../api_op_DescribeConnectorProfiles.go | 2 +- .../appflow/api_op_ListConnectorEntities.go | 3 +- .../appflow/api_op_UpdateConnectorProfile.go | 2 +- service/appflow/api_op_UpdateFlow.go | 10 +- service/appflow/deserializers.go | 342 + service/appflow/doc.go | 6 +- service/appflow/serializers.go | 209 + service/appflow/types/enums.go | 104 + service/appflow/types/types.go | 182 +- service/appflow/validators.go | 119 +- .../api_op_DeleteScalingPolicy.go | 25 +- .../api_op_DeleteScheduledAction.go | 25 +- .../api_op_DeregisterScalableTarget.go | 58 +- .../api_op_DescribeScalableTargets.go | 58 +- .../api_op_DescribeScalingActivities.go | 25 +- .../api_op_DescribeScalingPolicies.go | 25 +- .../api_op_DescribeScheduledActions.go | 25 +- .../api_op_PutScalingPolicy.go | 30 +- .../api_op_PutScheduledAction.go | 25 +- .../api_op_RegisterScalableTarget.go | 58 +- service/applicationautoscaling/doc.go | 57 +- service/applicationautoscaling/types/enums.go | 46 +- service/applicationautoscaling/types/types.go | 221 +- service/cloud9/api_op_CreateEnvironmentEC2.go | 6 + service/cloud9/api_op_UpdateEnvironment.go | 15 + service/cloud9/serializers.go | 10 + service/cloud9/types/enums.go | 18 + service/clouddirectory/types/errors.go | 4 +- service/clouddirectory/types/types.go | 27 +- .../cloudwatchlogs/api_op_AssociateKmsKey.go | 23 +- .../cloudwatchlogs/api_op_CreateExportTask.go | 2 +- .../cloudwatchlogs/api_op_CreateLogGroup.go | 47 +- .../api_op_DescribeLogGroups.go | 8 +- .../api_op_DisassociateKmsKey.go | 13 +- service/cloudwatchlogs/api_op_GetLogEvents.go | 7 +- .../api_op_PutDestinationPolicy.go | 8 +- service/cloudwatchlogs/api_op_PutLogEvents.go | 25 +- 
.../cloudwatchlogs/api_op_PutMetricFilter.go | 2 +- .../api_op_PutResourcePolicy.go | 20 +- .../api_op_PutRetentionPolicy.go | 5 +- .../api_op_PutSubscriptionFilter.go | 18 +- service/cloudwatchlogs/api_op_TagLogGroup.go | 7 +- .../cloudwatchlogs/api_op_UntagLogGroup.go | 3 + service/cloudwatchlogs/doc.go | 48 +- service/cloudwatchlogs/types/errors.go | 3 +- service/cloudwatchlogs/types/types.go | 11 +- service/codebuild/types/types.go | 4 +- service/configservice/types/enums.go | 22 + .../api_op_CreateCostCategoryDefinition.go | 4 + service/costexplorer/api_op_GetAnomalies.go | 14 +- .../costexplorer/api_op_GetAnomalyMonitors.go | 12 +- .../api_op_GetAnomalySubscriptions.go | 10 +- .../costexplorer/api_op_GetCostAndUsage.go | 61 +- .../api_op_GetCostAndUsageWithResources.go | 29 +- .../costexplorer/api_op_GetCostCategories.go | 60 +- .../costexplorer/api_op_GetDimensionValues.go | 292 +- .../api_op_GetReservationCoverage.go | 25 +- ...op_GetReservationPurchaseRecommendation.go | 81 +- .../api_op_GetReservationUtilization.go | 15 +- .../api_op_GetRightsizingRecommendation.go | 51 +- ...p_GetSavingsPlansPurchaseRecommendation.go | 5 +- service/costexplorer/api_op_GetTags.go | 61 +- .../api_op_UpdateAnomalyMonitor.go | 2 +- .../api_op_UpdateAnomalySubscription.go | 5 +- .../api_op_UpdateCostCategoryDefinition.go | 4 + service/costexplorer/deserializers.go | 249 + service/costexplorer/doc.go | 12 +- service/costexplorer/serializers.go | 112 + service/costexplorer/types/enums.go | 38 + service/costexplorer/types/types.go | 707 +- service/costexplorer/validators.go | 88 + .../customerprofiles/api_op_CreateDomain.go | 23 +- service/customerprofiles/api_op_GetDomain.go | 8 +- service/customerprofiles/api_op_GetMatches.go | 11 +- .../api_op_ListProfileObjects.go | 4 + .../customerprofiles/api_op_UpdateDomain.go | 22 +- service/customerprofiles/serializers.go | 26 + service/customerprofiles/types/enums.go | 4 + service/customerprofiles/types/types.go | 36 +- 
service/customerprofiles/validators.go | 23 + service/databrew/types/enums.go | 16 +- .../directoryservice/api_op_AddIpRoutes.go | 48 +- .../api_op_ConnectDirectory.go | 11 +- .../directoryservice/api_op_CreateAlias.go | 6 +- .../api_op_CreateConditionalForwarder.go | 17 +- .../api_op_CreateDirectory.go | 8 +- .../api_op_CreateLogSubscription.go | 3 +- .../api_op_CreateMicrosoftAD.go | 28 +- .../directoryservice/api_op_CreateSnapshot.go | 4 +- .../directoryservice/api_op_CreateTrust.go | 26 +- .../api_op_DeleteConditionalForwarder.go | 3 +- .../api_op_DeleteDirectory.go | 8 +- .../directoryservice/api_op_DeleteTrust.go | 4 +- .../api_op_DeregisterEventTopic.go | 11 +- ...op_DescribeClientAuthenticationSettings.go | 145 + .../api_op_DescribeEventTopics.go | 19 +- .../directoryservice/api_op_DescribeTrusts.go | 10 +- .../api_op_EnableClientAuthentication.go | 4 +- service/directoryservice/api_op_EnableSso.go | 4 +- .../api_op_GetDirectoryLimits.go | 2 +- .../api_op_ListLogSubscriptions.go | 10 +- .../api_op_RegisterEventTopic.go | 17 +- .../api_op_ResetUserPassword.go | 15 +- .../directoryservice/api_op_ShareDirectory.go | 35 +- .../api_op_UnshareDirectory.go | 2 +- .../api_op_UpdateConditionalForwarder.go | 7 +- .../directoryservice/api_op_UpdateTrust.go | 6 +- .../directoryservice/api_op_VerifyTrust.go | 8 +- service/directoryservice/deserializers.go | 270 + service/directoryservice/doc.go | 27 +- service/directoryservice/generated.json | 1 + service/directoryservice/serializers.go | 74 + service/directoryservice/types/enums.go | 18 + service/directoryservice/types/errors.go | 17 +- service/directoryservice/types/types.go | 101 +- service/directoryservice/validators.go | 39 + service/ec2/api_op_CreateKeyPair.go | 23 +- service/ec2/api_op_CreateSecurityGroup.go | 4 +- service/ec2/api_op_DescribeKeyPairs.go | 2 +- service/ec2/api_op_DescribeSecurityGroups.go | 4 +- service/ec2/api_op_ExportImage.go | 2 +- service/ec2/api_op_ImportImage.go | 64 +- 
service/ec2/api_op_ImportInstance.go | 4 +- service/ec2/api_op_ImportKeyPair.go | 13 +- service/ec2/api_op_ImportSnapshot.go | 44 +- service/ec2/api_op_ImportVolume.go | 6 +- service/ec2/deserializers.go | 39 + service/ec2/serializers.go | 10 + service/ec2/types/enums.go | 18 + service/ec2/types/types.go | 50 +- .../elasticache/api_op_AddTagsToResource.go | 14 +- ...i_op_AuthorizeCacheSecurityGroupIngress.go | 6 +- .../api_op_CreateGlobalReplicationGroup.go | 16 +- .../api_op_CreateReplicationGroup.go | 2 +- ...reaseNodeGroupsInGlobalReplicationGroup.go | 2 +- .../api_op_DeleteGlobalReplicationGroup.go | 2 +- .../api_op_DescribeCacheClusters.go | 325 + ...i_op_DisassociateGlobalReplicationGroup.go | 7 +- .../api_op_FailoverGlobalReplicationGroup.go | 4 +- ...reaseNodeGroupsInGlobalReplicationGroup.go | 6 +- .../elasticache/api_op_ListTagsForResource.go | 4 +- .../api_op_ModifyGlobalReplicationGroup.go | 2 +- .../api_op_ModifyReplicationGroup.go | 3 +- ..._RebalanceSlotsInGlobalReplicationGroup.go | 2 +- .../api_op_RemoveTagsFromResource.go | 2 +- .../api_op_RevokeCacheSecurityGroupIngress.go | 6 +- service/elasticache/api_op_TestFailover.go | 4 +- service/elasticache/deserializers.go | 17 + service/elasticache/types/types.go | 17 +- service/emr/api_client.go | 3 +- .../emr/api_op_CreateStudioSessionMapping.go | 4 +- service/emr/api_op_ListReleaseLabels.go | 2 +- service/emr/api_op_RunJobFlow.go | 4 +- service/emr/api_op_SetVisibleToAllUsers.go | 2 +- service/emr/deserializers.go | 27 + service/emr/doc.go | 2 +- service/emr/serializers.go | 10 + service/emr/types/types.go | 46 +- ...i_op_GetInterpolatedAssetPropertyValues.go | 35 +- service/iotsitewise/serializers.go | 4 + service/iotsitewise/types/types.go | 51 +- service/lambda/api_op_ListFunctions.go | 8 +- service/lambda/types/enums.go | 2 + service/licensemanager/types/enums.go | 2 + service/memorydb/LICENSE.txt | 202 + service/memorydb/api_client.go | 261 + service/memorydb/api_op_BatchUpdateCluster.go | 129 + 
service/memorydb/api_op_CopySnapshot.go | 144 + service/memorydb/api_op_CreateACL.go | 130 + service/memorydb/api_op_CreateCluster.go | 206 + .../memorydb/api_op_CreateParameterGroup.go | 137 + service/memorydb/api_op_CreateSnapshot.go | 133 + service/memorydb/api_op_CreateSubnetGroup.go | 139 + service/memorydb/api_op_CreateUser.go | 139 + service/memorydb/api_op_DeleteACL.go | 124 + service/memorydb/api_op_DeleteCluster.go | 126 + .../memorydb/api_op_DeleteParameterGroup.go | 123 + service/memorydb/api_op_DeleteSnapshot.go | 123 + service/memorydb/api_op_DeleteSubnetGroup.go | 122 + service/memorydb/api_op_DeleteUser.go | 122 + service/memorydb/api_op_DescribeACLs.go | 134 + service/memorydb/api_op_DescribeClusters.go | 140 + .../memorydb/api_op_DescribeEngineVersions.go | 143 + service/memorydb/api_op_DescribeEvents.go | 157 + .../api_op_DescribeParameterGroups.go | 137 + service/memorydb/api_op_DescribeParameters.go | 141 + .../memorydb/api_op_DescribeServiceUpdates.go | 141 + service/memorydb/api_op_DescribeSnapshots.go | 152 + .../memorydb/api_op_DescribeSubnetGroups.go | 137 + service/memorydb/api_op_DescribeUsers.go | 141 + service/memorydb/api_op_FailoverShard.go | 126 + .../api_op_ListAllowedNodeTypeUpdates.go | 128 + service/memorydb/api_op_ListTags.go | 126 + .../memorydb/api_op_ResetParameterGroup.go | 134 + service/memorydb/api_op_TagResource.go | 138 + service/memorydb/api_op_UntagResource.go | 127 + service/memorydb/api_op_UpdateACL.go | 127 + service/memorydb/api_op_UpdateCluster.go | 169 + .../memorydb/api_op_UpdateParameterGroup.go | 129 + service/memorydb/api_op_UpdateSubnetGroup.go | 128 + service/memorydb/api_op_UpdateUser.go | 128 + service/memorydb/deserializers.go | 12229 ++++++++++++++++ service/memorydb/doc.go | 12 + service/memorydb/endpoints.go | 160 + service/memorydb/generated.json | 59 + service/memorydb/go.mod | 10 + service/memorydb/go.sum | 14 + service/memorydb/go_module_metadata.go | 6 + 
.../memorydb/internal/endpoints/endpoints.go | 106 + .../internal/endpoints/endpoints_test.go | 11 + service/memorydb/protocol_test.go | 3 + service/memorydb/serializers.go | 2883 ++++ service/memorydb/types/enums.go | 119 + service/memorydb/types/errors.go | 966 ++ service/memorydb/types/types.go | 688 + service/memorydb/validators.go | 1137 ++ .../api_op_CreateAccountCustomization.go | 25 +- service/quicksight/api_op_CreateDashboard.go | 8 +- service/quicksight/api_op_CreateDataSet.go | 8 +- service/quicksight/api_op_CreateDataSource.go | 8 +- service/quicksight/api_op_CreateNamespace.go | 5 +- service/quicksight/api_op_CreateTemplate.go | 10 +- service/quicksight/api_op_CreateTheme.go | 4 +- .../api_op_DeleteAccountCustomization.go | 6 +- service/quicksight/api_op_DeleteDataSet.go | 8 +- service/quicksight/api_op_DeleteDataSource.go | 8 +- .../api_op_DescribeAccountCustomization.go | 64 +- .../api_op_DescribeAccountSettings.go | 2 +- service/quicksight/api_op_DescribeDataSet.go | 4 +- .../api_op_DescribeDataSetPermissions.go | 8 +- .../quicksight/api_op_DescribeDataSource.go | 4 +- .../api_op_DescribeDataSourcePermissions.go | 8 +- .../quicksight/api_op_DescribeNamespace.go | 8 +- .../quicksight/api_op_GetDashboardEmbedUrl.go | 6 +- .../quicksight/api_op_GetSessionEmbedUrl.go | 4 +- service/quicksight/api_op_ListDataSets.go | 2 +- service/quicksight/api_op_ListDataSources.go | 4 +- service/quicksight/api_op_ListNamespaces.go | 4 +- .../api_op_UpdateAccountCustomization.go | 19 +- service/quicksight/api_op_UpdateDashboard.go | 8 +- service/quicksight/api_op_UpdateDataSet.go | 8 +- .../api_op_UpdateDataSetPermissions.go | 4 +- service/quicksight/api_op_UpdateDataSource.go | 8 +- .../api_op_UpdateDataSourcePermissions.go | 8 +- service/quicksight/api_op_UpdateTemplate.go | 8 +- service/quicksight/doc.go | 8 +- service/quicksight/types/types.go | 20 +- service/route53/types/types.go | 2 +- .../api_op_CreateResolverQueryLogConfig.go | 8 +- 
.../api_op_DeleteResolverQueryLogConfig.go | 8 +- ...i_op_DisassociateResolverQueryLogConfig.go | 6 +- .../api_op_GetFirewallRuleGroupPolicy.go | 12 +- .../api_op_GetResolverQueryLogConfigPolicy.go | 2 +- .../api_op_ListResolverDnssecConfigs.go | 20 +- .../api_op_ListResolverEndpoints.go | 8 +- .../api_op_ListResolverQueryLogConfigs.go | 41 +- .../api_op_ListResolverRuleAssociations.go | 5 +- .../api_op_ListResolverRules.go | 7 +- .../api_op_PutFirewallRuleGroupPolicy.go | 10 +- .../api_op_PutResolverQueryLogConfigPolicy.go | 15 +- .../api_op_PutResolverRulePolicy.go | 20 +- service/route53resolver/doc.go | 8 +- service/route53resolver/types/types.go | 89 +- service/s3/api_op_AbortMultipartUpload.go | 12 +- service/s3/api_op_CompleteMultipartUpload.go | 51 +- service/s3/api_op_CopyObject.go | 66 +- service/s3/api_op_CreateBucket.go | 67 +- service/s3/api_op_CreateMultipartUpload.go | 180 +- service/s3/api_op_DeleteBucketPolicy.go | 21 +- service/s3/api_op_DeleteObject.go | 12 +- service/s3/api_op_DeleteObjectTagging.go | 12 +- service/s3/api_op_DeleteObjects.go | 12 +- service/s3/api_op_GetBucketLocation.go | 8 +- ...i_op_GetBucketNotificationConfiguration.go | 2 +- service/s3/api_op_GetBucketPolicy.go | 21 +- service/s3/api_op_GetObject.go | 47 +- service/s3/api_op_GetObjectAcl.go | 6 +- service/s3/api_op_GetObjectLegalHold.go | 6 +- .../s3/api_op_GetObjectLockConfiguration.go | 6 +- service/s3/api_op_GetObjectRetention.go | 6 +- service/s3/api_op_GetObjectTagging.go | 12 +- service/s3/api_op_HeadBucket.go | 20 +- service/s3/api_op_HeadObject.go | 43 +- service/s3/api_op_ListMultipartUploads.go | 15 +- service/s3/api_op_ListObjects.go | 15 +- service/s3/api_op_ListObjectsV2.go | 26 +- service/s3/api_op_ListParts.go | 21 +- service/s3/api_op_PutBucketAcl.go | 94 +- service/s3/api_op_PutBucketCors.go | 5 +- service/s3/api_op_PutBucketEncryption.go | 24 +- .../api_op_PutBucketInventoryConfiguration.go | 13 +- .../api_op_PutBucketLifecycleConfiguration.go | 16 +- 
service/s3/api_op_PutBucketLogging.go | 28 +- ...i_op_PutBucketNotificationConfiguration.go | 2 +- .../s3/api_op_PutBucketOwnershipControls.go | 4 +- service/s3/api_op_PutBucketPolicy.go | 29 +- service/s3/api_op_PutBucketReplication.go | 45 +- service/s3/api_op_PutBucketRequestPayment.go | 5 +- service/s3/api_op_PutBucketTagging.go | 22 +- service/s3/api_op_PutBucketVersioning.go | 5 +- service/s3/api_op_PutBucketWebsite.go | 5 +- service/s3/api_op_PutObject.go | 84 +- service/s3/api_op_PutObjectAcl.go | 110 +- service/s3/api_op_PutObjectLegalHold.go | 11 +- .../s3/api_op_PutObjectLockConfiguration.go | 7 +- service/s3/api_op_PutObjectRetention.go | 21 +- service/s3/api_op_PutObjectTagging.go | 17 +- service/s3/api_op_PutPublicAccessBlock.go | 4 +- service/s3/api_op_RestoreObject.go | 22 +- service/s3/api_op_UploadPart.go | 34 +- service/s3/api_op_UploadPartCopy.go | 26 +- service/s3/api_op_WriteGetObjectResponse.go | 54 +- service/s3/types/errors.go | 8 +- service/s3/types/types.go | 544 +- service/sagemaker/api_op_CreateAutoMLJob.go | 4 +- .../sagemaker/api_op_CreateEndpointConfig.go | 6 + service/sagemaker/api_op_CreateLabelingJob.go | 11 + .../api_op_CreateNotebookInstance.go | 3 + service/sagemaker/api_op_DescribeAutoMLJob.go | 2 +- service/sagemaker/api_op_DescribeEndpoint.go | 6 + .../api_op_DescribeEndpointConfig.go | 6 + .../api_op_DescribeNotebookInstance.go | 3 + .../sagemaker/api_op_StopPipelineExecution.go | 22 +- service/sagemaker/deserializers.go | 331 +- service/sagemaker/serializers.go | 86 + service/sagemaker/types/enums.go | 142 +- service/sagemaker/types/types.go | 122 +- service/sagemaker/validators.go | 39 + .../api_op_InvokeEndpointAsync.go | 175 + service/sagemakerruntime/deserializers.go | 174 + service/sagemakerruntime/generated.json | 1 + service/sagemakerruntime/serializers.go | 88 + service/sagemakerruntime/validators.go | 42 + 375 files changed, 38674 insertions(+), 3764 deletions(-) create mode 100644 
.changelog/41575353444b40ffbf474f4155544f00.json create mode 100644 .changelog/b2097c2cde554c7bbc5e18b1d993b80e.json create mode 100644 codegen/sdk-codegen/aws-models/memorydb.2021-01-01.json create mode 100644 service/directoryservice/api_op_DescribeClientAuthenticationSettings.go create mode 100644 service/memorydb/LICENSE.txt create mode 100644 service/memorydb/api_client.go create mode 100644 service/memorydb/api_op_BatchUpdateCluster.go create mode 100644 service/memorydb/api_op_CopySnapshot.go create mode 100644 service/memorydb/api_op_CreateACL.go create mode 100644 service/memorydb/api_op_CreateCluster.go create mode 100644 service/memorydb/api_op_CreateParameterGroup.go create mode 100644 service/memorydb/api_op_CreateSnapshot.go create mode 100644 service/memorydb/api_op_CreateSubnetGroup.go create mode 100644 service/memorydb/api_op_CreateUser.go create mode 100644 service/memorydb/api_op_DeleteACL.go create mode 100644 service/memorydb/api_op_DeleteCluster.go create mode 100644 service/memorydb/api_op_DeleteParameterGroup.go create mode 100644 service/memorydb/api_op_DeleteSnapshot.go create mode 100644 service/memorydb/api_op_DeleteSubnetGroup.go create mode 100644 service/memorydb/api_op_DeleteUser.go create mode 100644 service/memorydb/api_op_DescribeACLs.go create mode 100644 service/memorydb/api_op_DescribeClusters.go create mode 100644 service/memorydb/api_op_DescribeEngineVersions.go create mode 100644 service/memorydb/api_op_DescribeEvents.go create mode 100644 service/memorydb/api_op_DescribeParameterGroups.go create mode 100644 service/memorydb/api_op_DescribeParameters.go create mode 100644 service/memorydb/api_op_DescribeServiceUpdates.go create mode 100644 service/memorydb/api_op_DescribeSnapshots.go create mode 100644 service/memorydb/api_op_DescribeSubnetGroups.go create mode 100644 service/memorydb/api_op_DescribeUsers.go create mode 100644 service/memorydb/api_op_FailoverShard.go create mode 100644 
service/memorydb/api_op_ListAllowedNodeTypeUpdates.go create mode 100644 service/memorydb/api_op_ListTags.go create mode 100644 service/memorydb/api_op_ResetParameterGroup.go create mode 100644 service/memorydb/api_op_TagResource.go create mode 100644 service/memorydb/api_op_UntagResource.go create mode 100644 service/memorydb/api_op_UpdateACL.go create mode 100644 service/memorydb/api_op_UpdateCluster.go create mode 100644 service/memorydb/api_op_UpdateParameterGroup.go create mode 100644 service/memorydb/api_op_UpdateSubnetGroup.go create mode 100644 service/memorydb/api_op_UpdateUser.go create mode 100644 service/memorydb/deserializers.go create mode 100644 service/memorydb/doc.go create mode 100644 service/memorydb/endpoints.go create mode 100644 service/memorydb/generated.json create mode 100644 service/memorydb/go.mod create mode 100644 service/memorydb/go.sum create mode 100644 service/memorydb/go_module_metadata.go create mode 100644 service/memorydb/internal/endpoints/endpoints.go create mode 100644 service/memorydb/internal/endpoints/endpoints_test.go create mode 100644 service/memorydb/protocol_test.go create mode 100644 service/memorydb/serializers.go create mode 100644 service/memorydb/types/enums.go create mode 100644 service/memorydb/types/errors.go create mode 100644 service/memorydb/types/types.go create mode 100644 service/memorydb/validators.go create mode 100644 service/sagemakerruntime/api_op_InvokeEndpointAsync.go diff --git a/.changelog/41575353444b40ffbf474f4155544f00.json b/.changelog/41575353444b40ffbf474f4155544f00.json new file mode 100644 index 00000000000..72781d3dd7d --- /dev/null +++ b/.changelog/41575353444b40ffbf474f4155544f00.json @@ -0,0 +1,8 @@ +{ + "id": "41575353-444b-40ff-bf47-4f4155544f00", + "type": "release", + "description": "New AWS service client module", + "modules": [ + "service/memorydb" + ] +} \ No newline at end of file diff --git a/.changelog/b2097c2cde554c7bbc5e18b1d993b80e.json 
b/.changelog/b2097c2cde554c7bbc5e18b1d993b80e.json new file mode 100644 index 00000000000..bda04ae2fba --- /dev/null +++ b/.changelog/b2097c2cde554c7bbc5e18b1d993b80e.json @@ -0,0 +1,32 @@ +{ + "id": "b2097c2c-de55-4c7b-bc5e-18b1d993b80e", + "type": "feature", + "description": "API client updated", + "modules": [ + "service/apigateway", + "service/apigatewayv2", + "service/appflow", + "service/applicationautoscaling", + "service/cloud9", + "service/clouddirectory", + "service/cloudwatchlogs", + "service/codebuild", + "service/configservice", + "service/costexplorer", + "service/customerprofiles", + "service/databrew", + "service/directoryservice", + "service/ec2", + "service/elasticache", + "service/emr", + "service/iotsitewise", + "service/lambda", + "service/licensemanager", + "service/quicksight", + "service/route53", + "service/route53resolver", + "service/s3", + "service/sagemaker", + "service/sagemakerruntime" + ] +} diff --git a/codegen/sdk-codegen/aws-models/apigateway.2015-07-09.json b/codegen/sdk-codegen/aws-models/apigateway.2015-07-09.json index 09fd218ffca..35c837c089f 100644 --- a/codegen/sdk-codegen/aws-models/apigateway.2015-07-09.json +++ b/codegen/sdk-codegen/aws-models/apigateway.2015-07-09.json @@ -1628,6 +1628,12 @@ }, "mutualTlsAuthentication": { "target": "com.amazonaws.apigateway#MutualTlsAuthenticationInput" + }, + "ownershipVerificationCertificateArn": { + "target": "com.amazonaws.apigateway#String", + "traits": { + "smithy.api#documentation": "
The ARN of the public certificate issued by ACM to validate ownership of your custom domain. Only required when configuring mutual TLS and using an ACM imported or private CA certificate ARN as the regionalCertificateArn.
" + } } }, "traits": { @@ -3766,7 +3772,7 @@ "domainNameStatus": { "target": "com.amazonaws.apigateway#DomainNameStatus", "traits": { - "smithy.api#documentation": "The status of the DomainName migration. The valid values are AVAILABLE
and UPDATING
. If the status is UPDATING
, the domain cannot be modified further until the existing operation is complete. If it is AVAILABLE
, the domain can be updated.
The status of the DomainName migration. The valid values are AVAILABLE
, UPDATING
, PENDING_CERTIFICATE_REIMPORT
, and PENDING_OWNERSHIP_VERIFICATION
. If the status is UPDATING
, the domain cannot be modified further until the existing operation is complete. If it is AVAILABLE
, the domain can be updated.
The mutual TLS authentication configuration for a custom domain name. If specified, API Gateway performs two-way authentication between the client and the server. Clients must present a trusted certificate to access your API.
" } + }, + "ownershipVerificationCertificateArn": { + "target": "com.amazonaws.apigateway#String", + "traits": { + "smithy.api#documentation": "The ARN of the public certificate issued by ACM to validate ownership of your custom domain. Only required when configuring mutual TLS and using an ACM imported or private CA certificate ARN as the regionalCertificateArn.
" + } } }, "traits": { @@ -3813,6 +3825,14 @@ { "value": "PENDING", "name": "PENDING" + }, + { + "value": "PENDING_CERTIFICATE_REIMPORT", + "name": "PENDING_CERTIFICATE_REIMPORT" + }, + { + "value": "PENDING_OWNERSHIP_VERIFICATION", + "name": "PENDING_OWNERSHIP_VERIFICATION" } ] } @@ -7875,7 +7895,7 @@ "truststoreVersion": { "target": "com.amazonaws.apigateway#String", "traits": { - "smithy.api#documentation": "The version of the S3 object that contains your truststore. To\n specify a version, you must have versioning enabled for the S3 bucket.
" + "smithy.api#documentation": "The version of the S3 object that contains your truststore. To\n specify a version, you must have versioning enabled for the S3 bucket.
" } } }, @@ -9176,7 +9196,7 @@ "type": "string", "traits": { "smithy.api#documentation": "The status code.
", - "smithy.api#pattern": "[1-5]\\d\\d" + "smithy.api#pattern": "^[1-5]\\d\\d$" } }, "com.amazonaws.apigateway#String": { diff --git a/codegen/sdk-codegen/aws-models/apigatewayv2.2018-11-29.json b/codegen/sdk-codegen/aws-models/apigatewayv2.2018-11-29.json index 06e71be9deb..45b45862e0b 100644 --- a/codegen/sdk-codegen/aws-models/apigatewayv2.2018-11-29.json +++ b/codegen/sdk-codegen/aws-models/apigatewayv2.2018-11-29.json @@ -3631,7 +3631,7 @@ "DomainNameStatus": { "target": "com.amazonaws.apigatewayv2#DomainNameStatus", "traits": { - "smithy.api#documentation": "The status of the domain name migration. The valid values are AVAILABLE and UPDATING. If the status is UPDATING, the domain cannot be modified further until the existing operation is complete. If it is AVAILABLE, the domain can be updated.
", + "smithy.api#documentation": "The status of the domain name migration. The valid values are AVAILABLE, UPDATING, PENDING_CERTIFICATE_REIMPORT, and PENDING_OWNERSHIP_VERIFICATION. If the status is UPDATING, the domain cannot be modified further until the existing operation is complete. If it is AVAILABLE, the domain can be updated.
", "smithy.api#jsonName": "domainNameStatus" } }, @@ -3662,6 +3662,13 @@ "smithy.api#documentation": "The Transport Layer Security (TLS) version of the security policy for this domain name. The valid values are TLS_1_0 and TLS_1_2.
", "smithy.api#jsonName": "securityPolicy" } + }, + "OwnershipVerificationCertificateArn": { + "target": "com.amazonaws.apigatewayv2#Arn", + "traits": { + "smithy.api#documentation": "The ARN of the public certificate issued by ACM to validate ownership of your custom domain. Only required when configuring mutual TLS and using an ACM imported or private CA certificate ARN as the regionalCertificateArn
", + "smithy.api#jsonName": "ownershipVerificationCertificateArn" + } } }, "traits": { @@ -3680,7 +3687,7 @@ "com.amazonaws.apigatewayv2#DomainNameStatus": { "type": "string", "traits": { - "smithy.api#documentation": "The status of the domain name migration. The valid values are AVAILABLE and UPDATING. If the status is UPDATING, the domain cannot be modified further until the existing operation is complete. If it is AVAILABLE, the domain can be updated.
", + "smithy.api#documentation": "The status of the domain name migration. The valid values are AVAILABLE, UPDATING, PENDING_CERTIFICATE_REIMPORT, and PENDING_OWNERSHIP_VERIFICATION. If the status is UPDATING, the domain cannot be modified further until the existing operation is complete. If it is AVAILABLE, the domain can be updated.
", "smithy.api#enum": [ { "value": "AVAILABLE", @@ -3689,6 +3696,14 @@ { "value": "UPDATING", "name": "UPDATING" + }, + { + "value": "PENDING_CERTIFICATE_REIMPORT", + "name": "PENDING_CERTIFICATE_REIMPORT" + }, + { + "value": "PENDING_OWNERSHIP_VERIFICATION", + "name": "PENDING_OWNERSHIP_VERIFICATION" } ] } diff --git a/codegen/sdk-codegen/aws-models/appflow.2020-08-23.json b/codegen/sdk-codegen/aws-models/appflow.2020-08-23.json index 2cbe5b94cff..f063902c85b 100644 --- a/codegen/sdk-codegen/aws-models/appflow.2020-08-23.json +++ b/codegen/sdk-codegen/aws-models/appflow.2020-08-23.json @@ -193,6 +193,16 @@ "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.appflow#ApplicationHostUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^(https?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$" + } + }, "com.amazonaws.appflow#ApplicationKey": { "type": "string", "traits": { @@ -203,6 +213,16 @@ "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.appflow#ApplicationServicePath": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.appflow#AuthCode": { "type": "string", "traits": { @@ -213,6 +233,38 @@ "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.appflow#AuthCodeUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^(https?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$" + } + }, + "com.amazonaws.appflow#BasicAuthCredentials": { + "type": "structure", + "members": { + "username": { + "target": "com.amazonaws.appflow#Username", + "traits": { + "smithy.api#documentation": "The username to use to connect to a resource.
", + "smithy.api#required": {} + } + }, + "password": { + "target": "com.amazonaws.appflow#Password", + "traits": { + "smithy.api#documentation": "The password to use to connect to a resource.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The basic auth credentials required for basic authentication.
" + } + }, "com.amazonaws.appflow#Boolean": { "type": "boolean" }, @@ -257,6 +309,16 @@ "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.appflow#ClientNumber": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 3 + }, + "smithy.api#pattern": "^\\d{3}$" + } + }, "com.amazonaws.appflow#ClientSecret": { "type": "string", "traits": { @@ -589,6 +651,9 @@ "traits": { "smithy.api#documentation": "The connector metadata specific to Amazon Honeycode.
" } + }, + "SAPOData": { + "target": "com.amazonaws.appflow#SAPODataMetadata" } }, "traits": { @@ -701,6 +766,12 @@ "traits": { "smithy.api#documentation": "The operation to be performed on the provided Zendesk source fields.
" } + }, + "SAPOData": { + "target": "com.amazonaws.appflow#SAPODataConnectorOperator", + "traits": { + "smithy.api#documentation": "The operation to be performed on the provided SAPOData source fields.
" + } } }, "traits": { @@ -719,7 +790,7 @@ "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { - "smithy.api#documentation": " The name of the connector profile. The name is unique for each\n ConnectorProfile
in the AWS account.
The name of the connector profile. The name is unique for each\n ConnectorProfile
in the Amazon Web Services account.
Specifies when the connector profile was last updated.
" } + }, + "privateConnectionProvisioningState": { + "target": "com.amazonaws.appflow#PrivateConnectionProvisioningState", + "traits": { + "smithy.api#documentation": "Specifies the private connection provisioning state.
" + } } }, "traits": { @@ -893,6 +970,9 @@ "traits": { "smithy.api#documentation": "The connector-specific credentials required when using Zendesk.
" } + }, + "SAPOData": { + "target": "com.amazonaws.appflow#SAPODataConnectorProfileCredentials" } }, "traits": { @@ -1025,6 +1105,9 @@ "traits": { "smithy.api#documentation": "The connector-specific properties required by Zendesk.
" } + }, + "SAPOData": { + "target": "com.amazonaws.appflow#SAPODataConnectorProfileProperties" } }, "traits": { @@ -1131,6 +1214,10 @@ { "value": "CustomerProfiles", "name": "CUSTOMERPROFILES" + }, + { + "value": "SAPOData", + "name": "SAPODATA" } ] } @@ -1173,7 +1260,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new connector profile associated with your AWS account. There is a soft quota\n of 100 connector profiles per AWS account. If you need more connector profiles than this quota\n allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support\n channel.
", + "smithy.api#documentation": "Creates a new connector profile associated with your Amazon Web Services account. There is a soft quota\n of 100 connector profiles per Amazon Web Services account. If you need more connector profiles than this quota\n allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support\n channel.
", "smithy.api#http": { "method": "POST", "uri": "/create-connector-profile", @@ -1187,7 +1274,7 @@ "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { - "smithy.api#documentation": " The name of the connector profile. The name is unique for each\n ConnectorProfile
in your AWS account.
The name of the connector profile. The name is unique for each\n ConnectorProfile
in your Amazon Web Services account.
Indicates the connection mode and specifies whether it is public or private. Private\n flows use AWS PrivateLink to route data over AWS infrastructure without exposing it to the\n public internet.
", + "smithy.api#documentation": "Indicates the connection mode and specifies whether it is public or private. Private\n flows use Amazon Web Services PrivateLink to route data over Amazon Web Services infrastructure without exposing it to the\n public internet.
", "smithy.api#required": {} } }, @@ -1701,7 +1788,7 @@ "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { - "smithy.api#documentation": " The name of the connector profile. The name is unique for each\n ConnectorProfile
in the AWS account.
The name of the connector profile. The name is unique for each\n ConnectorProfile
in the Amazon Web Services account.
The name of the connector profile. The name is unique for each\n ConnectorProfile
in the AWS account.
The name of the connector profile. The name is unique for each\n ConnectorProfile
in the Amazon Web Services account.
The name of the connector profile. This name must be unique for each connector profile in\n the AWS account.
" + "smithy.api#documentation": "The name of the connector profile. This name must be unique for each connector profile in\n the Amazon Web Services account.
" } }, "destinationConnectorProperties": { @@ -2229,6 +2316,16 @@ "target": "com.amazonaws.appflow#DestinationFlowConfig" } }, + "com.amazonaws.appflow#DocumentType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^[\\s\\w_-]+$" + } + }, "com.amazonaws.appflow#DomainName": { "type": "string", "traits": { @@ -3254,7 +3351,7 @@ "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { - "smithy.api#documentation": " The name of the connector profile. The name is unique for each\n ConnectorProfile
in the AWS account, and is used to query the downstream\n connector.
The name of the connector profile. The name is unique for each\n ConnectorProfile
in the Amazon Web Services account, and is used to query the downstream\n connector.
The identifier for the desired client.
", + "smithy.api#required": {} + } + }, + "clientSecret": { + "target": "com.amazonaws.appflow#ClientSecret", + "traits": { + "smithy.api#documentation": "The client secret used by the OAuth client to authenticate to the authorization server.\n
", + "smithy.api#required": {} + } + }, + "accessToken": { + "target": "com.amazonaws.appflow#AccessToken", + "traits": { + "smithy.api#documentation": "The access token used to access protected SAPOData resources.
" + } + }, + "refreshToken": { + "target": "com.amazonaws.appflow#RefreshToken", + "traits": { + "smithy.api#documentation": "The refresh token used to refresh expired access token.
" + } + }, + "oAuthRequest": { + "target": "com.amazonaws.appflow#ConnectorOAuthRequest", + "traits": { + "smithy.api#documentation": "The OAuth requirement needed to request security tokens from the connector endpoint.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "The OAuth credentials required for OAuth type authentication.
" + } + }, + "com.amazonaws.appflow#OAuthProperties": { + "type": "structure", + "members": { + "tokenUrl": { + "target": "com.amazonaws.appflow#TokenUrl", + "traits": { + "smithy.api#documentation": "The token url required to fetch access/refresh tokens using authorization code and also to refresh expired\n access token using refresh token.
", + "smithy.api#required": {} + } + }, + "authCodeUrl": { + "target": "com.amazonaws.appflow#AuthCodeUrl", + "traits": { + "smithy.api#documentation": "The authorization code url required to redirect to SAP Login Page to fetch authorization code for OAuth type\n authentication.
", + "smithy.api#required": {} + } + }, + "oAuthScopes": { + "target": "com.amazonaws.appflow#OAuthScopeList", + "traits": { + "smithy.api#documentation": "The OAuth scopes required for OAuth type authentication.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The OAuth properties required for OAuth type authentication.
" + } + }, "com.amazonaws.appflow#OAuthScope": { "type": "string", "traits": { @@ -3601,7 +3777,7 @@ "min": 0, "max": 128 }, - "smithy.api#pattern": "^[\\w]*$" + "smithy.api#pattern": "^[/\\w]*$" } }, "com.amazonaws.appflow#OAuthScopeList": { @@ -3799,6 +3975,15 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.appflow#PortNumber": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 65535 + } + } + }, "com.amazonaws.appflow#PrefixConfig": { "type": "structure", "members": { @@ -3865,6 +4050,88 @@ ] } }, + "com.amazonaws.appflow#PrivateConnectionProvisioningFailureCause": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CONNECTOR_AUTHENTICATION", + "name": "CONNECTOR_AUTHENTICATION" + }, + { + "value": "CONNECTOR_SERVER", + "name": "CONNECTOR_SERVER" + }, + { + "value": "INTERNAL_SERVER", + "name": "INTERNAL_SERVER" + }, + { + "value": "ACCESS_DENIED", + "name": "ACCESS_DENIED" + }, + { + "value": "VALIDATION", + "name": "VALIDATION" + } + ] + } + }, + "com.amazonaws.appflow#PrivateConnectionProvisioningFailureMessage": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^[\\s\\w/!@#+=.-]*$" + } + }, + "com.amazonaws.appflow#PrivateConnectionProvisioningState": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.appflow#PrivateConnectionProvisioningStatus", + "traits": { + "smithy.api#documentation": "Specifies the private connection provisioning status.
" + } + }, + "failureMessage": { + "target": "com.amazonaws.appflow#PrivateConnectionProvisioningFailureMessage", + "traits": { + "smithy.api#documentation": "Specifies the private connection provisioning failure reason.
" + } + }, + "failureCause": { + "target": "com.amazonaws.appflow#PrivateConnectionProvisioningFailureCause", + "traits": { + "smithy.api#documentation": "Specifies the private connection provisioning failure cause.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies the private connection provisioning state.
" + } + }, + "com.amazonaws.appflow#PrivateConnectionProvisioningStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FAILED", + "name": "FAILED" + }, + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "CREATED", + "name": "CREATED" + } + ] + } + }, "com.amazonaws.appflow#PrivateLinkServiceName": { "type": "string", "traits": { @@ -3872,7 +4139,7 @@ "min": 0, "max": 512 }, - "smithy.api#pattern": "^\\S+$" + "smithy.api#pattern": "^$|com.amazonaws.vpce.[\\w/!:@#.\\-]+$" } }, "com.amazonaws.appflow#Property": { @@ -4204,6 +4471,193 @@ "smithy.api#documentation": "The properties that are applied when Amazon S3 is being used as the flow source.
" } }, + "com.amazonaws.appflow#SAPODataConnectorOperator": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PROJECTION", + "name": "PROJECTION" + }, + { + "value": "LESS_THAN", + "name": "LESS_THAN" + }, + { + "value": "CONTAINS", + "name": "CONTAINS" + }, + { + "value": "GREATER_THAN", + "name": "GREATER_THAN" + }, + { + "value": "BETWEEN", + "name": "BETWEEN" + }, + { + "value": "LESS_THAN_OR_EQUAL_TO", + "name": "LESS_THAN_OR_EQUAL_TO" + }, + { + "value": "GREATER_THAN_OR_EQUAL_TO", + "name": "GREATER_THAN_OR_EQUAL_TO" + }, + { + "value": "EQUAL_TO", + "name": "EQUAL_TO" + }, + { + "value": "NOT_EQUAL_TO", + "name": "NOT_EQUAL_TO" + }, + { + "value": "ADDITION", + "name": "ADDITION" + }, + { + "value": "MULTIPLICATION", + "name": "MULTIPLICATION" + }, + { + "value": "DIVISION", + "name": "DIVISION" + }, + { + "value": "SUBTRACTION", + "name": "SUBTRACTION" + }, + { + "value": "MASK_ALL", + "name": "MASK_ALL" + }, + { + "value": "MASK_FIRST_N", + "name": "MASK_FIRST_N" + }, + { + "value": "MASK_LAST_N", + "name": "MASK_LAST_N" + }, + { + "value": "VALIDATE_NON_NULL", + "name": "VALIDATE_NON_NULL" + }, + { + "value": "VALIDATE_NON_ZERO", + "name": "VALIDATE_NON_ZERO" + }, + { + "value": "VALIDATE_NON_NEGATIVE", + "name": "VALIDATE_NON_NEGATIVE" + }, + { + "value": "VALIDATE_NUMERIC", + "name": "VALIDATE_NUMERIC" + }, + { + "value": "NO_OP", + "name": "NO_OP" + } + ] + } + }, + "com.amazonaws.appflow#SAPODataConnectorProfileCredentials": { + "type": "structure", + "members": { + "basicAuthCredentials": { + "target": "com.amazonaws.appflow#BasicAuthCredentials", + "traits": { + "smithy.api#documentation": "The SAPOData basic authentication credentials.
" + } + }, + "oAuthCredentials": { + "target": "com.amazonaws.appflow#OAuthCredentials", + "traits": { + "smithy.api#documentation": "The SAPOData OAuth type authentication credentials.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The connector-specific profile credentials required when using SAPOData.
" + } + }, + "com.amazonaws.appflow#SAPODataConnectorProfileProperties": { + "type": "structure", + "members": { + "applicationHostUrl": { + "target": "com.amazonaws.appflow#ApplicationHostUrl", + "traits": { + "smithy.api#documentation": "The location of the SAPOData resource.
", + "smithy.api#required": {} + } + }, + "applicationServicePath": { + "target": "com.amazonaws.appflow#ApplicationServicePath", + "traits": { + "smithy.api#documentation": "The application path to catalog service.
", + "smithy.api#required": {} + } + }, + "portNumber": { + "target": "com.amazonaws.appflow#PortNumber", + "traits": { + "smithy.api#box": {}, + "smithy.api#documentation": "The port number of the SAPOData instance.
", + "smithy.api#required": {} + } + }, + "clientNumber": { + "target": "com.amazonaws.appflow#ClientNumber", + "traits": { + "smithy.api#documentation": "The client number for the client creating the connection.
", + "smithy.api#required": {} + } + }, + "logonLanguage": { + "target": "com.amazonaws.appflow#LogonLanguage", + "traits": { + "smithy.api#documentation": "The logon language of SAPOData instance.
" + } + }, + "privateLinkServiceName": { + "target": "com.amazonaws.appflow#PrivateLinkServiceName", + "traits": { + "smithy.api#documentation": "The SAPOData Private Link service name to be used for private data transfers.
" + } + }, + "oAuthProperties": { + "target": "com.amazonaws.appflow#OAuthProperties", + "traits": { + "smithy.api#documentation": "The SAPOData OAuth properties required for OAuth type authentication.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The connector-specific profile properties required when using SAPOData.
" + } + }, + "com.amazonaws.appflow#SAPODataMetadata": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "The connector metadata specific to SAPOData.
" + } + }, + "com.amazonaws.appflow#SAPODataSourceProperties": { + "type": "structure", + "members": { + "objectPath": { + "target": "com.amazonaws.appflow#Object", + "traits": { + "smithy.api#documentation": "The object path specified in the SAPOData flow source.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The properties that are applied when using SAPOData as a flow source.
" + } + }, "com.amazonaws.appflow#SalesforceConnectorOperator": { "type": "string", "traits": { @@ -4492,7 +4946,7 @@ "name": "appflow" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Welcome to the Amazon AppFlow API reference. This guide is for developers who need\n detailed information about the Amazon AppFlow API operations, data types, and errors.
\n\nAmazon AppFlow is a fully managed integration service that enables you to securely\n transfer data between software as a service (SaaS) applications like Salesforce, Marketo,\n Slack, and ServiceNow, and AWS services like Amazon S3 and Amazon Redshift.
\n\n\n\nUse the following links to get started on the Amazon AppFlow API:
\n\n\n Actions: An alphabetical list of all Amazon AppFlow API operations.
\n\n Data\n types: An alphabetical list of all Amazon AppFlow data types.
\n\n Common parameters: Parameters that all Query operations can use.
\n\n Common\n errors: Client and server errors that all operations can return.
\nIf you're new to Amazon AppFlow, we recommend that you review the Amazon AppFlow User\n Guide.
\nAmazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include\n applicable OAuth attributes (such as auth-code
and redirecturi
) with\n the connector-specific ConnectorProfileProperties
when creating a new connector\n profile using Amazon AppFlow API operations. For example, Salesforce users can refer to the\n \n Authorize Apps with OAuth\n documentation.
Welcome to the Amazon AppFlow API reference. This guide is for developers who need\n detailed information about the Amazon AppFlow API operations, data types, and errors.
\n\nAmazon AppFlow is a fully managed integration service that enables you to securely\n transfer data between software as a service (SaaS) applications like Salesforce, Marketo,\n Slack, and ServiceNow, and Amazon Web Services like Amazon S3 and Amazon Redshift.
\n\n\n\nUse the following links to get started on the Amazon AppFlow API:
\n\n\n Actions: An alphabetical list of all Amazon AppFlow API operations.
\n\n Data\n types: An alphabetical list of all Amazon AppFlow data types.
\n\n Common parameters: Parameters that all Query operations can use.
\n\n Common\n errors: Client and server errors that all operations can return.
\nIf you're new to Amazon AppFlow, we recommend that you review the Amazon AppFlow User\n Guide.
\nAmazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include\n applicable OAuth attributes (such as auth-code
and redirecturi
) with\n the connector-specific ConnectorProfileProperties
when creating a new connector\n profile using Amazon AppFlow API operations. For example, Salesforce users can refer to the\n \n Authorize Apps with OAuth\n documentation.
The AWS Region of the Snowflake account.
" + "smithy.api#documentation": "The Amazon Web Services Region of the Snowflake account.
" } } }, @@ -5161,7 +5615,7 @@ "supportedRegions": { "target": "com.amazonaws.appflow#RegionList", "traits": { - "smithy.api#documentation": "Specifies the supported AWS Regions when using Snowflake.
" + "smithy.api#documentation": "Specifies the supported Amazon Web Services Regions when using Snowflake.
" } } }, @@ -5255,6 +5709,9 @@ "traits": { "smithy.api#documentation": "Specifies the information that is required for querying Zendesk.
" } + }, + "SAPOData": { + "target": "com.amazonaws.appflow#SAPODataSourceProperties" } }, "traits": { @@ -5300,7 +5757,7 @@ "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { - "smithy.api#documentation": "The name of the connector profile. This name must be unique for each connector profile in\n the AWS account.
" + "smithy.api#documentation": "The name of the connector profile. This name must be unique for each connector profile in\n the Amazon Web Services account.
" } }, "sourceConnectorProperties": { @@ -5697,6 +6154,16 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.appflow#TokenUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^(https?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$" + } + }, "com.amazonaws.appflow#TrendmicroConnectorOperator": { "type": "string", "traits": { @@ -5970,7 +6437,7 @@ "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { - "smithy.api#documentation": " The name of the connector profile and is unique for each ConnectorProfile
in\n the AWS Account.
The name of the connector profile and is unique for each ConnectorProfile
in\n the Amazon Web Services account.
The object specified in the Veeva flow source.
", "smithy.api#required": {} } + }, + "documentType": { + "target": "com.amazonaws.appflow#DocumentType", + "traits": { + "smithy.api#documentation": "The document type specified in the Veeva document extract flow.
" + } + }, + "includeSourceFiles": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Boolean value to include source files in Veeva document extract flow.
" + } + }, + "includeRenditions": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Boolean value to include file renditions in Veeva document extract flow.
" + } + }, + "includeAllVersions": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Boolean value to include All Versions of files in Veeva document extract flow.
" + } } }, "traits": { diff --git a/codegen/sdk-codegen/aws-models/applicationautoscaling.2016-02-06.json b/codegen/sdk-codegen/aws-models/applicationautoscaling.2016-02-06.json index b9e5e03447f..bcf1c260535 100644 --- a/codegen/sdk-codegen/aws-models/applicationautoscaling.2016-02-06.json +++ b/codegen/sdk-codegen/aws-models/applicationautoscaling.2016-02-06.json @@ -123,7 +123,7 @@ "name": "application-autoscaling" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "With Application Auto Scaling, you can configure automatic scaling for the following\n resources:
\nAmazon ECS services
\nAmazon EC2 Spot Fleet requests
\nAmazon EMR clusters
\nAmazon AppStream 2.0 fleets
\nAmazon DynamoDB tables and global secondary indexes throughput capacity
\nAmazon Aurora Replicas
\nAmazon SageMaker endpoint variants
\nCustom resources provided by your own applications or services
\nAmazon Comprehend document classification and entity recognizer endpoints
\nAWS Lambda function provisioned concurrency
\nAmazon Keyspaces (for Apache Cassandra) tables
\nAmazon Managed Streaming for Apache Kafka broker storage
\n\n API Summary\n
\nThe Application Auto Scaling service API includes three key sets of actions:
\nRegister and manage scalable targets - Register AWS or custom resources as scalable\n targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and\n retrieve information on existing scalable targets.
\nConfigure and manage automatic scaling - Define scaling policies to dynamically scale\n your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions,\n and retrieve your recent scaling activity history.
\nSuspend and resume scaling - Temporarily suspend and later resume automatic scaling by\n calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can\n suspend and resume (individually or in combination) scale-out activities that are\n triggered by a scaling policy, scale-in activities that are triggered by a scaling policy,\n and scheduled scaling.
\nTo learn more about Application Auto Scaling, including information about granting IAM users required\n permissions for Application Auto Scaling actions, see the Application Auto Scaling User\n Guide.
", + "smithy.api#documentation": "With Application Auto Scaling, you can configure automatic scaling for the following\n resources:
\nAmazon AppStream 2.0 fleets
\nAmazon Aurora Replicas
\nAmazon Comprehend document classification and entity recognizer endpoints
\nAmazon DynamoDB tables and global secondary indexes throughput capacity
\nAmazon ECS services
\nAmazon ElastiCache for Redis clusters (replication groups)
\nAmazon EMR clusters
\nAmazon Keyspaces (for Apache Cassandra) tables
\nLambda function provisioned concurrency
\nAmazon Managed Streaming for Apache Kafka broker storage
\nAmazon SageMaker endpoint variants
\nSpot Fleet (Amazon EC2) requests
\nCustom resources provided by your own applications or services
\n\n API Summary\n
\nThe Application Auto Scaling service API includes three key sets of actions:
\nRegister and manage scalable targets - Register Amazon Web Services or custom resources as scalable\n targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and\n retrieve information on existing scalable targets.
\nConfigure and manage automatic scaling - Define scaling policies to dynamically scale\n your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions,\n and retrieve your recent scaling activity history.
\nSuspend and resume scaling - Temporarily suspend and later resume automatic scaling by\n calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can\n suspend and resume (individually or in combination) scale-out activities that are\n triggered by a scaling policy, scale-in activities that are triggered by a scaling policy,\n and scheduled scaling.
\nTo learn more about Application Auto Scaling, including information about granting IAM users required\n permissions for Application Auto Scaling actions, see the Application Auto Scaling User\n Guide.
", "smithy.api#title": "Application Auto Scaling" } }, @@ -135,6 +135,10 @@ } }, "traits": { + "aws.protocols#awsQueryError": { + "code": "ConcurrentUpdateException", + "httpResponseCode": 500 + }, "smithy.api#documentation": "Concurrent updates caused an exception, for example, if you request an update to an\n Application Auto Scaling resource that already has a pending update.
", "smithy.api#error": "server", "smithy.api#httpError": 500 @@ -184,7 +188,7 @@ } }, "traits": { - "smithy.api#documentation": "Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use\n with Application Auto Scaling.
\nFor information about the available metrics for a service, see AWS\n Services That Publish CloudWatch Metrics in the Amazon CloudWatch User\n Guide.
\nTo create your customized metric specification:
\nAdd values for each required parameter from CloudWatch. You can use an existing metric,\n or a new metric that you create. To use your own metric, you must first publish the\n metric to CloudWatch. For more information, see Publish Custom\n Metrics in the Amazon CloudWatch User Guide.
\nChoose a metric that changes proportionally with capacity. The value of the metric\n should increase or decrease in inverse proportion to the number of capacity units.\n That is, the value of the metric should decrease when capacity increases, and\n increase when capacity decreases.
\nFor more information about CloudWatch, see Amazon CloudWatch\n Concepts.
" + "smithy.api#documentation": "Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use\n with Application Auto Scaling.
\nFor information about the available metrics for a service, see Amazon Web Services\n Services That Publish CloudWatch Metrics in the Amazon CloudWatch User\n Guide.
\nTo create your customized metric specification:
\nAdd values for each required parameter from CloudWatch. You can use an existing metric,\n or a new metric that you create. To use your own metric, you must first publish the\n metric to CloudWatch. For more information, see Publish Custom\n Metrics in the Amazon CloudWatch User Guide.
\nChoose a metric that changes proportionally with capacity. The value of the metric\n should increase or decrease in inverse proportion to the number of capacity units.\n That is, the value of the metric should decrease when capacity increases, and\n increase when capacity decreases.
\nFor more information about CloudWatch, see Amazon CloudWatch\n Concepts.
" } }, "com.amazonaws.applicationautoscaling#DeleteScalingPolicy": { @@ -226,21 +230,21 @@ "ServiceNamespace": { "target": "com.amazonaws.applicationautoscaling#ServiceNamespace", "traits": { - "smithy.api#documentation": "The namespace of the AWS service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the AWS service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scaling activity.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scaling activity.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the AWS service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the AWS service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
Failed access to resources caused an exception. This exception is thrown when Application Auto Scaling\n is unable to retrieve the alarms associated with a scaling policy due to a client error,\n for example, if the role ARN specified for a scalable target does not have permission to\n call the CloudWatch DescribeAlarms on your behalf.
", "smithy.api#error": "client", "smithy.api#httpError": 400 @@ -754,6 +762,10 @@ } }, "traits": { + "aws.protocols#awsQueryError": { + "code": "InternalServiceException", + "httpResponseCode": 500 + }, "smithy.api#documentation": "The service encountered an internal error.
", "smithy.api#error": "server", "smithy.api#httpError": 500 @@ -767,6 +779,10 @@ } }, "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidNextTokenException", + "httpResponseCode": 400 + }, "smithy.api#documentation": "The next token supplied was invalid.
", "smithy.api#error": "client", "smithy.api#httpError": 400 @@ -780,6 +796,10 @@ } }, "traits": { + "aws.protocols#awsQueryError": { + "code": "LimitExceededException", + "httpResponseCode": 400 + }, "smithy.api#documentation": "A per-account resource limit is exceeded. For more information, see Application Auto Scaling service quotas.
", "smithy.api#error": "client", "smithy.api#httpError": 400 @@ -954,6 +974,18 @@ { "value": "KafkaBrokerStorageUtilization", "name": "KafkaBrokerStorageUtilization" + }, + { + "value": "ElastiCachePrimaryEngineCPUUtilization", + "name": "ElastiCachePrimaryEngineCPUUtilization" + }, + { + "value": "ElastiCacheReplicaEngineCPUUtilization", + "name": "ElastiCacheReplicaEngineCPUUtilization" + }, + { + "value": "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage", + "name": "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage" } ] } @@ -975,6 +1007,10 @@ } }, "traits": { + "aws.protocols#awsQueryError": { + "code": "ObjectNotFoundException", + "httpResponseCode": 400 + }, "smithy.api#documentation": "The specified object could not be found. For any operation that depends on the existence\n of a scalable target, this exception is thrown if the scalable target with the specified\n service namespace, resource ID, and scalable dimension does not exist. For any operation\n that deletes or deregisters a resource, this exception is thrown if the resource cannot be\n found.
", "smithy.api#error": "client", "smithy.api#httpError": 400 @@ -987,7 +1023,7 @@ "min": 1, "max": 256 }, - "smithy.api#pattern": "\\p{Print}+" + "smithy.api#pattern": "^\\p{Print}+$" } }, "com.amazonaws.applicationautoscaling#PolicyType": { @@ -1018,12 +1054,12 @@ "ResourceLabel": { "target": "com.amazonaws.applicationautoscaling#ResourceLabel", "traits": { - "smithy.api#documentation": "Identifies the resource associated with the metric type. You can't specify a resource\n label unless the metric type is ALBRequestCountPerTarget
and there is a target\n group attached to the Spot Fleet request or ECS service.
You create the resource label by appending the final portion of the load balancer ARN\n and the final portion of the target group ARN into a single value, separated by a forward\n slash (/). The format is\n app/
app/
targetgroup/
This is an example:\n app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d.
\nTo find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use\n the DescribeTargetGroups API operation.
" + "smithy.api#documentation": "Identifies the resource associated with the metric type. You can't specify a resource\n label unless the metric type is ALBRequestCountPerTarget
and there is a target\n group attached to the Spot Fleet request or ECS service.
You create the resource label by appending the final portion of the load balancer ARN\n and the final portion of the target group ARN into a single value, separated by a forward\n slash (/). The format of the resource label is:
\n\n app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff
.
Where:
\napp/
targetgroup/
To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use\n the DescribeTargetGroups API operation.
" } } }, "traits": { - "smithy.api#documentation": "Represents a predefined metric for a target tracking scaling policy to use with\n Application Auto Scaling.
\nOnly the AWS services that you're using send metrics to Amazon CloudWatch. To determine whether a\n desired metric already exists by looking up its namespace and dimension using the CloudWatch\n metrics dashboard in the console, follow the procedure in Building dashboards\n with CloudWatch in the Application Auto Scaling User Guide.
" + "smithy.api#documentation": "Represents a predefined metric for a target tracking scaling policy to use with\n Application Auto Scaling.
\nOnly the Amazon Web Services that you're using send metrics to Amazon CloudWatch. To determine whether a\n desired metric already exists by looking up its namespace and dimension using the CloudWatch\n metrics dashboard in the console, follow the procedure in Building dashboards\n with CloudWatch in the Application Auto Scaling User Guide.
" } }, "com.amazonaws.applicationautoscaling#PutScalingPolicy": { @@ -1071,28 +1107,28 @@ "ServiceNamespace": { "target": "com.amazonaws.applicationautoscaling#ServiceNamespace", "traits": { - "smithy.api#documentation": "The namespace of the AWS service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The policy type. This parameter is required if you are creating a scaling policy.
\nThe following policy types are supported:
\n\n TargetTrackingScaling
—Not supported for Amazon EMR
\n StepScaling
—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces (for\n Apache Cassandra), or Amazon MSK.
For more information, see Target\n tracking scaling policies and Step scaling policies in the Application Auto Scaling User Guide.
" + "smithy.api#documentation": "The policy type. This parameter is required if you are creating a scaling policy.
\nThe following policy types are supported:
\n\n TargetTrackingScaling
—Not supported for Amazon EMR
\n StepScaling
—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces (for Apache\n Cassandra), Amazon MSK, or Amazon ElastiCache for Redis.
For more information, see Target\n tracking scaling policies and Step scaling policies in the Application Auto Scaling User Guide.
" } }, "StepScalingPolicyConfiguration": { @@ -1162,7 +1198,7 @@ "ServiceNamespace": { "target": "com.amazonaws.applicationautoscaling#ServiceNamespace", "traits": { - "smithy.api#documentation": "The namespace of the AWS service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the AWS service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The namespace of the Amazon Web Services service that provides the resource. For a resource provided\n by your own application or service, use custom-resource
instead.
The identifier of the resource that is associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource that is associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.
ecs:service:DesiredCount - The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the AWS service that provides the resource, or a custom-resource.
The namespace of the Amazon Web Services service that provides the resource, or a custom-resource.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the AWS service that provides the resource, or a custom-resource.
The namespace of the Amazon Web Services service that provides the resource, or a custom-resource.
The identifier of the resource associated with the scaling activity.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scaling activity.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the AWS service that provides the resource, or a custom-resource.
The namespace of the Amazon Web Services service that provides the resource, or a custom-resource.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The namespace of the AWS service that provides the resource, or a\n custom-resource
.
The namespace of the Amazon Web Services service that provides the resource, or a\n custom-resource
.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service
and the unique identifier is the cluster name \n and service name. Example: service/default/sample-webapp
.
Spot Fleet request - The resource type is spot-fleet-request
and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE
.
EMR cluster - The resource type is instancegroup
and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0
.
AppStream 2.0 fleet - The resource type is fleet
and the unique identifier is the fleet name.\n Example: fleet/sample-fleet
.
DynamoDB table - The resource type is table
and the unique identifier is the table name. \n Example: table/my-table
.
DynamoDB global secondary index - The resource type is index
and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index
.
Aurora DB cluster - The resource type is cluster
and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster
.
Amazon SageMaker endpoint variant - The resource type is variant
and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering
.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue
from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE
.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE
.
Lambda provisioned concurrency - The resource type is function
and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST
. \n Example: function:my-function:prod
or function:my-function:1
.
Amazon Keyspaces table - The resource type is table
and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable
.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5
.
Amazon ElastiCache replication group - The resource type is replication-group
and the unique identifier is the replication group name.\n Example: replication-group/mycluster
.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount
- The desired task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount
- The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity
- The target capacity of a Spot Fleet request.
\n appstream:fleet:DesiredCapacity
- The desired capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits
- The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits
- The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount
- The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount
- The number of EC2 instances for an Amazon SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property
- The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits
- The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency
- The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits
- The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits
- The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize
- The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups
- The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas
- The number of replicas per node group for an Amazon ElastiCache replication group.
The amount of time, in seconds, to wait for a previous scaling activity to take effect.
\nWith scale-out policies, the intention is to continuously (but not excessively) scale out.\n After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the\n cooldown time. The scaling policy won't increase the desired capacity again unless either a\n larger scale out is triggered or the cooldown period ends. While the cooldown period is in\n effect, capacity added by the initiating scale-out activity is calculated as part of the\n desired capacity for the next scale-out activity. For example, when an alarm triggers a step\n scaling policy to increase the capacity by 2, the scaling activity completes successfully, and\n a cooldown period starts. If the alarm triggers again during the cooldown period but at a more\n aggressive step adjustment of 3, the previous increase of 2 is considered part of the current\n capacity. Therefore, only 1 is added to the capacity.
\nWith scale-in policies, the intention is to scale in conservatively to protect your\n application’s availability, so scale-in activities are blocked until the cooldown period has\n expired. However, if another alarm triggers a scale-out activity during the cooldown period\n after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the\n cooldown period for the scale-in activity stops and doesn't complete.
\nApplication Auto Scaling provides a default value of 300 for the following scalable targets:
\nECS services
\nSpot Fleet requests
\nEMR clusters
\nAppStream 2.0 fleets
\nAurora DB clusters
\nAmazon SageMaker endpoint variants
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nDynamoDB tables
\nDynamoDB global secondary indexes
\nAmazon Comprehend document classification and entity recognizer endpoints
\nLambda provisioned concurrency
\nAmazon Keyspaces tables
\nAmazon MSK broker storage
\nThe amount of time, in seconds, to wait for a previous scaling activity to take effect.
\nWith scale-out policies, the intention is to continuously (but not excessively) scale out.\n After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the\n cooldown time. The scaling policy won't increase the desired capacity again unless either a\n larger scale out is triggered or the cooldown period ends. While the cooldown period is in\n effect, capacity added by the initiating scale-out activity is calculated as part of the\n desired capacity for the next scale-out activity. For example, when an alarm triggers a step\n scaling policy to increase the capacity by 2, the scaling activity completes successfully, and\n a cooldown period starts. If the alarm triggers again during the cooldown period but at a more\n aggressive step adjustment of 3, the previous increase of 2 is considered part of the current\n capacity. Therefore, only 1 is added to the capacity.
\nWith scale-in policies, the intention is to scale in conservatively to protect your\n application’s availability, so scale-in activities are blocked until the cooldown period has\n expired. However, if another alarm triggers a scale-out activity during the cooldown period\n after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the\n cooldown period for the scale-in activity stops and doesn't complete.
\nApplication Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups\n and a default value of 300 for the following scalable targets:
\nECS services
\nSpot Fleet requests
\nEMR clusters
\nAppStream 2.0 fleets
\nAurora DB clusters
\nAmazon SageMaker endpoint variants
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nDynamoDB tables
\nDynamoDB global secondary indexes
\nAmazon Comprehend document classification and entity recognizer endpoints
\nLambda provisioned concurrency
\nAmazon Keyspaces tables
\nAmazon MSK broker storage
\nThe amount of time, in seconds, to wait for a previous scale-out activity to take\n effect.
\nWith the scale-out cooldown period, the intention is to continuously\n (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target\n tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't\n increase the desired capacity again unless either a larger scale out is triggered or the\n cooldown period ends. While the cooldown period is in effect, the capacity added by the\n initiating scale-out activity is calculated as part of the desired capacity for the next\n scale-out activity.
\nApplication Auto Scaling provides a default value of 300 for the following scalable targets:
\nECS services
\nSpot Fleet requests
\nEMR clusters
\nAppStream 2.0 fleets
\nAurora DB clusters
\nAmazon SageMaker endpoint variants
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nDynamoDB tables
\nDynamoDB global secondary indexes
\nAmazon Comprehend document classification and entity recognizer endpoints
\nLambda provisioned concurrency
\nAmazon Keyspaces tables
\nAmazon MSK broker storage
\nThe amount of time, in seconds, to wait for a previous scale-out activity to take\n effect.
\nWith the scale-out cooldown period, the intention is to continuously\n (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target\n tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't\n increase the desired capacity again unless either a larger scale out is triggered or the\n cooldown period ends. While the cooldown period is in effect, the capacity added by the\n initiating scale-out activity is calculated as part of the desired capacity for the next\n scale-out activity.
\nApplication Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups\n and a default value of 300 for the following scalable targets:
\nECS services
\nSpot Fleet requests
\nEMR clusters
\nAppStream 2.0 fleets
\nAurora DB clusters
\nAmazon SageMaker endpoint variants
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nDynamoDB tables
\nDynamoDB global secondary indexes
\nAmazon Comprehend document classification and entity recognizer endpoints
\nLambda provisioned concurrency
\nAmazon Keyspaces tables
\nAmazon MSK broker storage
\nThe amount of time, in seconds, after a scale-in activity completes before another\n scale-in activity can start.
\nWith the scale-in cooldown period, the intention is to scale in\n conservatively to protect your application’s availability, so scale-in activities are blocked\n until the cooldown period has expired. However, if another alarm triggers a scale-out activity\n during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case,\n the scale-in cooldown period stops and doesn't complete.
\nApplication Auto Scaling provides a default value of 300 for the following scalable targets:
\nECS services
\nSpot Fleet requests
\nEMR clusters
\nAppStream 2.0 fleets
\nAurora DB clusters
\nAmazon SageMaker endpoint variants
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nDynamoDB tables
\nDynamoDB global secondary indexes
\nAmazon Comprehend document classification and entity recognizer endpoints
\nLambda provisioned concurrency
\nAmazon Keyspaces tables
\nAmazon MSK broker storage
\nThe amount of time, in seconds, after a scale-in activity completes before another\n scale-in activity can start.
\nWith the scale-in cooldown period, the intention is to scale in\n conservatively to protect your application’s availability, so scale-in activities are blocked\n until the cooldown period has expired. However, if another alarm triggers a scale-out activity\n during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case,\n the scale-in cooldown period stops and doesn't complete.
\nApplication Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups\n and a default value of 300 for the following scalable targets:
\nECS services
\nSpot Fleet requests
\nEMR clusters
\nAppStream 2.0 fleets
\nAurora DB clusters
\nAmazon SageMaker endpoint variants
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nDynamoDB tables
\nDynamoDB global secondary indexes
\nAmazon Comprehend document classification and entity recognizer endpoints
\nLambda provisioned concurrency
\nAmazon Keyspaces tables
\nAmazon MSK broker storage
\nAn exception was thrown for a validation issue. Review the available parameters for the\n API request.
", "smithy.api#error": "client", "smithy.api#httpError": 400 @@ -2031,7 +2083,7 @@ "com.amazonaws.applicationautoscaling#XmlString": { "type": "string", "traits": { - "smithy.api#pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" } } } diff --git a/codegen/sdk-codegen/aws-models/cloud9.2017-09-23.json b/codegen/sdk-codegen/aws-models/cloud9.2017-09-23.json index 0c85dbf66d3..069e0d2d895 100644 --- a/codegen/sdk-codegen/aws-models/cloud9.2017-09-23.json +++ b/codegen/sdk-codegen/aws-models/cloud9.2017-09-23.json @@ -285,6 +285,12 @@ "traits": { "smithy.api#documentation": "The connection type used for connecting to an Amazon EC2 environment. Valid values are\n CONNECT_SSH
(default) and CONNECT_SSM
(connected through\n Amazon EC2 Systems Manager).
For more information, see Accessing no-ingress EC2 instances with\n Amazon EC2 Systems Manager in the Cloud9 User Guide.
" } + }, + "dryRun": { + "target": "com.amazonaws.cloud9#NullableBoolean", + "traits": { + "smithy.api#documentation": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Any new or replacement description for the environment.
" } + }, + "managedCredentialsAction": { + "target": "com.amazonaws.cloud9#ManagedCredentialsAction", + "traits": { + "smithy.api#documentation": "Allows the environment owner to turn on or turn off the Amazon Web Services managed temporary\n credentials for an Cloud9 environment by using one of the following values:
\n\n ENABLE
\n
\n DISABLE
\n
Only the environment owner can change the status of managed temporary credentials. An AccessDeniedException
is thrown if an attempt to turn on or turn off managed temporary credentials is made by an account that's not the environment\n owner.
Access denied. Check your permissions.
", + "smithy.api#documentation": "Access denied or directory not found. Either you don't have permissions for this directory or the directory does not exist. Try calling ListDirectories and check your permissions.
", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -1794,22 +1794,40 @@ } }, "NextToken": { - "target": "com.amazonaws.clouddirectory#NextToken" + "target": "com.amazonaws.clouddirectory#NextToken", + "traits": { + "smithy.api#documentation": "The pagination token.
" + } }, "MaxResults": { - "target": "com.amazonaws.clouddirectory#NumberResults" + "target": "com.amazonaws.clouddirectory#NumberResults", + "traits": { + "smithy.api#documentation": "The maximum number of items to be retrieved in a single call. This is an approximate\n number.
" + } } + }, + "traits": { + "smithy.api#documentation": "Lists parent objects that are associated with a given object in pagination\n fashion.
" } }, "com.amazonaws.clouddirectory#BatchListObjectParentsResponse": { "type": "structure", "members": { "ParentLinks": { - "target": "com.amazonaws.clouddirectory#ObjectIdentifierAndLinkNameList" + "target": "com.amazonaws.clouddirectory#ObjectIdentifierAndLinkNameList", + "traits": { + "smithy.api#documentation": "Returns a list of parent reference and LinkName Tuples.
" + } }, "NextToken": { - "target": "com.amazonaws.clouddirectory#NextToken" + "target": "com.amazonaws.clouddirectory#NextToken", + "traits": { + "smithy.api#documentation": "The pagination token.
" + } } + }, + "traits": { + "smithy.api#documentation": "Represents the output of a ListObjectParents response operation.
" } }, "com.amazonaws.clouddirectory#BatchListObjectPolicies": { @@ -2174,7 +2192,10 @@ } }, "ListObjectParents": { - "target": "com.amazonaws.clouddirectory#BatchListObjectParents" + "target": "com.amazonaws.clouddirectory#BatchListObjectParents", + "traits": { + "smithy.api#documentation": "Lists parent objects that are associated with a given object in pagination\n fashion.
" + } }, "ListObjectPolicies": { "target": "com.amazonaws.clouddirectory#BatchListObjectPolicies", @@ -2375,7 +2396,10 @@ } }, "ListObjectParents": { - "target": "com.amazonaws.clouddirectory#BatchListObjectParentsResponse" + "target": "com.amazonaws.clouddirectory#BatchListObjectParentsResponse", + "traits": { + "smithy.api#documentation": "The list of parent objects to retrieve.
" + } } }, "traits": { @@ -5450,7 +5474,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "[^\\/\\[\\]\\(\\):\\{\\}#@!?\\s\\\\;]+" + "smithy.api#pattern": "^[^\\/\\[\\]\\(\\):\\{\\}#@!?\\s\\\\;]+$" } }, "com.amazonaws.clouddirectory#LinkNameAlreadyInUseException": { @@ -7799,7 +7823,7 @@ "Selector": { "target": "com.amazonaws.clouddirectory#SelectorObjectReference", "traits": { - "smithy.api#documentation": "A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Access Objects. You can identify an object in one of the following ways:
\n\n $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object’s identifier is immutable and no two objects will ever share the same object identifier
\n\n /some/path - Identifies the object based on path
\n\n #SomeBatchReference - Identifies the object in a batch call
\nA path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Access Objects. You can identify an object in one of the following ways:
\n\n $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object’s identifier is immutable and no two objects will ever share the same object identifier. To identify an object with ObjectIdentifier, the ObjectIdentifier must be wrapped in double quotes.
\n\n /some/path - Identifies the object based on path
\n\n #SomeBatchReference - Identifies the object in a batch call
\nThe ARN of the schema that contains the facet with no minor component. See arns and In-Place Schema Upgrade for a description of when to provide minor versions.
" + "smithy.api#documentation": "The ARN of the schema that contains the facet with no minor component. See arns and In-Place Schema Upgrade for a description of when to provide minor versions.\n If this value is set, FacetName must also be set.
" } }, "FacetName": { "target": "com.amazonaws.clouddirectory#FacetName", "traits": { - "smithy.api#documentation": "The name of the facet.
" + "smithy.api#documentation": "The name of the facet. If this value is set, SchemaArn must also be set.
" } } }, diff --git a/codegen/sdk-codegen/aws-models/cloudwatchlogs.2014-03-28.json b/codegen/sdk-codegen/aws-models/cloudwatchlogs.2014-03-28.json index d2b860cc779..7eac2d48c14 100644 --- a/codegen/sdk-codegen/aws-models/cloudwatchlogs.2014-03-28.json +++ b/codegen/sdk-codegen/aws-models/cloudwatchlogs.2014-03-28.json @@ -60,7 +60,7 @@ } ], "traits": { - "smithy.api#documentation": "Associates the specified AWS Key Management Service (AWS KMS) customer master key (CMK) with the specified log group.
\nAssociating an AWS KMS CMK with a log group overrides any existing associations between the log group and a CMK. \n After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK. \n This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. \n This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.
\nCloudWatch Logs supports only symmetric CMKs. Do not use an associate an asymmetric CMK\n with your log group. For more information, see Using Symmetric and Asymmetric\n Keys.
\nIt can take up to 5 minutes for this operation to take effect.
\nIf you attempt to associate a CMK with a log group but the CMK does not exist or the\n CMK is disabled, you receive an InvalidParameterException
error.
Associates the specified Key Management Service customer master key (CMK) with the specified log group.
\nAssociating an KMS CMK with a log group overrides any existing associations between the log group and a CMK. \n After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK. \n This association is stored as long as the data encrypted with the CMK is still within CloudWatch Logs. \n This enables CloudWatch Logs to decrypt this data whenever it is requested.
\nCloudWatch Logs supports only symmetric CMKs. Do not use an associate an asymmetric CMK\n with your log group. For more information, see Using Symmetric and Asymmetric\n Keys.
\nIt can take up to 5 minutes for this operation to take effect.
\nIf you attempt to associate a CMK with a log group but the CMK does not exist or the\n CMK is disabled, you receive an InvalidParameterException
error.
The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. This must be a symmetric CMK.\n For more information, see Amazon Resource Names - AWS Key Management Service (AWS KMS) and Using Symmetric and Asymmetric Keys.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. This must be a symmetric CMK.\n For more information, see Amazon Resource Names - Key Management Service and Using Symmetric and Asymmetric Keys.
", "smithy.api#required": {} } } @@ -188,7 +188,7 @@ "destination": { "target": "com.amazonaws.cloudwatchlogs#ExportDestinationBucket", "traits": { - "smithy.api#documentation": "The name of S3 bucket for the exported log data. The bucket must be in the same AWS region.
", + "smithy.api#documentation": "The name of S3 bucket for the exported log data. The bucket must be in the same Amazon Web Services region.
", "smithy.api#required": {} } }, @@ -234,7 +234,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a log group with the specified name. You can create up to 20,000 log groups per account.
\nYou must use the following guidelines when naming a log group:
\nLog group names must be unique within a region for an AWS account.
\nLog group names can be between 1 and 512 characters long.
\nLog group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), \n '/' (forward slash), '.' (period), and '#' (number sign)
\nWhen you create a log group, by default the log events in the log group never expire. To set\n a retention policy so that events expire and are deleted after a specified time, use\n PutRetentionPolicy.
\nIf you associate a AWS Key Management Service (AWS KMS) customer master key (CMK) with the log group, ingested data is encrypted using the CMK. \n This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. \n This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.
\nIf you attempt to associate a CMK with the log group but the CMK does not exist or the\n CMK is disabled, you receive an InvalidParameterException
error.
CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric CMK with\n your log group. For more information, see Using Symmetric and Asymmetric\n Keys.
\nCreates a log group with the specified name. You can create up to 20,000 log groups per account.
\nYou must use the following guidelines when naming a log group:
\nLog group names must be unique within a region for an Amazon Web Services account.
\nLog group names can be between 1 and 512 characters long.
\nLog group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), \n '/' (forward slash), '.' (period), and '#' (number sign)
\nWhen you create a log group, by default the log events in the log group never expire. To set\n a retention policy so that events expire and are deleted after a specified time, use\n PutRetentionPolicy.
\nIf you associate a Key Management Service customer master key (CMK) with the log group, ingested data is encrypted using the CMK. \n This association is stored as long as the data encrypted with the CMK is still within CloudWatch Logs. \n This enables CloudWatch Logs to decrypt this data whenever it is requested.
\nIf you attempt to associate a CMK with the log group but the CMK does not exist or the\n CMK is disabled, you receive an InvalidParameterException
error.
CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric CMK with\n your log group. For more information, see Using Symmetric and Asymmetric\n Keys.
\nThe Amazon Resource Name (ARN) of the CMK to use when encrypting log data. \n For more information, see Amazon Resource Names - AWS Key Management Service (AWS KMS).
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. \n For more information, see Amazon Resource Names - Key Management Service.
" } }, "tags": { "target": "com.amazonaws.cloudwatchlogs#Tags", "traits": { - "smithy.api#documentation": "The key-value pairs to use for the tags.
" + "smithy.api#documentation": "The key-value pairs to use for the tags.
\nCloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to \n log groups using the aws:ResourceTag/key-name\n
or aws:TagKeys
condition keys. \n For more information about using tags to control access, see \n Controlling access to Amazon Web Services resources using tags.
The number of days to retain the log events in the specified log group.\n Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653.
\nIf you omit retentionInDays
in a PutRetentionPolicy
operation, \n the events in the log group are always retained and never expire.
The number of days to retain the log events in the specified log group.\n Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653.
\nTo set a log group to never have log events expire, use\n DeleteRetentionPolicy.\n
" } }, "com.amazonaws.cloudwatchlogs#DefaultValue": { @@ -798,7 +798,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the specified log groups. You can list all your log groups or filter the results by prefix.\n The results are ASCII-sorted by log group name.
", + "smithy.api#documentation": "Lists the specified log groups. You can list all your log groups or filter the results by prefix.\n The results are ASCII-sorted by log group name.
\nCloudWatch Logs doesn’t support IAM policies that control access to the DescribeLogGroups
action by using the \n aws:ResourceTag/key-name\n
condition key. Other CloudWatch Logs actions\n do support the use of the aws:ResourceTag/key-name\n
condition key to control access.\n For more information about using tags to control access, see \n Controlling access to Amazon Web Services resources using tags.
An IAM policy document that governs which AWS accounts can create subscription filters\n against this destination.
" + "smithy.api#documentation": "An IAM policy document that governs which Amazon Web Services accounts can create subscription filters\n against this destination.
" } }, "arn": { @@ -1376,7 +1376,7 @@ } ], "traits": { - "smithy.api#documentation": "Disassociates the associated AWS Key Management Service (AWS KMS) customer master key (CMK) from the specified log group.
\nAfter the AWS KMS CMK is disassociated from the log group, AWS CloudWatch Logs stops encrypting newly ingested data for the log group. \n All previously ingested data remains encrypted, and AWS CloudWatch Logs requires permissions for the CMK whenever the encrypted data is requested.
\nNote that it can take up to 5 minutes for this operation to take effect.
" + "smithy.api#documentation": "Disassociates the associated Key Management Service customer master key (CMK) from the specified log group.
\nAfter the KMS CMK is disassociated from the log group, CloudWatch Logs stops encrypting newly ingested data for the log group. \n All previously ingested data remains encrypted, and CloudWatch Logs requires permissions for the CMK whenever the encrypted data is requested.
\nNote that it can take up to 5 minutes for this operation to take effect.
" } }, "com.amazonaws.cloudwatchlogs#DisassociateKmsKeyRequest": { @@ -1860,7 +1860,7 @@ "nextToken": { "target": "com.amazonaws.cloudwatchlogs#NextToken", "traits": { - "smithy.api#documentation": "The token for the next set of items to return. (You received this token from a previous call.)
\nUsing this token works only when you specify true
for startFromHead
.
The token for the next set of items to return. (You received this token from a previous call.)
" } }, "limit": { @@ -1872,7 +1872,7 @@ "startFromHead": { "target": "com.amazonaws.cloudwatchlogs#StartFromHead", "traits": { - "smithy.api#documentation": "If the value is true, the earliest log events are returned first.\n If the value is false, the latest log events are returned first.\n The default value is false.
\nIf you are using nextToken
in this operation, you must specify true
for startFromHead
.
If the value is true, the earliest log events are returned first.\n If the value is false, the latest log events are returned first.\n The default value is false.
\nIf you are using a previous nextForwardToken
value as the nextToken
in this operation, \n you must specify true
for startFromHead
.
You can use Amazon CloudWatch Logs to monitor, store, and access your log files from\n EC2 instances, AWS CloudTrail, and other sources. You can then retrieve the associated\n log data from CloudWatch Logs using the CloudWatch console, CloudWatch Logs commands in the\n AWS CLI, CloudWatch Logs API, or CloudWatch Logs SDK.
\nYou can use CloudWatch Logs to:
\n\n Monitor logs from EC2 instances in real-time: You\n can use CloudWatch Logs to monitor applications and systems using log data. For example,\n CloudWatch Logs can track the number of errors that occur in your application logs and\n send you a notification whenever the rate of errors exceeds a threshold that you specify.\n CloudWatch Logs uses your log data for monitoring so no code changes are required. For\n example, you can monitor application logs for specific literal terms (such as\n \"NullReferenceException\") or count the number of occurrences of a literal term at a\n particular position in log data (such as \"404\" status codes in an Apache access log). When\n the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch\n metric that you specify.
\n\n Monitor AWS CloudTrail logged events: You can\n create alarms in CloudWatch and receive notifications of particular API activity as\n captured by CloudTrail. You can use the notification to perform troubleshooting.
\n\n Archive log data: You can use CloudWatch Logs to\n store your log data in highly durable storage. You can change the log retention setting so\n that any log events older than this setting are automatically deleted. The CloudWatch Logs\n agent makes it easy to quickly send both rotated and non-rotated log data off of a host\n and into the log service. You can then access the raw log data when you need it.
\nYou can use Amazon CloudWatch Logs to monitor, store, and access your log files from\n EC2 instances, CloudTrail, and other sources. You can then retrieve the associated\n log data from CloudWatch Logs using the CloudWatch console, CloudWatch Logs commands in the\n Amazon Web Services CLI, CloudWatch Logs API, or CloudWatch Logs SDK.
\nYou can use CloudWatch Logs to:
\n\n Monitor logs from EC2 instances in real-time: You\n can use CloudWatch Logs to monitor applications and systems using log data. For example,\n CloudWatch Logs can track the number of errors that occur in your application logs and\n send you a notification whenever the rate of errors exceeds a threshold that you specify.\n CloudWatch Logs uses your log data for monitoring so no code changes are required. For\n example, you can monitor application logs for specific literal terms (such as\n \"NullReferenceException\") or count the number of occurrences of a literal term at a\n particular position in log data (such as \"404\" status codes in an Apache access log). When\n the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch\n metric that you specify.
\n\n Monitor CloudTrail logged events: You can\n create alarms in CloudWatch and receive notifications of particular API activity as\n captured by CloudTrail. You can use the notification to perform troubleshooting.
\n\n Archive log data: You can use CloudWatch Logs to\n store your log data in highly durable storage. You can change the log retention setting so\n that any log events older than this setting are automatically deleted. The CloudWatch Logs\n agent makes it easy to quickly send both rotated and non-rotated log data off of a host\n and into the log service. You can then access the raw log data when you need it.
\nThe fields to use as dimensions for the metric. One metric filter can include\n as many as three dimensions.
\nMetrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress
or requestID
as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n
To help prevent accidental high charges, Amazon disables a metric filter\n if it generates 1000 different name/value pairs for the dimensions that you \n have specified within a certain amount of time.
\nYou can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated AWS Charges.\n
\nThe fields to use as dimensions for the metric. One metric filter can include\n as many as three dimensions.
\nMetrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress
or requestID
as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n
To help prevent accidental high charges, Amazon disables a metric filter\n if it generates 1000 different name/value pairs for the dimensions that you \n have specified within a certain amount of time.
\nYou can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.\n
\nCreates or updates an access policy associated with an existing\n destination. An access policy is an IAM policy document that is used\n to authorize claims to register a subscription filter against a given destination.
\nIf multiple AWS accounts are sending logs to this destination, each sender account must be \n listed separately in the policy. The policy does not support specifying *
\n as the Principal or the use of the aws:PrincipalOrgId
global key.
Creates or updates an access policy associated with an existing\n destination. An access policy is an IAM policy document that is used\n to authorize claims to register a subscription filter against a given destination.
\nIf multiple Amazon Web Services accounts are sending logs to this destination, each sender account must be \n listed separately in the policy. The policy does not support specifying *
\n as the Principal or the use of the aws:PrincipalOrgId
global key.
Uploads a batch of log events to the specified log stream.
\nYou must include the sequence token obtained from the response of the previous call. An\n upload in a newly created log stream does not require a sequence token. You can also get the\n sequence token in the expectedSequenceToken
field from\n InvalidSequenceTokenException
. If you call PutLogEvents
twice\n within a narrow time period using the same value for sequenceToken
, both calls\n might be successful or one might be rejected.
The batch of events must satisfy the following constraints:
\nThe maximum batch size is 1,048,576 bytes. This size is calculated as the sum of\n all event messages in UTF-8, plus 26 bytes for each log event.
\nNone of the log events in the batch can be more than 2 hours in the future.
\nNone of the log events in the batch can be older than 14 days or older than the retention\n period of the log group.
\nThe log events in the batch must be in chronological order by their timestamp. The\n timestamp is the time the event occurred, expressed as the number of milliseconds after\n Jan 1, 1970 00:00:00 UTC. (In AWS Tools for PowerShell and the AWS SDK for .NET, the\n timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example,\n 2017-09-15T13:45:30.)
\nA batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.
\nThe maximum number of log events in a batch is 10,000.
\nThere is a quota of 5 requests per second per log stream. Additional requests are throttled. This quota can't be changed.
\nIf a call to PutLogEvents
returns \"UnrecognizedClientException\" the most likely cause is an invalid AWS access key ID or secret key.
Uploads a batch of log events to the specified log stream.
\nYou must include the sequence token obtained from the response of the previous call. An\n upload in a newly created log stream does not require a sequence token. You can also get the\n sequence token in the expectedSequenceToken
field from\n InvalidSequenceTokenException
. If you call PutLogEvents
twice\n within a narrow time period using the same value for sequenceToken
, both calls\n might be successful or one might be rejected.
The batch of events must satisfy the following constraints:
\nThe maximum batch size is 1,048,576 bytes. This size is calculated as the sum of\n all event messages in UTF-8, plus 26 bytes for each log event.
\nNone of the log events in the batch can be more than 2 hours in the future.
\nNone of the log events in the batch can be older than 14 days or older than the retention\n period of the log group.
\nThe log events in the batch must be in chronological order by their timestamp. The\n timestamp is the time the event occurred, expressed as the number of milliseconds after\n Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the\n timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example,\n 2017-09-15T13:45:30.)
\nA batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.
\nThe maximum number of log events in a batch is 10,000.
\nThere is a quota of 5 requests per second per log stream. Additional requests are throttled. This quota can't be changed.
\nIf a call to PutLogEvents
returns \"UnrecognizedClientException\" the most likely cause is an invalid Amazon Web Services access key ID or secret key.
Creates or updates a metric filter and associates it with the specified log group.\n Metric filters allow you to configure rules to extract metric data from log events ingested\n through PutLogEvents.
\nThe maximum number of metric filters that can be associated with a log group is\n 100.
\nWhen you create a metric filter, you can also optionally assign a unit and dimensions\n to the metric that is created.
\nMetrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress
or requestID
as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n
To help prevent accidental high charges, Amazon disables a metric filter\n if it generates 1000 different name/value pairs for the dimensions that you \n have specified within a certain amount of time.
\nYou can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated AWS Charges.\n
\nCreates or updates a metric filter and associates it with the specified log group.\n Metric filters allow you to configure rules to extract metric data from log events ingested\n through PutLogEvents.
\nThe maximum number of metric filters that can be associated with a log group is\n 100.
\nWhen you create a metric filter, you can also optionally assign a unit and dimensions\n to the metric that is created.
\nMetrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress
or requestID
as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n
To help prevent accidental high charges, Amazon disables a metric filter\n if it generates 1000 different name/value pairs for the dimensions that you \n have specified within a certain amount of time.
\nYou can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.\n
\nCreates or updates a resource policy allowing other AWS services to put log events to\n this account, such as Amazon Route 53. An account can have up to 10 resource policies per AWS\n Region.
" + "smithy.api#documentation": "Creates or updates a resource policy allowing other Amazon Web Services services to put log events to\n this account, such as Amazon Route 53. An account can have up to 10 resource policies per Amazon Web Services\n Region.
" } }, "com.amazonaws.cloudwatchlogs#PutResourcePolicyRequest": { @@ -3170,7 +3170,7 @@ "policyDocument": { "target": "com.amazonaws.cloudwatchlogs#PolicyDocument", "traits": { - "smithy.api#documentation": "Details of the new policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string.\n This parameter is required.
\nThe following example creates a resource policy enabling the Route 53 service to put\n DNS query logs in to the specified log group. Replace \"logArn\"
with the ARN of your CloudWatch Logs resource, such as a log group or log stream.
\n { \n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"Route53LogsToCloudWatchLogs\", \n \"Effect\": \"Allow\", \n \"Principal\": {\n \"Service\": [\n \"route53.amazonaws.com\"\n ]\n }, \n \"Action\":\"logs:PutLogEvents\", \n \"Resource\": \"logArn\"\n }\n ]\n}
\n \n
Details of the new policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string.\n This parameter is required.
\nThe following example creates a resource policy enabling the Route 53 service to put\n DNS query logs in to the specified log group. Replace \"logArn\"
with the ARN of \n your CloudWatch Logs resource, such as a log group or log stream.
CloudWatch Logs also supports aws:SourceArn\n and aws:SourceAccount\ncondition context keys.
\nIn the example resource policy, you would replace the value of SourceArn
with the resource making the\n call from Route 53 to CloudWatch Logs and replace the value of SourceAccount
with \n the Amazon Web Services account ID making that call.
\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"Route53LogsToCloudWatchLogs\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": [\n \"route53.amazonaws.com\"\n ]\n },\n \"Action\": \"logs:PutLogEvents\",\n \"Resource\": \"logArn\",\n \"Condition\": {\n \"ArnLike\": {\n \"aws:SourceArn\": \"myRoute53ResourceArn\"\n },\n \"StringEquals\": {\n \"aws:SourceAccount\": \"myAwsAccountId\"\n }\n }\n }\n ]\n}
\n \n
Creates or updates a subscription filter and associates it with the specified log\n group. Subscription filters allow you to subscribe to a real-time stream of log events\n ingested through PutLogEvents and have them delivered to a specific\n destination. When log events are sent to the \n receiving service, they are Base64 encoded\n and compressed with the gzip format.
\nThe following destinations are supported for subscription filters:
\nAn Amazon Kinesis stream belonging to the same account as the subscription filter,\n for same-account delivery.
\nA logical destination that belongs to a different account, for cross-account delivery.
\nAn Amazon Kinesis Firehose delivery stream that belongs to the same account as the\n subscription filter, for same-account delivery.
\nAn AWS Lambda function that belongs to the same account as the subscription filter,\n for same-account delivery.
\nEach log group can have up to two subscription filters associated with it. If you are\n updating an existing filter, you must specify the correct name in filterName
.\n
To perform a PutSubscriptionFilter
operation, you must also have the \n iam:PassRole
permission.
Creates or updates a subscription filter and associates it with the specified log\n group. Subscription filters allow you to subscribe to a real-time stream of log events\n ingested through PutLogEvents and have them delivered to a specific\n destination. When log events are sent to the \n receiving service, they are Base64 encoded\n and compressed with the gzip format.
\nThe following destinations are supported for subscription filters:
\nAn Amazon Kinesis stream belonging to the same account as the subscription filter,\n for same-account delivery.
\nA logical destination that belongs to a different account, for cross-account delivery.
\nAn Amazon Kinesis Firehose delivery stream that belongs to the same account as the\n subscription filter, for same-account delivery.
\nAn Lambda function that belongs to the same account as the subscription filter,\n for same-account delivery.
\nEach log group can have up to two subscription filters associated with it. If you are\n updating an existing filter, you must specify the correct name in filterName
.\n
To perform a PutSubscriptionFilter
operation, you must also have the \n iam:PassRole
permission.
The ARN of the destination to deliver matching log events to. Currently, the supported\n destinations are:
\nAn Amazon Kinesis stream belonging to the same account as the subscription filter,\n for same-account delivery.
\nA logical destination (specified using an ARN) belonging to a different account, \n for cross-account delivery.
\nIf you are setting up a cross-account subscription, the destination must have an \n IAM policy associated with it that allows the sender to send logs to the destination.\n For more information, see PutDestinationPolicy.
\nAn Amazon Kinesis Firehose delivery stream belonging to the same account as the\n subscription filter, for same-account delivery.
\nAn AWS Lambda function belonging to the same account as the subscription filter,\n for same-account delivery.
\nThe ARN of the destination to deliver matching log events to. Currently, the supported\n destinations are:
\nAn Amazon Kinesis stream belonging to the same account as the subscription filter,\n for same-account delivery.
\nA logical destination (specified using an ARN) belonging to a different account, \n for cross-account delivery.
\nIf you are setting up a cross-account subscription, the destination must have an \n IAM policy associated with it that allows the sender to send logs to the destination.\n For more information, see PutDestinationPolicy.
\nAn Amazon Kinesis Firehose delivery stream belonging to the same account as the\n subscription filter, for same-account delivery.
\nA Lambda function belonging to the same account as the subscription filter,\n for same-account delivery.
\nAdds or updates the specified tags for the specified log group.
\nTo list the tags for a log group, use ListTagsLogGroup.\n To remove tags, use UntagLogGroup.
\nFor more information about tags, see Tag Log Groups in Amazon CloudWatch Logs\n in the Amazon CloudWatch Logs User Guide.
" + "smithy.api#documentation": "Adds or updates the specified tags for the specified log group.
\nTo list the tags for a log group, use ListTagsLogGroup.\n To remove tags, use UntagLogGroup.
\nFor more information about tags, see Tag Log Groups in Amazon CloudWatch Logs\n in the Amazon CloudWatch Logs User Guide.
\nCloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to \n log groups using the aws:ResourceTag/key-name\n
or aws:TagKeys
condition keys. \n For more information about using tags to control access, see \n Controlling access to Amazon Web Services resources using tags.
The most likely cause is an invalid AWS access key ID or secret key.
", + "smithy.api#documentation": "The most likely cause is an invalid Amazon Web Services access key ID or secret key.
", "smithy.api#error": "client" } }, @@ -4210,7 +4210,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes the specified tags from the specified log group.
\nTo list the tags for a log group, use ListTagsLogGroup.\n To add tags, use TagLogGroup.
" + "smithy.api#documentation": "Removes the specified tags from the specified log group.
\nTo list the tags for a log group, use ListTagsLogGroup.\n To add tags, use TagLogGroup.
\nCloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to \n log groups using the aws:ResourceTag/key-name\n
or aws:TagKeys
condition keys. \n
Set to true to report the status of a build's start and finish to your source\n provider. This option is valid only when your source provider is GitHub, GitHub\n Enterprise, or Bitbucket. If this is set and you use a different source provider, an\n invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must\nhave write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
\nThe status of a build triggered by a webhook is always reported to your source\n provider.
\n Set to true to report the status of a build's start and finish to your source\n provider. This option is valid only when your source provider is GitHub, GitHub\n Enterprise, or Bitbucket. If this is set and you use a different source provider, an\n invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must\nhave write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
\nThe status of a build triggered by a webhook is always reported to your source\n provider.
\nIf your project's builds are triggered by a webhook, you must push a\n new commit to the repo for a change to this property to take\n effect.
" } }, "buildStatusConfig": { diff --git a/codegen/sdk-codegen/aws-models/configservice.2014-11-12.json b/codegen/sdk-codegen/aws-models/configservice.2014-11-12.json index e614d20ebb3..48cfffc737d 100644 --- a/codegen/sdk-codegen/aws-models/configservice.2014-11-12.json +++ b/codegen/sdk-codegen/aws-models/configservice.2014-11-12.json @@ -85,7 +85,7 @@ "com.amazonaws.configservice#AccountId": { "type": "string", "traits": { - "smithy.api#pattern": "\\d{12}" + "smithy.api#pattern": "^\\d{12}$" } }, "com.amazonaws.configservice#AggregateComplianceByConfigRule": { @@ -1345,7 +1345,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": ".*\\S.*" + "smithy.api#pattern": "\\S" } }, "com.amazonaws.configservice#ConfigRuleNames": { @@ -1491,7 +1491,7 @@ "com.amazonaws.configservice#ConfigurationAggregatorArn": { "type": "string", "traits": { - "smithy.api#pattern": "arn:aws[a-z\\-]*:config:[a-z\\-\\d]+:\\d+:config-aggregator/config-aggregator-[a-z\\d]+" + "smithy.api#pattern": "^arn:aws[a-z\\-]*:config:[a-z\\-\\d]+:\\d+:config-aggregator/config-aggregator-[a-z\\d]+$" } }, "com.amazonaws.configservice#ConfigurationAggregatorList": { @@ -1507,7 +1507,7 @@ "min": 1, "max": 256 }, - "smithy.api#pattern": "[\\w\\-]+" + "smithy.api#pattern": "^[\\w\\-]+$" } }, "com.amazonaws.configservice#ConfigurationAggregatorNameList": { @@ -2080,7 +2080,7 @@ "min": 1, "max": 256 }, - "smithy.api#pattern": "[a-zA-Z][-a-zA-Z0-9]*" + "smithy.api#pattern": "^[a-zA-Z][-a-zA-Z0-9]*$" } }, "com.amazonaws.configservice#ConformancePackNamesList": { @@ -7523,7 +7523,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": ".*\\S.*" + "smithy.api#pattern": "\\S" } }, "com.amazonaws.configservice#OrganizationConfigRuleNames": { @@ -7728,7 +7728,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "[a-zA-Z][-a-zA-Z0-9]*" + "smithy.api#pattern": "^[a-zA-Z][-a-zA-Z0-9]*$" } }, "com.amazonaws.configservice#OrganizationConformancePackNames": { @@ -9081,7 +9081,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": 
"[\\s\\S]*" + "smithy.api#pattern": "^[\\s\\S]*$" } }, "com.amazonaws.configservice#QueryExpression": { @@ -9091,7 +9091,7 @@ "min": 1, "max": 4096 }, - "smithy.api#pattern": "[\\s\\S]*" + "smithy.api#pattern": "^[\\s\\S]*$" } }, "com.amazonaws.configservice#QueryId": { @@ -10271,6 +10271,50 @@ { "value": "AWS::SSM::FileData", "name": "FileData" + }, + { + "value": "AWS::Backup::BackupPlan", + "name": "BackupPlan" + }, + { + "value": "AWS::Backup::BackupSelection", + "name": "BackupSelection" + }, + { + "value": "AWS::Backup::BackupVault", + "name": "BackupVault" + }, + { + "value": "AWS::Backup::RecoveryPoint", + "name": "BackupRecoveryPoint" + }, + { + "value": "AWS::ECR::Repository", + "name": "ECRRepository" + }, + { + "value": "AWS::ECS::Cluster", + "name": "ECSCluster" + }, + { + "value": "AWS::ECS::Service", + "name": "ECSService" + }, + { + "value": "AWS::ECS::TaskDefinition", + "name": "ECSTaskDefinition" + }, + { + "value": "AWS::EFS::AccessPoint", + "name": "EFSAccessPoint" + }, + { + "value": "AWS::EFS::FileSystem", + "name": "EFSFileSystem" + }, + { + "value": "AWS::EKS::Cluster", + "name": "EKSCluster" } ] } @@ -10381,7 +10425,7 @@ "min": 1, "max": 256 }, - "smithy.api#pattern": "[\\w\\-]+" + "smithy.api#pattern": "^[\\w\\-]+$" } }, "com.amazonaws.configservice#RetentionConfigurationNameList": { @@ -10421,7 +10465,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "[A-Za-z0-9-]+" + "smithy.api#pattern": "^[A-Za-z0-9-]+$" } }, "com.amazonaws.configservice#Scope": { @@ -11511,7 +11555,7 @@ "min": 1, "max": 1024 }, - "smithy.api#pattern": "s3://.*" + "smithy.api#pattern": "^s3://" } }, "com.amazonaws.configservice#TooManyTagsException": { diff --git a/codegen/sdk-codegen/aws-models/costexplorer.2017-10-25.json b/codegen/sdk-codegen/aws-models/costexplorer.2017-10-25.json index 5024431ee4d..0554e8bffb9 100644 --- a/codegen/sdk-codegen/aws-models/costexplorer.2017-10-25.json +++ b/codegen/sdk-codegen/aws-models/costexplorer.2017-10-25.json @@ -136,7 
+136,7 @@ "name": "ce" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "The Cost Explorer API enables you to programmatically query your cost and usage data. You can query for aggregated data \n\t\t\tsuch as total monthly costs or total daily usage. You can also query for granular data, such as the number of \n\t\t\tdaily write operations for Amazon DynamoDB database tables in your production environment.
\n\t\tService Endpoint
\n\t\tThe Cost Explorer API provides the following endpoint:
\n\t\t\n https://ce.us-east-1.amazonaws.com
\n
For information about costs associated with the Cost Explorer API, see \n\t\t\tAWS Cost Management Pricing.
", + "smithy.api#documentation": "You can use the Cost Explorer API to programmatically query your cost and usage data. You\n can query for aggregated data such as total monthly costs or total daily usage. You can also\n query for granular data. This might include the number of daily write operations for Amazon\n DynamoDB database tables in your production environment.
\n\t\tService Endpoint
\n\t\tThe Cost Explorer API provides the following endpoint:
\n\t\t\n https://ce.us-east-1.amazonaws.com
\n
For information about the costs that are associated with the Cost Explorer API, see\n Amazon Web Services Cost\n Management Pricing.
", "smithy.api#title": "AWS Cost Explorer Service" } }, @@ -173,64 +173,64 @@ "AnomalyId": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The unique identifier for the anomaly.\n
", + "smithy.api#documentation": "The unique identifier for the anomaly.
", "smithy.api#required": {} } }, "AnomalyStartDate": { "target": "com.amazonaws.costexplorer#YearMonthDay", "traits": { - "smithy.api#documentation": "\n The first day the anomaly is detected.\n
" + "smithy.api#documentation": "The first day the anomaly is detected.
" } }, "AnomalyEndDate": { "target": "com.amazonaws.costexplorer#YearMonthDay", "traits": { - "smithy.api#documentation": "\n The last day the anomaly is detected.\n
" + "smithy.api#documentation": "The last day the anomaly is detected.
" } }, "DimensionValue": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The dimension for the anomaly. For example, an AWS service in a service monitor.\n
" + "smithy.api#documentation": "The dimension for the anomaly (for example, an Amazon Web Services service in a service\n monitor).
" } }, "RootCauses": { "target": "com.amazonaws.costexplorer#RootCauses", "traits": { - "smithy.api#documentation": "\n The list of identified root causes for the anomaly.\n
" + "smithy.api#documentation": "The list of identified root causes for the anomaly.
" } }, "AnomalyScore": { "target": "com.amazonaws.costexplorer#AnomalyScore", "traits": { - "smithy.api#documentation": "\n The latest and maximum score for the anomaly.\n
", + "smithy.api#documentation": "The latest and maximum score for the anomaly.
", "smithy.api#required": {} } }, "Impact": { "target": "com.amazonaws.costexplorer#Impact", "traits": { - "smithy.api#documentation": "\n The dollar impact for the anomaly.\n
", + "smithy.api#documentation": "The dollar impact for the anomaly.
", "smithy.api#required": {} } }, "MonitorArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) for the cost monitor that generated this anomaly.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) for the cost monitor that generated this anomaly.\n
", "smithy.api#required": {} } }, "Feedback": { "target": "com.amazonaws.costexplorer#AnomalyFeedbackType", "traits": { - "smithy.api#documentation": "\n The feedback value.\n
" + "smithy.api#documentation": "The feedback value.
" } } }, "traits": { - "smithy.api#documentation": "\n An unusual cost pattern. This consists of the detailed metadata and the current status of the anomaly object.\n
" + "smithy.api#documentation": "An unusual cost pattern. This consists of the detailed metadata and the current status\n of the anomaly object.
" } }, "com.amazonaws.costexplorer#AnomalyDateInterval": { @@ -239,19 +239,19 @@ "StartDate": { "target": "com.amazonaws.costexplorer#YearMonthDay", "traits": { - "smithy.api#documentation": "\n The first date an anomaly was observed.\n
", + "smithy.api#documentation": "The first date an anomaly was observed.
", "smithy.api#required": {} } }, "EndDate": { "target": "com.amazonaws.costexplorer#YearMonthDay", "traits": { - "smithy.api#documentation": "\n The last date an anomaly was observed.\n
" + "smithy.api#documentation": "The last date an anomaly was observed.
" } } }, "traits": { - "smithy.api#documentation": "\n The time period for an anomaly.\n
" + "smithy.api#documentation": "The time period for an anomaly.
" } }, "com.amazonaws.costexplorer#AnomalyFeedbackType": { @@ -279,45 +279,45 @@ "MonitorArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) value.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) value.
" } }, "MonitorName": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The name of the monitor.\n
", + "smithy.api#documentation": "The name of the monitor.
", "smithy.api#required": {} } }, "CreationDate": { "target": "com.amazonaws.costexplorer#YearMonthDay", "traits": { - "smithy.api#documentation": "\n The date when the monitor was created.\n
" + "smithy.api#documentation": "The date when the monitor was created.
" } }, "LastUpdatedDate": { "target": "com.amazonaws.costexplorer#YearMonthDay", "traits": { - "smithy.api#documentation": "\n The date when the monitor was last updated.\n
" + "smithy.api#documentation": "The date when the monitor was last updated.
" } }, "LastEvaluatedDate": { "target": "com.amazonaws.costexplorer#YearMonthDay", "traits": { - "smithy.api#documentation": "\n The date when the monitor last evaluated for anomalies.\n
" + "smithy.api#documentation": "The date when the monitor last evaluated for anomalies.
" } }, "MonitorType": { "target": "com.amazonaws.costexplorer#MonitorType", "traits": { - "smithy.api#documentation": "\n The possible type values.\n
", + "smithy.api#documentation": "The possible type values.
", "smithy.api#required": {} } }, "MonitorDimension": { "target": "com.amazonaws.costexplorer#MonitorDimension", "traits": { - "smithy.api#documentation": "\n The dimensions to evaluate.\n
" + "smithy.api#documentation": "The dimensions to evaluate.
" } }, "MonitorSpecification": { @@ -326,12 +326,12 @@ "DimensionalValueCount": { "target": "com.amazonaws.costexplorer#NonNegativeInteger", "traits": { - "smithy.api#documentation": "\n The value for evaluated dimensions.\n
" + "smithy.api#documentation": "The value for evaluated dimensions.
" } } }, "traits": { - "smithy.api#documentation": " This object continuously inspects your account's cost data for anomalies, based on\n MonitorType
and MonitorSpecification
. The content consists\n of detailed metadata and the current status of the monitor object.
This object continuously inspects your account's cost data for anomalies. It's based\n on MonitorType
and MonitorSpecification
. The content consists\n of detailed metadata and the current status of the monitor object.
\n The maximum score observed during the AnomalyDateInterval
.\n
The maximum score that's observed during the AnomalyDateInterval
.
\n The last observed score.\n
", + "smithy.api#documentation": "The last observed score.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Quantifies the anomaly. The higher score means that it is more anomalous.
" + "smithy.api#documentation": "Quantifies the anomaly. The higher score means that it's more anomalous.
" } }, "com.amazonaws.costexplorer#AnomalySubscription": { @@ -368,53 +368,53 @@ "SubscriptionArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": " The AnomalySubscription
Amazon Resource Name (ARN).
The AnomalySubscription
Amazon Resource Name (ARN).
\n Your unique account identifier.\n
" + "smithy.api#documentation": "Your unique account identifier.
" } }, "MonitorArnList": { "target": "com.amazonaws.costexplorer#MonitorArnList", "traits": { - "smithy.api#documentation": "\n A list of cost anomaly monitors.\n
", + "smithy.api#documentation": "A list of cost anomaly monitors.
", "smithy.api#required": {} } }, "Subscribers": { "target": "com.amazonaws.costexplorer#Subscribers", "traits": { - "smithy.api#documentation": "\n A list of subscribers to notify.\n
", + "smithy.api#documentation": "A list of subscribers to notify.
", "smithy.api#required": {} } }, "Threshold": { "target": "com.amazonaws.costexplorer#NullableNonNegativeDouble", "traits": { - "smithy.api#documentation": "\n The dollar value that triggers a notification if the threshold is exceeded.\n
", + "smithy.api#documentation": "The dollar value that triggers a notification if the threshold is exceeded.
", "smithy.api#required": {} } }, "Frequency": { "target": "com.amazonaws.costexplorer#AnomalySubscriptionFrequency", "traits": { - "smithy.api#documentation": "\n The frequency at which anomaly reports are sent over email.\n
", + "smithy.api#documentation": "The frequency that anomaly reports are sent over email.
", "smithy.api#required": {} } }, "SubscriptionName": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The name for the subscription.\n
", + "smithy.api#documentation": "The name for the subscription.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "\n The association between a monitor, threshold, and list of subscribers used to deliver notifications about anomalies detected by a monitor that exceeds a threshold. The content consists of the detailed metadata and the current status of the AnomalySubscription
object.\n
The association between a monitor, threshold, and list of subscribers used to deliver\n notifications about anomalies detected by a monitor that exceeds a threshold. The\n content consists of the detailed metadata and the current status of the\n AnomalySubscription
object.
\n The unique identifier for your Cost Category.\n
", + "smithy.api#documentation": "The unique identifier for your Cost Category.
", "smithy.api#required": {} } }, "EffectiveStart": { "target": "com.amazonaws.costexplorer#ZonedDateTime", "traits": { - "smithy.api#documentation": "\n The Cost Category's effective start date.
", + "smithy.api#documentation": "The effective state data of your Cost Category.
", "smithy.api#required": {} } }, "EffectiveEnd": { "target": "com.amazonaws.costexplorer#ZonedDateTime", "traits": { - "smithy.api#documentation": "\n The Cost Category's effective end date.
" + "smithy.api#documentation": "The effective end data of your Cost Category.
" } }, "Name": { @@ -536,14 +536,20 @@ "Rules": { "target": "com.amazonaws.costexplorer#CostCategoryRulesList", "traits": { - "smithy.api#documentation": "\n Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.\n
", + "smithy.api#documentation": "The rules are processed in order. If there are multiple rules that match the line\n item, then the first rule to match is used to determine that Cost Category value.\n
", "smithy.api#required": {} } }, + "SplitChargeRules": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeRulesList", + "traits": { + "smithy.api#documentation": "The split charge rules that are used to allocate your charges between your Cost\n Category values.
" + } + }, "ProcessingStatus": { "target": "com.amazonaws.costexplorer#CostCategoryProcessingStatusList", "traits": { - "smithy.api#documentation": "\n The list of processing statuses for Cost Management products for a specific cost category.\n
" + "smithy.api#documentation": "The list of processing statuses for Cost Management products for a specific cost\n category.
" } }, "DefaultValue": { @@ -551,7 +557,7 @@ } }, "traits": { - "smithy.api#documentation": "The structure of Cost Categories. This includes detailed metadata and the set of rules for the CostCategory
object.
The structure of Cost Categories. This includes detailed metadata and the set of rules\n for the CostCategory
object.
The name of dimension for which to group costs.
\n\tIf you specify LINKED_ACCOUNT_NAME
, the cost category value will be based on account name. If you specify TAG
, the cost category value will be based on the value of the specified tag key.
The name of the dimension that's used to group costs.
\nIf you specify LINKED_ACCOUNT_NAME
, the cost category value is based on\n account name. If you specify TAG
, the cost category value will be based on\n the value of the specified tag key.
When creating or updating a cost category, you can define the CostCategoryRule
rule type as INHERITED_VALUE
. This rule type adds the flexibility of defining a rule that dynamically inherits the cost category value from the dimension value defined by CostCategoryInheritedValueDimension
. For example, if you wanted to dynamically group costs based on the value of a specific tag key, you would first choose an inherited value rule type, then choose the tag dimension and specify the tag key to use.
When creating or updating a cost category, you can define the\n CostCategoryRule
rule type as INHERITED_VALUE
. This rule\n type adds the flexibility of defining a rule that dynamically inherits the cost category\n value from the dimension value defined by\n CostCategoryInheritedValueDimension
. For example, if you want to\n dynamically group costs that are based on the value of a specific tag key, first choose\n an inherited value rule type, then choose the tag dimension and specify the tag key to\n use.
\n The Cost Management product name of the applied status.\n
" + "smithy.api#documentation": "The Cost Management product name of the applied status.
" } }, "Status": { "target": "com.amazonaws.costexplorer#CostCategoryStatus", "traits": { - "smithy.api#documentation": "\n The process status for a specific cost category.\n
" + "smithy.api#documentation": "The process status for a specific cost category.
" } } }, "traits": { - "smithy.api#documentation": "\n The list of processing statuses for Cost Management products for a specific cost category.\n
" + "smithy.api#documentation": "The list of processing statuses for Cost Management products for a specific cost\n category.
" } }, "com.amazonaws.costexplorer#CostCategoryProcessingStatusList": { @@ -647,7 +653,7 @@ "CostCategoryArn": { "target": "com.amazonaws.costexplorer#Arn", "traits": { - "smithy.api#documentation": "\n The unique identifier for your Cost Category.\n
" + "smithy.api#documentation": "The unique identifier for your Cost Category.
" } }, "Name": { @@ -656,31 +662,31 @@ "EffectiveStart": { "target": "com.amazonaws.costexplorer#ZonedDateTime", "traits": { - "smithy.api#documentation": "\n The Cost Category's effective start date.
" + "smithy.api#documentation": "The Cost Category's effective start date.
" } }, "EffectiveEnd": { "target": "com.amazonaws.costexplorer#ZonedDateTime", "traits": { - "smithy.api#documentation": "\n The Cost Category's effective end date.
" + "smithy.api#documentation": "The Cost Category's effective end date.
" } }, "NumberOfRules": { "target": "com.amazonaws.costexplorer#NonNegativeInteger", "traits": { - "smithy.api#documentation": "\n The number of rules associated with a specific Cost Category.\n
" + "smithy.api#documentation": "The number of rules that are associated with a specific Cost Category.
" } }, "ProcessingStatus": { "target": "com.amazonaws.costexplorer#CostCategoryProcessingStatusList", "traits": { - "smithy.api#documentation": "\n The list of processing statuses for Cost Management products for a specific cost category.\n
" + "smithy.api#documentation": "The list of processing statuses for Cost Management products for a specific cost\n category.
" } }, "Values": { "target": "com.amazonaws.costexplorer#CostCategoryValuesList", "traits": { - "smithy.api#documentation": "\n A list of unique cost category values in a specific cost category.\n
" + "smithy.api#documentation": "A list of unique cost category values in a specific cost category.
" } }, "DefaultValue": { @@ -688,7 +694,7 @@ } }, "traits": { - "smithy.api#documentation": "A reference to a Cost Category containing only enough information to identify the Cost Category.
\nYou can use this information to retrieve the full Cost Category information using DescribeCostCategory
.
A reference to a Cost Category containing only enough information to identify the Cost\n Category.
\nYou can use this information to retrieve the full Cost Category information using\n DescribeCostCategory
.
An Expression\n object used to categorize costs. This supports dimensions, tags, and nested expressions.\n Currently the only dimensions supported are LINKED_ACCOUNT
,\n SERVICE_CODE
, RECORD_TYPE
, and\n LINKED_ACCOUNT_NAME
.
Root level OR
is not supported. We recommend that you create a separate\n rule instead.
\n RECORD_TYPE
is a dimension used for Cost Explorer APIs, and is also\n supported for Cost Category expressions. This dimension uses different terms, depending\n on whether you're using the console or API/JSON editor. For a detailed comparison, see\n Term Comparisons in the AWS Billing and Cost Management User\n Guide.
An Expression\n object used to categorize costs. This supports dimensions, tags, and nested expressions.\n Currently the only dimensions supported are LINKED_ACCOUNT
,\n SERVICE_CODE
, RECORD_TYPE
, and\n LINKED_ACCOUNT_NAME
.
Root level OR
isn't supported. We recommend that you create a separate\n rule instead.
\n RECORD_TYPE
is a dimension used for Cost Explorer APIs, and is also\n supported for Cost Category expressions. This dimension uses different terms, depending\n on whether you're using the console or API/JSON editor. For a detailed comparison, see\n Term Comparisons in the Billing and Cost Management User\n Guide.
The value the line item will be categorized as, if the line item contains the matched dimension.
" + "smithy.api#documentation": "The value the line item is categorized as if the line item contains the matched\n dimension.
" } }, "Type": { "target": "com.amazonaws.costexplorer#CostCategoryRuleType", "traits": { - "smithy.api#documentation": "You can define the CostCategoryRule
rule type as either REGULAR
or INHERITED_VALUE
. The INHERITED_VALUE
rule type adds the flexibility of defining a rule that dynamically inherits the cost category value from the dimension value defined by CostCategoryInheritedValueDimension
. For example, if you wanted to dynamically group costs based on the value of a specific tag key, you would first choose an inherited value rule type, then choose the tag dimension and specify the tag key to use.
You can define the CostCategoryRule
rule type as either\n REGULAR
or INHERITED_VALUE
. The\n INHERITED_VALUE
rule type adds the flexibility of defining a rule that\n dynamically inherits the cost category value from the dimension value defined by\n CostCategoryInheritedValueDimension
. For example, if you want to\n dynamically group costs based on the value of a specific tag key, first choose an\n inherited value rule type, then choose the tag dimension and specify the tag key to\n use.
Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.
" + "smithy.api#documentation": "Rules are processed in order. If there are multiple rules that match the line item,\n then the first rule to match is used to determine that Cost Category value.
" } }, "com.amazonaws.costexplorer#CostCategoryRuleType": { @@ -765,6 +771,141 @@ } } }, + "com.amazonaws.costexplorer#CostCategorySplitChargeMethod": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FIXED", + "name": "FIXED" + }, + { + "value": "PROPORTIONAL", + "name": "PROPORTIONAL" + }, + { + "value": "EVEN", + "name": "EVEN" + } + ] + } + }, + "com.amazonaws.costexplorer#CostCategorySplitChargeRule": { + "type": "structure", + "members": { + "Source": { + "target": "com.amazonaws.costexplorer#GenericString", + "traits": { + "smithy.api#documentation": "The Cost Category value that you want to split. That value can't be used as a source\n or a target in other split charge rules. To indicate uncategorized costs, you can use an empty string as the source.
", + "smithy.api#required": {} + } + }, + "Targets": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeRuleTargetsList", + "traits": { + "smithy.api#documentation": "The Cost Category values that you want to split costs across. These values can't be\n used as a source in other split charge rules.
", + "smithy.api#required": {} + } + }, + "Method": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeMethod", + "traits": { + "smithy.api#documentation": "The method that's used to define how to split your source costs across your targets.
\n\n Proportional
- Allocates charges across your targets based on the\n proportional weighted cost of each target.
\n Fixed
- Allocates charges across your targets based on your defined\n allocation percentage.
>Even
- Allocates costs evenly across all targets.
The parameters for a split charge method. This is only required for the\n FIXED
method.
Use the split charge rule to split the cost of one Cost Category value across several\n other target values.
" + } + }, + "com.amazonaws.costexplorer#CostCategorySplitChargeRuleParameter": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeRuleParameterType", + "traits": { + "smithy.api#documentation": "The parameter type.
", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeRuleParameterValuesList", + "traits": { + "smithy.api#documentation": "The parameter values.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The parameters for a split charge method.
" + } + }, + "com.amazonaws.costexplorer#CostCategorySplitChargeRuleParameterType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALLOCATION_PERCENTAGES", + "name": "ALLOCATION_PERCENTAGES" + } + ] + } + }, + "com.amazonaws.costexplorer#CostCategorySplitChargeRuleParameterValuesList": { + "type": "list", + "member": { + "target": "com.amazonaws.costexplorer#GenericString" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.costexplorer#CostCategorySplitChargeRuleParametersList": { + "type": "list", + "member": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeRuleParameter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.costexplorer#CostCategorySplitChargeRuleTargetsList": { + "type": "list", + "member": { + "target": "com.amazonaws.costexplorer#GenericString" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.costexplorer#CostCategorySplitChargeRulesList": { + "type": "list", + "member": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeRule" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.costexplorer#CostCategoryStatus": { "type": "string", "traits": { @@ -794,7 +935,7 @@ "com.amazonaws.costexplorer#CostCategoryValue": { "type": "string", "traits": { - "smithy.api#documentation": "The default value for the cost category.
", + "smithy.api#documentation": "The\n default value for the cost category.
", "smithy.api#length": { "min": 1, "max": 50 @@ -817,12 +958,12 @@ "MatchOptions": { "target": "com.amazonaws.costexplorer#MatchOptions", "traits": { - "smithy.api#documentation": "\n The match options that you can use to filter your results. MatchOptions is only applicable for actions related to cost category. The default values for MatchOptions
is EQUALS
and CASE_SENSITIVE
.\n
The match options that you can use to filter your results. MatchOptions is only\n applicable for actions related to cost category. The default values for\n MatchOptions
is EQUALS
and CASE_SENSITIVE
.\n
The Cost Categories values used for filtering the costs.
\n\t\tIf Values
and Key
are not specified, the ABSENT
\n MatchOption
is applied to all Cost Categories. That is, filtering on resources that are not mapped to any Cost Categories.
If Values
is provided and Key
is not specified, the ABSENT
\n MatchOption
is applied to the Cost Categories Key
only. That is, filtering on resources without the given Cost Categories key.
The Cost Categories values used for filtering the costs.
\nIf Values
and Key
are not specified, the ABSENT
\n MatchOption
is applied to all Cost Categories. That is, it filters on\n resources that aren't mapped to any Cost Categories.
If Values
is provided and Key
isn't specified, the\n ABSENT
\n MatchOption
is applied to the Cost Categories Key
only. That\n is, it filters on resources without the given Cost Categories key.
The amount of instance usage that the reservation covered, in normalized\n units.
" + "smithy.api#documentation": "The amount of instance usage that the reservation covered, in normalized units.
" } }, "CoverageCost": { @@ -880,7 +1021,7 @@ } }, "traits": { - "smithy.api#documentation": "Reservation\n coverage for a specified period, in\n hours.
" + "smithy.api#documentation": "Reservation coverage for a specified period, in hours.
" } }, "com.amazonaws.costexplorer#CoverageCost": { @@ -961,7 +1102,7 @@ } }, "traits": { - "smithy.api#documentation": "The amount of instance usage, in normalized units. Normalized units enable you to\n see your EC2 usage for multiple sizes of instances in a uniform way. For example,\n suppose you run an xlarge instance and a 2xlarge instance. If you run both instances for\n the same amount of time, the 2xlarge instance uses twice as much of your reservation as\n the xlarge instance, even though both instances show only one instance-hour. Using\n normalized units instead of instance-hours, the xlarge instance used 8 normalized units,\n and the 2xlarge instance used 16 normalized units.
\nFor more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide for\n Linux Instances.
" + "smithy.api#documentation": "The amount of instance usage, in normalized units. You can use normalized units to see\n your EC2 usage for multiple sizes of instances in a uniform way. For example, suppose\n that you run an xlarge instance and a 2xlarge instance. If you run both instances for\n the same amount of time, the 2xlarge instance uses twice as much of your reservation as\n the xlarge instance, even though both instances show only one instance-hour. When you\n use normalized units instead of instance-hours, the xlarge instance used 8 normalized\n units, and the 2xlarge instance used 16 normalized units.
\nFor more information, see Modifying Reserved Instances\n in the Amazon Elastic Compute Cloud User Guide for Linux\n Instances.
" } }, "com.amazonaws.costexplorer#CoverageNormalizedUnitsPercentage": { @@ -996,7 +1137,7 @@ "AnomalyMonitor": { "target": "com.amazonaws.costexplorer#AnomalyMonitor", "traits": { - "smithy.api#documentation": "The cost anomaly detection monitor object that you want to create.
", + "smithy.api#documentation": "The cost anomaly detection monitor object that you want to create.
", "smithy.api#required": {} } } @@ -1008,7 +1149,7 @@ "MonitorArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The unique identifier of your newly created cost anomaly detection monitor.
", + "smithy.api#documentation": "The unique identifier of your newly created cost anomaly detection monitor.
", "smithy.api#required": {} } } @@ -1040,7 +1181,7 @@ "AnomalySubscription": { "target": "com.amazonaws.costexplorer#AnomalySubscription", "traits": { - "smithy.api#documentation": "\n The cost anomaly subscription object that you want to create.\n
", + "smithy.api#documentation": "The cost anomaly subscription object that you want to create.
", "smithy.api#required": {} } } @@ -1052,7 +1193,7 @@ "SubscriptionArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The unique identifier of your newly created cost anomaly subscription.\n
", + "smithy.api#documentation": "The unique identifier of your newly created cost anomaly subscription.
", "smithy.api#required": {} } } @@ -1102,6 +1243,12 @@ }, "DefaultValue": { "target": "com.amazonaws.costexplorer#CostCategoryValue" + }, + "SplitChargeRules": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeRulesList", + "traits": { + "smithy.api#documentation": "\n The split charge rules used to allocate your charges between your Cost Category values.\n
" + } } } }, @@ -1134,61 +1281,61 @@ "InstanceName": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The name you've given an instance. This field will show as blank if you haven't given the instance a name.
" + "smithy.api#documentation": "The name that you given an instance. This field shows as blank if you haven't given\n the instance a name.
" } }, "Tags": { "target": "com.amazonaws.costexplorer#TagValuesList", "traits": { - "smithy.api#documentation": "Cost allocation resource tags applied to the instance.
" + "smithy.api#documentation": "Cost allocation resource tags that are applied to the instance.
" } }, "ResourceDetails": { "target": "com.amazonaws.costexplorer#ResourceDetails", "traits": { - "smithy.api#documentation": "Details about the resource and utilization.
" + "smithy.api#documentation": "Details about the resource and utilization.
" } }, "ResourceUtilization": { "target": "com.amazonaws.costexplorer#ResourceUtilization", "traits": { - "smithy.api#documentation": "Utilization information of the current instance during the lookback period.
" + "smithy.api#documentation": "Utilization information of the current instance during the lookback period.
" } }, "ReservationCoveredHoursInLookbackPeriod": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Number of hours during the lookback period covered by reservations.
" + "smithy.api#documentation": "The number of hours during the lookback period that's covered by reservations.
" } }, "SavingsPlansCoveredHoursInLookbackPeriod": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Number of hours during the lookback period covered by Savings Plans.
" + "smithy.api#documentation": "The number of hours during the lookback period that's covered by Savings Plans.
" } }, "OnDemandHoursInLookbackPeriod": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Number of hours during the lookback period billed at On-Demand rates.
" + "smithy.api#documentation": "The number of hours during the lookback period that's billed at On-Demand\n rates.
" } }, "TotalRunningHoursInLookbackPeriod": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The total number of hours the instance ran during the lookback period.
" + "smithy.api#documentation": "The total number of hours that the instance ran during the lookback period.
" } }, "MonthlyCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Current On-Demand cost of operating this instance on a monthly basis.
" + "smithy.api#documentation": "The current On-Demand cost of operating this instance on a monthly basis.
" } }, "CurrencyCode": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The currency code that AWS used to calculate the costs for this instance.
" + "smithy.api#documentation": "The currency code that Amazon Web Services used to calculate the costs for this\n instance.
" } } }, @@ -1214,20 +1361,20 @@ "Start": { "target": "com.amazonaws.costexplorer#YearMonthDay", "traits": { - "smithy.api#documentation": "The beginning of the time period. The start\n date is inclusive. For example, if start
is 2017-01-01
, AWS\n retrieves cost and usage data starting at 2017-01-01
up to the end\n date. The start date must be equal to or no later than the current date to avoid a validation error.
The beginning of the time period. The start date is inclusive. For example, if\n start
is 2017-01-01
, Amazon Web Services retrieves cost and\n usage data starting at 2017-01-01
up to the end date. The start date must\n be equal to or no later than the current date to avoid a validation error.
The end of the time period. The end date is\n exclusive. For example, if end
is 2017-05-01
, AWS retrieves\n cost and usage data from the start date up to, but not including,\n 2017-05-01
.
The end of the time period. The end date is exclusive. For example, if\n end
is 2017-05-01
, Amazon Web Services retrieves cost and\n usage data from the start date up to, but not including, 2017-05-01
.
The time period of the request.\n
" + "smithy.api#documentation": "The time period of the request.
" } }, "com.amazonaws.costexplorer#DeleteAnomalyMonitor": { @@ -1256,7 +1403,7 @@ "MonitorArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The unique identifier of the cost anomaly monitor that you want to delete.
", + "smithy.api#documentation": "The unique identifier of the cost anomaly monitor that you want to delete.
", "smithy.api#required": {} } } @@ -1292,7 +1439,7 @@ "SubscriptionArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The unique identifier of the cost anomaly subscription that you want to delete.
", + "smithy.api#documentation": "The unique identifier of the cost anomaly subscription that you want to delete.
", "smithy.api#required": {} } } @@ -1534,7 +1681,7 @@ "Key": { "target": "com.amazonaws.costexplorer#Dimension", "traits": { - "smithy.api#documentation": "The names of the metadata types that you can use to filter and group your results.\n For example, AZ
returns a list of Availability Zones.
The names of the metadata types that you can use to filter and group your results. For\n example, AZ
returns a list of Availability Zones.
The match options that you can use to filter your results.\n MatchOptions
is only applicable for actions related to Cost Category.\n The default values for MatchOptions
are EQUALS
and\n CASE_SENSITIVE
.
The match options that you can use to filter your results. MatchOptions
\n is only applicable for actions related to Cost Category. The default values for\n MatchOptions
are EQUALS
and\n CASE_SENSITIVE
.
The metadata of a specific type that you can use to filter and group your results.\n You can use GetDimensionValues
to find specific values.
The metadata of a specific type that you can use to filter and group your results. You\n can use GetDimensionValues
to find specific values.
\n The maximum number of read operations per second.\n
" + "smithy.api#documentation": "The maximum number of read operations per second.
" } }, "DiskWriteOpsPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The maximum number of write operations per second.\n
" + "smithy.api#documentation": "The maximum number of write operations per second.
" } }, "DiskReadBytesPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The maximum read throughput operations per second.\n
" + "smithy.api#documentation": "The maximum read throughput operations per second.
" } }, "DiskWriteBytesPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The maximum write throughput operations per second.\n
" + "smithy.api#documentation": "The maximum write throughput operations per second.
" } } }, "traits": { - "smithy.api#documentation": "\n The field that contains a list of disk (local storage) metrics associated with the current instance.\n
" + "smithy.api#documentation": "The field that contains a list of disk (local storage) metrics that are associated\n with the current instance.
" } }, "com.amazonaws.costexplorer#EBSResourceUtilization": { @@ -1618,30 +1765,30 @@ "EbsReadOpsPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The maximum number of read operations per second.\n
" + "smithy.api#documentation": "The maximum number of read operations per second.
" } }, "EbsWriteOpsPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The maximum number of write operations per second.\n
" + "smithy.api#documentation": "The maximum number of write operations per second.
" } }, "EbsReadBytesPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The maximum size of read operations per second\n
" + "smithy.api#documentation": "The maximum size of read operations per second
" } }, "EbsWriteBytesPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The maximum size of write operations per second.\n
" + "smithy.api#documentation": "The maximum size of write operations per second.
" } } }, "traits": { - "smithy.api#documentation": "\n The EBS field that contains a list of EBS metrics associated with the current instance.\n
" + "smithy.api#documentation": "The EBS field that contains a list of EBS metrics that are associated with the current\n instance.
" } }, "com.amazonaws.costexplorer#EC2InstanceDetails": { @@ -1656,13 +1803,13 @@ "InstanceType": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The type of instance that AWS recommends.
" + "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" } }, "Region": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The AWS Region of the recommended reservation.
" + "smithy.api#documentation": "The Amazon Web Services Region of the recommended reservation.
" } }, "AvailabilityZone": { @@ -1674,30 +1821,30 @@ "Platform": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The platform of the recommended reservation. The platform is the specific\n combination of operating system, license model, and software on an instance.
" + "smithy.api#documentation": "The platform of the recommended reservation. The platform is the specific combination\n of operating system, license model, and software on an instance.
" } }, "Tenancy": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Whether the recommended reservation is dedicated or shared.
" + "smithy.api#documentation": "Determines whether the recommended reservation is dedicated or shared.
" } }, "CurrentGeneration": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommendation is for a current-generation instance.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current-generation instance.
" } }, "SizeFlexEligible": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommended reservation is size flexible.
" + "smithy.api#documentation": "Determines whether the recommended reservation is size flexible.
" } } }, "traits": { - "smithy.api#documentation": "Details about the Amazon EC2 instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "Details about the Amazon EC2 instances that Amazon Web Services recommends that you\n purchase.
" } }, "com.amazonaws.costexplorer#EC2ResourceDetails": { @@ -1706,60 +1853,60 @@ "HourlyOnDemandRate": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Hourly public On-Demand rate for the instance type.
" + "smithy.api#documentation": "The hourly public On-Demand rate for the instance type.
" } }, "InstanceType": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The type of AWS instance.
" + "smithy.api#documentation": "The type of Amazon Web Services instance.
" } }, "Platform": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The platform of the AWS instance. The platform is the specific combination of\n operating system, license model, and software on an instance.
" + "smithy.api#documentation": "The platform of the Amazon Web Services instance. The platform is the specific\n combination of operating system, license model, and software on an instance.
" } }, "Region": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The AWS Region of the instance.
" + "smithy.api#documentation": "The Amazon Web Services Region of the instance.
" } }, "Sku": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The SKU of the product.
" + "smithy.api#documentation": "The SKU of the product.
" } }, "Memory": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Memory capacity of the AWS instance.
" + "smithy.api#documentation": "The memory capacity of the Amazon Web Services instance.
" } }, "NetworkPerformance": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Network performance capacity of the AWS instance.
" + "smithy.api#documentation": "The network performance capacity of the Amazon Web Services instance.
" } }, "Storage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The disk storage of the AWS instance (not EBS storage).
" + "smithy.api#documentation": "The disk storage of the Amazon Web Services instance. This doesn't include EBS\n storage.
" } }, "Vcpu": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Number of VCPU cores in the AWS instance type.
" + "smithy.api#documentation": "The number of VCPU cores in the Amazon Web Services instance type.
" } } }, "traits": { - "smithy.api#documentation": "Details on the Amazon EC2 Resource.
" + "smithy.api#documentation": "Details on the Amazon EC2 Resource.
" } }, "com.amazonaws.costexplorer#EC2ResourceUtilization": { @@ -1768,42 +1915,42 @@ "MaxCpuUtilizationPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Maximum observed or expected CPU utilization of the instance.
" + "smithy.api#documentation": "The maximum observed or expected CPU utilization of the instance.
" } }, "MaxMemoryUtilizationPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Maximum observed or expected memory utilization of the instance.
" + "smithy.api#documentation": "The maximum observed or expected memory utilization of the instance.
" } }, "MaxStorageUtilizationPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Maximum observed or expected storage utilization of the instance (does not measure EBS storage).
" + "smithy.api#documentation": "The maximum observed or expected storage utilization of the instance. This doesn't\n include EBS storage.
" } }, "EBSResourceUtilization": { "target": "com.amazonaws.costexplorer#EBSResourceUtilization", "traits": { - "smithy.api#documentation": "\n The EBS field that contains a list of EBS metrics associated with the current instance.\n
" + "smithy.api#documentation": "The EBS field that contains a list of EBS metrics that are associated with the current\n instance.
" } }, "DiskResourceUtilization": { "target": "com.amazonaws.costexplorer#DiskResourceUtilization", "traits": { - "smithy.api#documentation": "\n The field that contains a list of disk (local storage) metrics associated with the current instance.\n
" + "smithy.api#documentation": "The field that contains a list of disk (local storage) metrics that are associated\n with the current instance.
" } }, "NetworkResourceUtilization": { "target": "com.amazonaws.costexplorer#NetworkResourceUtilization", "traits": { - "smithy.api#documentation": "\n The network field that contains a list of network metrics associated with the current instance.\n
" + "smithy.api#documentation": "The network field that contains a list of network metrics that are associated with\n the current instance.
" } } }, "traits": { - "smithy.api#documentation": "Utilization metrics of the instance.
" + "smithy.api#documentation": "Utilization metrics of the instance.
" } }, "com.amazonaws.costexplorer#EC2Specification": { @@ -1812,12 +1959,12 @@ "OfferingClass": { "target": "com.amazonaws.costexplorer#OfferingClass", "traits": { - "smithy.api#documentation": "Whether you want a recommendation for standard or convertible\n reservations.
" + "smithy.api#documentation": "Indicates whether you want a recommendation for standard or convertible\n reservations.
" } } }, "traits": { - "smithy.api#documentation": "The Amazon EC2 hardware specifications that you want AWS to provide recommendations\n for.
" + "smithy.api#documentation": "The Amazon EC2 hardware specifications that you want Amazon Web Services to provide\n recommendations for.
" } }, "com.amazonaws.costexplorer#ESInstanceDetails": { @@ -1826,36 +1973,36 @@ "InstanceClass": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The class of instance that AWS recommends.
" + "smithy.api#documentation": "The class of instance that Amazon Web Services recommends.
" } }, "InstanceSize": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The size of instance that AWS recommends.
" + "smithy.api#documentation": "The size of instance that Amazon Web Services recommends.
" } }, "Region": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The AWS Region of the recommended reservation.
" + "smithy.api#documentation": "The Amazon Web Services Region of the recommended reservation.
" } }, "CurrentGeneration": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommendation is for a current-generation instance.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current-generation instance.
" } }, "SizeFlexEligible": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommended reservation is size flexible.
" + "smithy.api#documentation": "Determines whether the recommended reservation is size flexible.
" } } }, "traits": { - "smithy.api#documentation": "Details about the Amazon ES instances that AWS recommends that you\n purchase.
" + "smithy.api#documentation": "Details about the Amazon ES instances that Amazon Web Services recommends that you\n purchase.
" } }, "com.amazonaws.costexplorer#ElastiCacheInstanceDetails": { @@ -1870,13 +2017,13 @@ "NodeType": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The type of node that AWS recommends.
" + "smithy.api#documentation": "The type of node that Amazon Web Services recommends.
" } }, "Region": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The AWS Region of the recommended reservation.
" + "smithy.api#documentation": "The Amazon Web Services Region of the recommended reservation.
" } }, "ProductDescription": { @@ -1888,18 +2035,18 @@ "CurrentGeneration": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommendation is for a current generation instance.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" } }, "SizeFlexEligible": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommended reservation is size flexible.
" + "smithy.api#documentation": "Determines whether the recommended reservation is size flexible.
" } } }, "traits": { - "smithy.api#documentation": "Details about the Amazon ElastiCache instances that AWS recommends that you\n purchase.
" + "smithy.api#documentation": "Details about the Amazon ElastiCache instances that Amazon Web Services recommends that\n you purchase.
" } }, "com.amazonaws.costexplorer#Entity": { @@ -1947,12 +2094,12 @@ "CostCategories": { "target": "com.amazonaws.costexplorer#CostCategoryValues", "traits": { - "smithy.api#documentation": "The filter based on CostCategory
values.
The filter that's based on CostCategory
values.
Use Expression
to filter by cost or by usage. There are two patterns:
Simple dimension values - You can set the dimension name and values for the\n filters that you plan to use. For example, you can filter for\n REGION==us-east-1 OR REGION==us-west-1
. For GetRightsizingRecommendation
, the Region is a full name (for example, REGION==US East (N. Virginia)
. The\n Expression
example looks like:
\n { \"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\",\n “us-west-1” ] } }
\n
The list of dimension values are OR'd together to retrieve cost or usage\n data. You can create Expression
and DimensionValues
\n objects using either with*
methods or set*
methods in\n multiple lines.
Compound dimension values with logical operations - You can use multiple\n Expression
types and the logical operators\n AND/OR/NOT
to create a list of one or more\n Expression
objects. This allows you to filter on more advanced\n options. For example, you can filter on ((REGION == us-east-1 OR\n REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE !=\n DataTransfer)
. The Expression
for that looks like\n this:
\n { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"REGION\",\n \"Values\": [ \"us-east-1\", \"us-west-1\" ] }}, {\"Tags\": { \"Key\": \"TagName\",\n \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\",\n \"Values\": [\"DataTransfer\"] }}} ] }
\n
Because each Expression
can have only one operator, the\n service returns an error if more than one is specified. The following\n example shows an Expression
object that creates an\n error.
\n { \"And\": [ ... ], \"DimensionValues\": { \"Dimension\": \"USAGE_TYPE\",\n \"Values\": [ \"DataTransfer\" ] } }
\n
For the GetRightsizingRecommendation
action, a combination of OR and NOT is not\n supported. OR is not supported between different dimensions, or dimensions and tags.\n NOT operators aren't supported.\n Dimensions\n are also limited to LINKED_ACCOUNT
, REGION
, or\n RIGHTSIZING_TYPE
.
For the GetReservationPurchaseRecommendation
action, only NOT is supported. AND and OR are not supported. Dimensions are limited to LINKED_ACCOUNT
.
Use Expression
to filter by cost or by usage. There are two patterns:
Simple dimension values - You can set the dimension name and values for the\n filters that you plan to use. For example, you can filter for\n REGION==us-east-1 OR REGION==us-west-1
. For\n GetRightsizingRecommendation
, the Region is a full name (for\n example, REGION==US East (N. Virginia)
. The Expression
\n example is as follows:
\n { \"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", “us-west-1” ]\n } }
\n
The list of dimension values are OR'd together to retrieve cost or usage data.\n You can create Expression
and DimensionValues
objects\n using either with*
methods or set*
methods in multiple\n lines.
Compound dimension values with logical operations - You can use multiple\n Expression
types and the logical operators\n AND/OR/NOT
to create a list of one or more\n Expression
objects. By doing this, you can filter on more\n advanced options. For example, you can filter on ((REGION == us-east-1 OR\n REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE !=\n DataTransfer)
. The Expression
for that is as\n follows:
\n { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"REGION\", \"Values\": [\n \"us-east-1\", \"us-west-1\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\":\n [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\":\n [\"DataTransfer\"] }}} ] }
\n
Because each Expression
can have only one operator, the\n service returns an error if more than one is specified. The following\n example shows an Expression
object that creates an\n error.
\n { \"And\": [ ... ], \"DimensionValues\": { \"Dimension\": \"USAGE_TYPE\",\n \"Values\": [ \"DataTransfer\" ] } }
\n
For the GetRightsizingRecommendation
action, a combination of OR and\n NOT isn't supported. OR isn't supported between different dimensions, or dimensions\n and tags. NOT operators aren't supported. Dimensions are also limited to\n LINKED_ACCOUNT
, REGION
, or\n RIGHTSIZING_TYPE
.
For the GetReservationPurchaseRecommendation
action, only NOT is\n supported. AND and OR aren't supported. Dimensions are limited to\n LINKED_ACCOUNT
.
The forecast created for your query.
" + "smithy.api#documentation": "The forecast that's created for your query.
" } }, "com.amazonaws.costexplorer#ForecastResultsByTime": { @@ -2089,7 +2236,7 @@ "min": 0, "max": 1024 }, - "smithy.api#pattern": "[\\S\\s]*" + "smithy.api#pattern": "^[\\S\\s]*$" } }, "com.amazonaws.costexplorer#GetAnomalies": { @@ -2109,7 +2256,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves all of the cost anomalies detected on your account, during the time period\n specified by the DateInterval
object.
Retrieves all of the cost anomalies detected on your account during the time period that's\n specified by the DateInterval
object.
\n The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when\n the response from a previous call has more results than the maximum page size.
" } }, "MaxResults": { "target": "com.amazonaws.costexplorer#PageSize", "traits": { - "smithy.api#documentation": "\n The number of entries a paginated response contains.\n
" + "smithy.api#documentation": "The number of entries a paginated response contains.
" } } } @@ -2160,14 +2307,14 @@ "Anomalies": { "target": "com.amazonaws.costexplorer#Anomalies", "traits": { - "smithy.api#documentation": "\n A list of cost anomalies.\n
", + "smithy.api#documentation": "A list of cost anomalies.
", "smithy.api#required": {} } }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "\n The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when\n the response from a previous call has more results than the maximum page size.
" } } } @@ -2201,19 +2348,19 @@ "MonitorArnList": { "target": "com.amazonaws.costexplorer#Values", "traits": { - "smithy.api#documentation": "\n A list of cost anomaly monitor ARNs.\n
" + "smithy.api#documentation": "A list of cost anomaly monitor ARNs.
" } }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "\n The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when\n the response from a previous call has more results than the maximum page size.
" } }, "MaxResults": { "target": "com.amazonaws.costexplorer#PageSize", "traits": { - "smithy.api#documentation": "\n The number of entries a paginated response contains.\n
" + "smithy.api#documentation": "The number of entries that a paginated response contains.
" } } } @@ -2224,14 +2371,14 @@ "AnomalyMonitors": { "target": "com.amazonaws.costexplorer#AnomalyMonitors", "traits": { - "smithy.api#documentation": "\n A list of cost anomaly monitors that includes the detailed metadata for each monitor.\n
", + "smithy.api#documentation": "A list of cost anomaly monitors that includes the detailed metadata for each monitor.
", "smithy.api#required": {} } }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "\n The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when\n the response from a previous call has more results than the maximum page size.
" } } } @@ -2265,25 +2412,25 @@ "SubscriptionArnList": { "target": "com.amazonaws.costexplorer#Values", "traits": { - "smithy.api#documentation": "\n A list of cost anomaly subscription ARNs.\n
" + "smithy.api#documentation": "A list of cost anomaly subscription ARNs.
" } }, "MonitorArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n Cost anomaly monitor ARNs.\n
" + "smithy.api#documentation": "Cost anomaly monitor ARNs.
" } }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "\n The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when\n the response from a previous call has more results than the maximum page size.
" } }, "MaxResults": { "target": "com.amazonaws.costexplorer#PageSize", "traits": { - "smithy.api#documentation": "\n The number of entries a paginated response contains.\n
" + "smithy.api#documentation": "The number of entries a paginated response contains.
" } } } @@ -2294,14 +2441,14 @@ "AnomalySubscriptions": { "target": "com.amazonaws.costexplorer#AnomalySubscriptions", "traits": { - "smithy.api#documentation": "\n A list of cost anomaly subscriptions that includes the detailed metadata for each one.\n
", + "smithy.api#documentation": "A list of cost anomaly subscriptions that includes the detailed metadata for each one.
", "smithy.api#required": {} } }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "\n The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.\n
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when\n the response from a previous call has more results than the maximum page size.
" } } } @@ -2332,7 +2479,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metric, such as \n\t\t\tBlendedCosts
or UsageQuantity
, that you want the request to return. You can also filter and group \n\t\t\tyour data by various dimensions, such as SERVICE
or AZ
, in a specific time range. For a complete list \n\t\t\tof valid dimensions, see the \n\t\t\tGetDimensionValues \n\t\t operation. Management account in an organization in AWS Organizations have access to all member accounts.
For information about filter limitations, see Quotas and restrictions in the Billing and Cost Management User Guide.
" + "smithy.api#documentation": "Retrieves cost and usage metrics for your account. You can specify which cost and\n usage-related metric that you want the request to return. For example, you can specify\n BlendedCosts
or UsageQuantity
. You can also filter and group your\n data by various dimensions, such as SERVICE
or AZ
, in a specific\n time range. For a complete list of valid dimensions, see the GetDimensionValues operation. The management account in an organization in Organizations has access to all member accounts.&#13;
For information about filter limitations, see Quotas and restrictions in the Billing and Cost Management User Guide.
" } }, "com.amazonaws.costexplorer#GetCostAndUsageRequest": { @@ -2341,40 +2488,40 @@ "TimePeriod": { "target": "com.amazonaws.costexplorer#DateInterval", "traits": { - "smithy.api#documentation": "Sets the start and end dates for retrieving AWS costs. The start date is inclusive, but the end date is exclusive. For example, if start
is 2017-01-01
and end
is 2017-05-01
, then the cost and usage data is \n retrieved from 2017-01-01
up to and including 2017-04-30
but not including 2017-05-01
.
Sets the start date and end date for retrieving Amazon Web Services costs. The start date\n is inclusive, but the end date is exclusive. For example, if start
is\n 2017-01-01
and end
is 2017-05-01
, then the cost and\n usage data is retrieved from 2017-01-01
up to and including\n 2017-04-30
but not including 2017-05-01
.
Sets the AWS cost granularity to MONTHLY
or DAILY
, or HOURLY
. If Granularity
isn't set, \n\t the response object doesn't include the Granularity
, either MONTHLY
or DAILY
, or HOURLY
.
Sets the Amazon Web Services cost granularity to MONTHLY
or DAILY
, or HOURLY
. If Granularity
isn't set, \n\t the response object doesn't include the Granularity
, either MONTHLY
or DAILY
, or HOURLY
.
Filters AWS costs by different dimensions. For example, you can specify SERVICE
and LINKED_ACCOUNT
\n\t\t\tand get the costs that are associated with that account's usage of that service. You can nest Expression
objects \n\t\t\tto define any combination of dimension filters. For more information, see \n\t\t\tExpression.
Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE
and LINKED_ACCOUNT
\n\t\t\tand get the costs that are associated with that account's usage of that service. You can nest Expression
objects \n\t\t\tto define any combination of dimension filters. For more information, see \n\t\t\tExpression.
Which metrics are returned in the query. For more information about blended and unblended rates, see \n\t\t\tWhy does the \"blended\" annotation \n\t\t\t\tappear on some line items in my bill?.
\n\t\tValid values are AmortizedCost
, BlendedCost
, NetAmortizedCost
, \n\t\t\tNetUnblendedCost
, NormalizedUsageAmount
, UnblendedCost
, and UsageQuantity
.
If you return the UsageQuantity
metric, the service aggregates all usage numbers without \n\t\t\t\ttaking into account the units. For example, if you aggregate usageQuantity
across all of Amazon EC2, \n\t\t\t\tthe results aren't meaningful because Amazon EC2 compute hours and data transfer are measured in different units \n\t\t\t\t(for example, hours vs. GB). To get more meaningful UsageQuantity
metrics, filter by UsageType
or \n\t\t\t\tUsageTypeGroups
.
\n Metrics
is required for GetCostAndUsage
requests.
Which metrics are returned in the query. For more information about blended and unblended rates, see \n\t\t\tWhy does the \"blended\" annotation \n\t\t\t\tappear on some line items in my bill?.
\n\t\tValid values are AmortizedCost
, BlendedCost
, NetAmortizedCost
, \n\t\t\tNetUnblendedCost
, NormalizedUsageAmount
, UnblendedCost
, and UsageQuantity
.
If you return the UsageQuantity
metric, the service aggregates all usage\n numbers without taking into account the units. For example, if you aggregate\n usageQuantity
across all of Amazon EC2, the results aren't meaningful because\n Amazon EC2 compute hours and data transfer are measured in different units (for example,\n hours and GB). To get more meaningful UsageQuantity
metrics, filter by\n UsageType
or UsageTypeGroups
.
\n Metrics
is required for GetCostAndUsage
requests.
You can group AWS costs using up to two different groups, either dimensions, tag keys,\n cost categories, or any two group by types.
\n\t\tWhen you group by tag key, you get all tag values, including empty strings.
\n\t\tValid values are AZ
, INSTANCE_TYPE
, LEGAL_ENTITY_NAME
, LINKED_ACCOUNT
, \n\t\t\tOPERATION
, PLATFORM
, PURCHASE_TYPE
, SERVICE
, TAGS
, \n\t\t TENANCY
, RECORD_TYPE
, and USAGE_TYPE
.
You can group Amazon Web Services costs using up to two different groups, either dimensions, tag keys,\n cost categories, or any two group by types.
\n\tValid values for the DIMENSION
type are AZ
, INSTANCE_TYPE
, LEGAL_ENTITY_NAME
, LINKED_ACCOUNT
, \n\t\t\tOPERATION
, PLATFORM
, PURCHASE_TYPE
, SERVICE
,\n\t\t TENANCY
, RECORD_TYPE
, and USAGE_TYPE
.
When you group by the TAG
type and include a valid tag key, you get all tag values, including empty strings.
The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } } } @@ -2385,7 +2532,7 @@ "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token for the next set of retrievable results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } }, "GroupDefinitions": { @@ -2397,7 +2544,7 @@ "ResultsByTime": { "target": "com.amazonaws.costexplorer#ResultsByTime", "traits": { - "smithy.api#documentation": "The time period that is covered by the results in the response.
" + "smithy.api#documentation": "The time period that's covered by the results in the response.
" } }, "DimensionValueAttributes": { @@ -2434,7 +2581,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves cost and usage metrics with resources for your account. You can specify which cost and\n\t usage-related metric, such as BlendedCosts
or UsageQuantity
, that\n\t you want the request to return. You can also filter and group your data by various dimensions,\n\t such as SERVICE
or AZ
, in a specific time range. For a complete list\n\t of valid dimensions, see the GetDimensionValues operation. Management account in an organization in AWS\n\t Organizations have access to all member accounts. This API is currently available for the Amazon Elastic Compute Cloud – Compute service only.
This is an opt-in only feature. You can enable this feature from the Cost Explorer Settings page. For information on how to access the Settings page, see Controlling Access for Cost Explorer in the AWS Billing and Cost Management User Guide.
\nRetrieves cost and usage metrics with resources for your account. You can specify which cost and\n\t usage-related metric, such as BlendedCosts
or UsageQuantity
, that\n\t you want the request to return. You can also filter and group your data by various dimensions,\n\t such as SERVICE
or AZ
, in a specific time range. For a complete list\n\t of valid dimensions, see the GetDimensionValues operation. Management account in an organization in Organizations has access to all member accounts. This API is currently available for the Amazon Elastic Compute Cloud – Compute service only.
This is an opt-in only feature. You can enable this feature from the Cost Explorer Settings page. For information on how to access the Settings page, see Controlling Access for Cost Explorer in the Billing and Cost Management User Guide.
\nSets the AWS cost granularity to MONTHLY
, DAILY
, or HOURLY
. If\n\t Granularity
isn't set, the response object doesn't include the\n\t Granularity
, MONTHLY
, DAILY
, or HOURLY
.
Sets the Amazon Web Services cost granularity to MONTHLY
, DAILY
, or HOURLY
. If\n\t Granularity
isn't set, the response object doesn't include the\n\t Granularity
, MONTHLY
, DAILY
, or HOURLY
.
The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } } } @@ -2487,7 +2634,7 @@ "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token for the next set of retrievable results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } }, "GroupDefinitions": { @@ -2536,7 +2683,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves an array of Cost Category names and values incurred cost.
\n\tIf some Cost Category names and values are not associated with any cost, they will not be returned by this API.
\nRetrieves an array of Cost Category names and values incurred cost.
\nIf some Cost Category names and values are not associated with any cost, they will not be returned by this API.
\nThe value that you want to search the filter values for.
\n\tIf you do not specify a CostCategoryName
, SearchString
will be used to filter Cost Category names that match the SearchString
pattern. If you do specifiy a CostCategoryName
, SearchString
will be used to filter Cost Category values that match the SearchString
pattern.
The value that you want to search the filter values for.
\nIf you do not specify a CostCategoryName
, SearchString
will be used to filter Cost Category names that match the SearchString
pattern. If you do specifiy a CostCategoryName
, SearchString
will be used to filter Cost Category values that match the SearchString
pattern.
The value by which you want to sort the data.
\n\tThe key represents cost and usage metrics. The following values are supported:
\n\n BlendedCost
\n
\n UnblendedCost
\n
\n AmortizedCost
\n
\n NetAmortizedCost
\n
\n NetUnblendedCost
\n
\n UsageQuantity
\n
\n NormalizedUsageAmount
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
When using SortBy
, NextPageToken
and SearchString
are not supported.
The value by which you want to sort the data.
\nThe key represents cost and usage metrics. The following values are supported:
\n\n BlendedCost
\n
\n UnblendedCost
\n
\n AmortizedCost
\n
\n NetAmortizedCost
\n
\n NetUnblendedCost
\n
\n UsageQuantity
\n
\n NormalizedUsageAmount
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
When using SortBy
, NextPageToken
and SearchString
are not supported.
This field is only used when SortBy
is provided in the request.
The maximum number of objects to be returned for this request. If MaxResults
is not specified with SortBy
, the request will return 1000 results as the default value for this parameter.
For GetCostCategories
, MaxResults has an upper limit of 1000.
This field is only used when SortBy
is provided in the request.
The maximum number of objects to be returned for this request. If MaxResults
is not specified with SortBy
, the request will return 1000 results as the default value for this parameter.
For GetCostCategories
, MaxResults has an upper limit of 1000.
If the number of objects that are still available for retrieval exceeds the limit, AWS returns a NextPageToken value in the response. To retrieve the next batch of objects, provide the NextPageToken from the prior call in your next request.
" + "smithy.api#documentation": "If the number of objects that are still available for retrieval exceeds the limit, Amazon Web Services returns a NextPageToken value in the response. To retrieve the next batch of objects, provide the NextPageToken from the prior call in your next request.
" } } } @@ -2586,7 +2733,7 @@ "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "If the number of objects that are still available for retrieval exceeds the limit, AWS returns a NextPageToken value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
" + "smithy.api#documentation": "If the number of objects that are still available for retrieval exceeds the limit, Amazon Web Services returns a NextPageToken value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
" } }, "CostCategoryNames": { @@ -2598,7 +2745,7 @@ "CostCategoryValues": { "target": "com.amazonaws.costexplorer#CostCategoryValuesList", "traits": { - "smithy.api#documentation": "The Cost Category values.
\n\t\n CostCategoryValues
are not returned if CostCategoryName
is not specified in the request.
The Cost Category values.
\n\n CostCategoryValues
are not returned if CostCategoryName
is not specified in the request.
The filters that you want to use to filter your forecast. The GetCostForecast
API supports filtering by the following dimensions:
\n AZ
\n
\n INSTANCE_TYPE
\n
\n LINKED_ACCOUNT
\n
\n LINKED_ACCOUNT_NAME
\n
\n OPERATION
\n
\n PURCHASE_TYPE
\n
\n REGION
\n
\n SERVICE
\n
\n USAGE_TYPE
\n
\n USAGE_TYPE_GROUP
\n
\n RECORD_TYPE
\n
\n OPERATING_SYSTEM
\n
\n TENANCY
\n
\n SCOPE
\n
\n PLATFORM
\n
\n SUBSCRIPTION_ID
\n
\n LEGAL_ENTITY_NAME
\n
\n DEPLOYMENT_OPTION
\n
\n DATABASE_ENGINE
\n
\n INSTANCE_TYPE_FAMILY
\n
\n BILLING_ENTITY
\n
\n RESERVATION_ID
\n
\n SAVINGS_PLAN_ARN
\n
The filters that you want to use to filter your forecast. The GetCostForecast
API supports filtering by the following dimensions:
\n AZ
\n
\n INSTANCE_TYPE
\n
\n LINKED_ACCOUNT
\n
\n LINKED_ACCOUNT_NAME
\n
\n OPERATION
\n
\n PURCHASE_TYPE
\n
\n REGION
\n
\n SERVICE
\n
\n USAGE_TYPE
\n
\n USAGE_TYPE_GROUP
\n
\n RECORD_TYPE
\n
\n OPERATING_SYSTEM
\n
\n TENANCY
\n
\n SCOPE
\n
\n PLATFORM
\n
\n SUBSCRIPTION_ID
\n
\n LEGAL_ENTITY_NAME
\n
\n DEPLOYMENT_OPTION
\n
\n DATABASE_ENGINE
\n
\n INSTANCE_TYPE_FAMILY
\n
\n BILLING_ENTITY
\n
\n RESERVATION_ID
\n
\n SAVINGS_PLAN_ARN
\n
The start and end dates for retrieving the dimension values. The start date is inclusive, but the end date is exclusive. For example, if start
is 2017-01-01
and end
is 2017-05-01
, then the cost and usage data is \n retrieved from 2017-01-01
up to and including 2017-04-30
but not including 2017-05-01
.
The start date and end date for retrieving the dimension values. The start date is\n inclusive, but the end date is exclusive. For example, if start
is\n 2017-01-01
and end
is 2017-05-01
, then the cost and\n usage data is retrieved from 2017-01-01
up to and including\n 2017-04-30
but not including 2017-05-01
.
The context for the call to GetDimensionValues
. This can be RESERVATIONS
or COST_AND_USAGE
. \n\t\t\tThe default value is COST_AND_USAGE
. If the context is set to RESERVATIONS
, the resulting dimension values \n\t\t\tcan be used in the GetReservationUtilization
operation. If the context is set to COST_AND_USAGE
, \n\t\t\tthe resulting dimension values can be used in the GetCostAndUsage
operation.
If you set the context to COST_AND_USAGE
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
\nINSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LEGAL_ENTITY_NAME - The name of the organization that sells you AWS services, such as Amazon Web Services.
\nLINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the AWS ID of the member account.
\nOPERATING_SYSTEM - The operating system. Examples are Windows or Linux.
\nOPERATION - The action performed. Examples include RunInstance
and CreateBucket
.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nPURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand \n Instances and Standard Reserved Instances.
\nSERVICE - The AWS service such as Amazon DynamoDB.
\nUSAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues
operation\n includes a unit attribute. Examples include GB and Hrs.
USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this \n operation includes a unit attribute.
\nREGION - The AWS Region.
\nRECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.
\nRESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service.
\nIf you set the context to RESERVATIONS
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
\nDEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ
and MultiAZ
.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the AWS ID of the member account.
\nPLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nREGION - The AWS Region.
\nSCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.
\nTAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).
\nTENANCY - The tenancy of a resource. Examples are shared or dedicated.
\nIf you set the context to SAVINGS_PLANS
, you can use the following dimensions for searching:
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)
\nPAYMENT_OPTION - Payment option for the given Savings Plans (for example, All Upfront)
\nREGION - The AWS Region.
\nINSTANCE_TYPE_FAMILY - The family of instances (For example, m5
)
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the AWS ID of the member account.
\nSAVINGS_PLAN_ARN - The unique identifier for your Savings Plan
\nThe context for the call to GetDimensionValues
. This can be RESERVATIONS
or COST_AND_USAGE
. \n\t\t\tThe default value is COST_AND_USAGE
. If the context is set to RESERVATIONS
, the resulting dimension values \n\t\t\tcan be used in the GetReservationUtilization
operation. If the context is set to COST_AND_USAGE
, \n\t\t\tthe resulting dimension values can be used in the GetCostAndUsage
operation.
If you set the context to COST_AND_USAGE
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
\nINSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services.
\nLINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nOPERATING_SYSTEM - The operating system. Examples are Windows or Linux.
\nOPERATION - The action performed. Examples include RunInstance
and CreateBucket
.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nPURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand \n Instances and Standard Reserved Instances.
\nSERVICE - The Amazon Web Services service such as Amazon DynamoDB.
\nUSAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues
operation\n includes a unit attribute. Examples include GB and Hrs.
USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this \n operation includes a unit attribute.
\nREGION - The Amazon Web Services Region.
\nRECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.
\nRESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service.
\nIf you set the context to RESERVATIONS
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
\nDEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ
and MultiAZ
.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nPLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nREGION - The Amazon Web Services Region.
\nSCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.
\nTAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).
\nTENANCY - The tenancy of a resource. Examples are shared or dedicated.
\nIf you set the context to SAVINGS_PLANS
, you can use the following dimensions for searching:
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)
\nPAYMENT_OPTION - Payment option for the given Savings Plans (for example, All Upfront)
\nREGION - The Amazon Web Services Region.
\nINSTANCE_TYPE_FAMILY - The family of instances (For example, m5
)
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nSAVINGS_PLAN_ARN - The unique identifier for your Savings Plan
\nThe value by which you want to sort the data.
\n\tThe key represents cost and usage metrics. The following values are supported:
\n\n BlendedCost
\n
\n UnblendedCost
\n
\n AmortizedCost
\n
\n NetAmortizedCost
\n
\n NetUnblendedCost
\n
\n UsageQuantity
\n
\n NormalizedUsageAmount
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
When you specify a SortBy
paramater, the context must be COST_AND_USAGE
. Further, when using SortBy
, NextPageToken
and SearchString
are not supported.
The value by which you want to sort the data.
\nThe key represents cost and usage metrics. The following values are supported:
\n\n BlendedCost
\n
\n UnblendedCost
\n
\n AmortizedCost
\n
\n NetAmortizedCost
\n
\n NetUnblendedCost
\n
\n UsageQuantity
\n
\n NormalizedUsageAmount
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
When you specify a SortBy
parameter, the context must be COST_AND_USAGE
. Further, when using SortBy
, NextPageToken
and SearchString
are not supported.
This field is only used when SortBy is provided in the request. The maximum number of objects to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.
\n\tFor GetDimensionValues
, MaxResults has an upper limit of 1000.
This field is only used when SortBy is provided in the request. The maximum number of objects to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.
\nFor GetDimensionValues
, MaxResults has an upper limit of 1000.
The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } } } @@ -2779,14 +2926,14 @@ "DimensionValues": { "target": "com.amazonaws.costexplorer#DimensionValuesWithAttributesList", "traits": { - "smithy.api#documentation": "The filters that you used to filter your request. Some dimensions are available only for a specific context.
\n\t\tIf you set the context to COST_AND_USAGE
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
\nINSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LEGAL_ENTITY_NAME - The name of the organization that sells you AWS services, such as Amazon Web Services.
\nLINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the AWS ID of the member account.
\nOPERATING_SYSTEM - The operating system. Examples are Windows or Linux.
\nOPERATION - The action performed. Examples include RunInstance
and CreateBucket
.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nPURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand \n Instances and Standard Reserved Instances.
\nSERVICE - The AWS service such as Amazon DynamoDB.
\nUSAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues
operation\n includes a unit attribute. Examples include GB and Hrs.
USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this \n operation includes a unit attribute.
\nRECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.
\nRESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service.
\nIf you set the context to RESERVATIONS
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
\nDEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ
and MultiAZ
.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the AWS ID of the member account.
\nPLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nREGION - The AWS Region.
\nSCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.
\nTAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).
\nTENANCY - The tenancy of a resource. Examples are shared or dedicated.
\nIf you set the context to SAVINGS_PLANS
, you can use the following dimensions for searching:
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)
\nPAYMENT_OPTION - Payment option for the given Savings Plans (for example, All Upfront)
\nREGION - The AWS Region.
\nINSTANCE_TYPE_FAMILY - The family of instances (For example, m5
)
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the AWS ID of the member account.
\nSAVINGS_PLAN_ARN - The unique identifier for your Savings Plan
\nThe filters that you used to filter your request. Some dimensions are available only for a specific context.
\n\t\tIf you set the context to COST_AND_USAGE
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
\nINSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services.
\nLINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nOPERATING_SYSTEM - The operating system. Examples are Windows or Linux.
\nOPERATION - The action performed. Examples include RunInstance
and CreateBucket
.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nPURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand \n Instances and Standard Reserved Instances.
\nSERVICE - The Amazon Web Services service such as Amazon DynamoDB.
\nUSAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues
operation\n includes a unit attribute. Examples include GB and Hrs.
USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this \n operation includes a unit attribute.
\nRECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.
\nRESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service.
\nIf you set the context to RESERVATIONS
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
\nDEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ
and MultiAZ
.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nPLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nREGION - The Amazon Web Services Region.
\nSCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.
\nTAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).
\nTENANCY - The tenancy of a resource. Examples are shared or dedicated.
\nIf you set the context to SAVINGS_PLANS
, you can use the following dimensions for searching:
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)
\nPAYMENT_OPTION - Payment option for the given Savings Plans (for example, All Upfront)
\nREGION - The Amazon Web Services Region.
\nINSTANCE_TYPE_FAMILY - The family of instances (For example, m5
)
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nSAVINGS_PLAN_ARN - The unique identifier for your Savings Plan
\nThe number of results that AWS returned at one time.
", + "smithy.api#documentation": "The number of results that Amazon Web Services returned at one time.
", "smithy.api#required": {} } }, @@ -2800,7 +2947,7 @@ "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token for the next set of retrievable results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } } } @@ -2847,7 +2994,7 @@ "Granularity": { "target": "com.amazonaws.costexplorer#Granularity", "traits": { - "smithy.api#documentation": "The granularity of the AWS cost data for the reservation. Valid values are MONTHLY
and DAILY
.
If GroupBy
is set, Granularity
can't be set. If Granularity
isn't set, \n\t\t\tthe response object doesn't include Granularity
, either MONTHLY
or DAILY
.
The GetReservationCoverage
operation supports only DAILY
and MONTHLY
granularities.
The granularity of the Amazon Web Services cost data for the reservation. Valid values are MONTHLY
and DAILY
.
If GroupBy
is set, Granularity
can't be set. If Granularity
isn't set, \n\t\t\tthe response object doesn't include Granularity
, either MONTHLY
or DAILY
.
The GetReservationCoverage
operation supports only DAILY
and MONTHLY
granularities.
The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } }, "SortBy": { "target": "com.amazonaws.costexplorer#SortDefinition", "traits": { - "smithy.api#documentation": "The value by which you want to sort the data.
\n\tThe following values are supported for Key
:
\n OnDemandCost
\n
\n CoverageHoursPercentage
\n
\n OnDemandHours
\n
\n ReservedHours
\n
\n TotalRunningHours
\n
\n CoverageNormalizedUnitsPercentage
\n
\n OnDemandNormalizedUnits
\n
\n ReservedNormalizedUnits
\n
\n TotalRunningNormalizedUnits
\n
\n Time
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The value by which you want to sort the data.
\nThe following values are supported for Key
:
\n OnDemandCost
\n
\n CoverageHoursPercentage
\n
\n OnDemandHours
\n
\n ReservedHours
\n
\n TotalRunningHours
\n
\n CoverageNormalizedUnitsPercentage
\n
\n OnDemandNormalizedUnits
\n
\n ReservedNormalizedUnits
\n
\n TotalRunningNormalizedUnits
\n
\n Time
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The maximum number of objects that you returned for this request. If more objects are available, in the response, AWS provides a NextPageToken value that you can use in a subsequent call to get the next batch of objects.
" + "smithy.api#documentation": "The maximum number of objects that you returned for this request. If more objects are available, in the response, Amazon Web Services provides a NextPageToken value that you can use in a subsequent call to get the next batch of objects.
" } } }, @@ -2905,7 +3052,7 @@ "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token for the next set of retrievable results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } } } @@ -2930,7 +3077,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets recommendations for which reservations to purchase. These recommendations could help you reduce your costs. \n\t\t\tReservations provide a discounted hourly rate (up to 75%) compared to On-Demand pricing.
\n\t\tAWS generates your recommendations by identifying your On-Demand usage during a specific time period and collecting your usage \n\t\t\tinto categories that are eligible for a reservation. After AWS has these categories, it simulates every combination of reservations \n\t\t\tin each category of usage to identify the best number of each type of RI to purchase to maximize your estimated savings.
\n\t\tFor example, AWS automatically aggregates your Amazon EC2 Linux, shared tenancy, and c4 family usage in the US West (Oregon) Region \n\t\t\tand recommends that you buy size-flexible regional reservations to apply to the c4 family usage. AWS recommends the smallest size instance \n\t\t\tin an instance family. This makes it easier to purchase a size-flexible RI. AWS also shows the equal number of normalized units \n\t\t\tso that you can purchase any instance size that you want. For this example, your RI recommendation would be for c4.large
\n\t\t\tbecause that is the smallest size instance in the c4 instance family.
Gets recommendations for which reservations to purchase. These recommendations could help you reduce your costs. \n\t\t\tReservations provide a discounted hourly rate (up to 75%) compared to On-Demand pricing.
\n\t\tAmazon Web Services generates your recommendations by identifying your On-Demand usage during a specific time period and collecting your usage \n\t\t\tinto categories that are eligible for a reservation. After Amazon Web Services has these categories, it simulates every combination of reservations \n\t\t\tin each category of usage to identify the best number of each type of RI to purchase to maximize your estimated savings.
\n\t\tFor example, Amazon Web Services automatically aggregates your Amazon EC2 Linux, shared tenancy, and c4 family usage in the US West (Oregon) Region \n\t\t\tand recommends that you buy size-flexible regional reservations to apply to the c4 family usage. Amazon Web Services recommends the smallest size instance \n\t\t\tin an instance family. This makes it easier to purchase a size-flexible RI. Amazon Web Services also shows the equal number of normalized units \n\t\t\tso that you can purchase any instance size that you want. For this example, your RI recommendation would be for c4.large
\n\t\t\tbecause that is the smallest size instance in the c4 instance family.
The number of previous days that you want AWS to consider when it calculates your recommendations.
" + "smithy.api#documentation": "The number of previous days that you want Amazon Web Services to consider when it calculates your recommendations.
" } }, "TermInYears": { @@ -3073,20 +3220,20 @@ "SortBy": { "target": "com.amazonaws.costexplorer#SortDefinition", "traits": { - "smithy.api#documentation": "The value by which you want to sort the data.
\n\t \n\tThe following values are supported for Key
:
\n UtilizationPercentage
\n
\n UtilizationPercentageInUnits
\n
\n PurchasedHours
\n
\n PurchasedUnits
\n
\n TotalActualHours
\n
\n TotalActualUnits
\n
\n UnusedHours
\n
\n UnusedUnits
\n
\n OnDemandCostOfRIHoursUsed
\n
\n NetRISavings
\n
\n TotalPotentialRISavings
\n
\n AmortizedUpfrontFee
\n
\n AmortizedRecurringFee
\n
\n TotalAmortizedFee
\n
\n RICostForUnusedHours
\n
\n RealizedSavings
\n
\n UnrealizedSavings
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The value by which you want to sort the data.
\n \nThe following values are supported for Key
:
\n UtilizationPercentage
\n
\n UtilizationPercentageInUnits
\n
\n PurchasedHours
\n
\n PurchasedUnits
\n
\n TotalActualHours
\n
\n TotalActualUnits
\n
\n UnusedHours
\n
\n UnusedUnits
\n
\n OnDemandCostOfRIHoursUsed
\n
\n NetRISavings
\n
\n TotalPotentialRISavings
\n
\n AmortizedUpfrontFee
\n
\n AmortizedRecurringFee
\n
\n TotalAmortizedFee
\n
\n RICostForUnusedHours
\n
\n RealizedSavings
\n
\n UnrealizedSavings
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } }, "MaxResults": { "target": "com.amazonaws.costexplorer#MaxResults", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "The maximum number of objects that you returned for this request. If more objects are available, in the response, AWS provides a NextPageToken value that you can use in a subsequent call to get the next batch of objects.
" + "smithy.api#documentation": "The maximum number of objects that you returned for this request. If more objects are available, in the response, Amazon Web Services provides a NextPageToken value that you can use in a subsequent call to get the next batch of objects.
" } } } @@ -3110,7 +3257,7 @@ "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token for the next set of retrievable results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } } } @@ -3132,7 +3279,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates recommendations that\n help you save cost\n by identifying idle and underutilized Amazon EC2 instances.
\nRecommendations are generated to either downsize or terminate instances, along with\n providing savings detail and metrics. For details on calculation and function, see\n Optimizing\n Your Cost with Rightsizing\n Recommendations\n in the AWS Billing and Cost Management User\n Guide.
" + "smithy.api#documentation": "Creates recommendations that\n help you save cost\n by identifying idle and underutilized Amazon EC2 instances.
\nRecommendations are generated to either downsize or terminate instances, along with\n providing savings detail and metrics. For details on calculation and function, see\n Optimizing\n Your Cost with Rightsizing\n Recommendations\n in the Billing and Cost Management User\n Guide.
" } }, "com.amazonaws.costexplorer#GetRightsizingRecommendationRequest": { @@ -3281,7 +3428,7 @@ "SortBy": { "target": "com.amazonaws.costexplorer#SortDefinition", "traits": { - "smithy.api#documentation": "The value by which you want to sort the data.
\n\t \tThe following values are supported for Key
:
\n SpendCoveredBySavingsPlan
\n
\n OnDemandCost
\n
\n CoveragePercentage
\n
\n TotalCost
\n
\n InstanceFamily
\n
\n Region
\n
\n Service
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The value by which you want to sort the data.
\nThe following values are supported for Key
:
\n SpendCoveredBySavingsPlan
\n
\n OnDemandCost
\n
\n CoveragePercentage
\n
\n TotalCost
\n
\n InstanceFamily
\n
\n Region
\n
\n Service
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token for the next set of retrievable results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } } } @@ -3490,7 +3637,7 @@ "SortBy": { "target": "com.amazonaws.costexplorer#SortDefinition", "traits": { - "smithy.api#documentation": "The value by which you want to sort the data.
\n\t \n\tThe following values are supported for Key
:
\n UtilizationPercentage
\n
\n TotalCommitment
\n
\n UsedCommitment
\n
\n UnusedCommitment
\n
\n NetSavings
\n
\n AmortizedRecurringCommitment
\n
\n AmortizedUpfrontCommitment
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The value by which you want to sort the data.
\n \nThe following values are supported for Key
:
\n UtilizationPercentage
\n
\n TotalCommitment
\n
\n UsedCommitment
\n
\n UnusedCommitment
\n
\n NetSavings
\n
\n AmortizedRecurringCommitment
\n
\n AmortizedUpfrontCommitment
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The value by which you want to sort the data.
\n\tThe following values are supported for Key
:
\n UtilizationPercentage
\n
\n TotalCommitment
\n
\n UsedCommitment
\n
\n UnusedCommitment
\n
\n NetSavings
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The value by which you want to sort the data.
\nThe following values are supported for Key
:
\n UtilizationPercentage
\n
\n TotalCommitment
\n
\n UsedCommitment
\n
\n UnusedCommitment
\n
\n NetSavings
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
The value by which you want to sort the data.
\n\tThe key represents cost and usage metrics. The following values are supported:
\n\n BlendedCost
\n
\n UnblendedCost
\n
\n AmortizedCost
\n
\n NetAmortizedCost
\n
\n NetUnblendedCost
\n
\n UsageQuantity
\n
\n NormalizedUsageAmount
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
When using SortBy
, NextPageToken
and SearchString
are not supported.
The value by which you want to sort the data.
\nThe key represents cost and usage metrics. The following values are supported:
\n\n BlendedCost
\n
\n UnblendedCost
\n
\n AmortizedCost
\n
\n NetAmortizedCost
\n
\n NetUnblendedCost
\n
\n UsageQuantity
\n
\n NormalizedUsageAmount
\n
Supported values for SortOrder
are ASCENDING
or DESCENDING
.
When using SortBy
, NextPageToken
and SearchString
are not supported.
This field is only used when SortBy is provided in the request. The maximum number of objects that to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.
\n\tFor GetTags
, MaxResults has an upper limit of 1000.
This field is only used when SortBy is provided in the request. The maximum number of objects that to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.
\nFor GetTags
, MaxResults has an upper limit of 1000.
The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } } } @@ -3653,7 +3800,7 @@ "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { - "smithy.api#documentation": "The token for the next set of retrievable results. AWS provides the token when the response from a previous call has more results than the maximum page size.
" + "smithy.api#documentation": "The token for the next set of retrievable results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
" } }, "Tags": { @@ -3666,7 +3813,7 @@ "ReturnSize": { "target": "com.amazonaws.costexplorer#PageSize", "traits": { - "smithy.api#documentation": "The number of query results that AWS returns at a time.
", + "smithy.api#documentation": "The number of query results that Amazon Web Services returns at a time.
", "smithy.api#required": {} } }, @@ -3729,7 +3876,7 @@ "Filter": { "target": "com.amazonaws.costexplorer#Expression", "traits": { - "smithy.api#documentation": "The filters that you want to use to filter your forecast. The GetUsageForecast
API supports filtering by the following dimensions:
\n AZ
\n
\n INSTANCE_TYPE
\n
\n LINKED_ACCOUNT
\n
\n LINKED_ACCOUNT_NAME
\n
\n OPERATION
\n
\n PURCHASE_TYPE
\n
\n REGION
\n
\n SERVICE
\n
\n USAGE_TYPE
\n
\n USAGE_TYPE_GROUP
\n
\n RECORD_TYPE
\n
\n OPERATING_SYSTEM
\n
\n TENANCY
\n
\n SCOPE
\n
\n PLATFORM
\n
\n SUBSCRIPTION_ID
\n
\n LEGAL_ENTITY_NAME
\n
\n DEPLOYMENT_OPTION
\n
\n DATABASE_ENGINE
\n
\n INSTANCE_TYPE_FAMILY
\n
\n BILLING_ENTITY
\n
\n RESERVATION_ID
\n
\n SAVINGS_PLAN_ARN
\n
The filters that you want to use to filter your forecast. The GetUsageForecast
API supports filtering by the following dimensions:
\n AZ
\n
\n INSTANCE_TYPE
\n
\n LINKED_ACCOUNT
\n
\n LINKED_ACCOUNT_NAME
\n
\n OPERATION
\n
\n PURCHASE_TYPE
\n
\n REGION
\n
\n SERVICE
\n
\n USAGE_TYPE
\n
\n USAGE_TYPE_GROUP
\n
\n RECORD_TYPE
\n
\n OPERATING_SYSTEM
\n
\n TENANCY
\n
\n SCOPE
\n
\n PLATFORM
\n
\n SUBSCRIPTION_ID
\n
\n LEGAL_ENTITY_NAME
\n
\n DEPLOYMENT_OPTION
\n
\n DATABASE_ENGINE
\n
\n INSTANCE_TYPE_FAMILY
\n
\n BILLING_ENTITY
\n
\n RESERVATION_ID
\n
\n SAVINGS_PLAN_ARN
\n
Represents a group when you specify a group by criteria or in the response to a\n query with a specific grouping.
" + "smithy.api#documentation": "Represents a group when you specify a group by criteria or in the response to a query\n with a specific grouping.
" } }, "com.amazonaws.costexplorer#GroupDefinitionKey": { @@ -3823,7 +3970,7 @@ "min": 0, "max": 1024 }, - "smithy.api#pattern": "[\\S\\s]*" + "smithy.api#pattern": "^[\\S\\s]*$" } }, "com.amazonaws.costexplorer#GroupDefinitionType": { @@ -3863,19 +4010,19 @@ "MaxImpact": { "target": "com.amazonaws.costexplorer#GenericDouble", "traits": { - "smithy.api#documentation": "\n The maximum dollar value observed for an anomaly.\n
", + "smithy.api#documentation": "The maximum dollar value that's observed for an anomaly.
", "smithy.api#required": {} } }, "TotalImpact": { "target": "com.amazonaws.costexplorer#GenericDouble", "traits": { - "smithy.api#documentation": "\n The cumulative dollar value observed for an anomaly.\n
" + "smithy.api#documentation": "The cumulative dollar value that's observed for an anomaly.
" } } }, "traits": { - "smithy.api#documentation": "\n The anomaly's dollar value.\n
" + "smithy.api#documentation": "The dollar value of the anomaly.
" } }, "com.amazonaws.costexplorer#InstanceDetails": { @@ -3884,36 +4031,36 @@ "EC2InstanceDetails": { "target": "com.amazonaws.costexplorer#EC2InstanceDetails", "traits": { - "smithy.api#documentation": "The Amazon EC2 instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "The Amazon EC2 instances that Amazon Web Services recommends that you purchase.
" } }, "RDSInstanceDetails": { "target": "com.amazonaws.costexplorer#RDSInstanceDetails", "traits": { - "smithy.api#documentation": "The Amazon RDS instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "The Amazon RDS instances that Amazon Web Services recommends that you purchase.
" } }, "RedshiftInstanceDetails": { "target": "com.amazonaws.costexplorer#RedshiftInstanceDetails", "traits": { - "smithy.api#documentation": "The Amazon Redshift instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "The Amazon Redshift instances that Amazon Web Services recommends that you\n purchase.
" } }, "ElastiCacheInstanceDetails": { "target": "com.amazonaws.costexplorer#ElastiCacheInstanceDetails", "traits": { - "smithy.api#documentation": "The ElastiCache instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "The ElastiCache instances that Amazon Web Services recommends that you purchase.
" } }, "ESInstanceDetails": { "target": "com.amazonaws.costexplorer#ESInstanceDetails", "traits": { - "smithy.api#documentation": "The Amazon ES instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "The Amazon ES instances that Amazon Web Services recommends that you purchase.
" } } }, "traits": { - "smithy.api#documentation": "Details about the instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "Details about the instances that Amazon Web Services recommends that you\n purchase.
" } }, "com.amazonaws.costexplorer#InvalidNextTokenException": { @@ -4125,7 +4272,7 @@ "min": 0, "max": 1024 }, - "smithy.api#pattern": "[\\S\\s]*" + "smithy.api#pattern": "^[\\S\\s]*$" } }, "com.amazonaws.costexplorer#MetricNames": { @@ -4172,7 +4319,7 @@ "TargetInstances": { "target": "com.amazonaws.costexplorer#TargetInstancesList", "traits": { - "smithy.api#documentation": "Identifies whether this instance type is the AWS default recommendation.
" + "smithy.api#documentation": "Determines whether this instance type is the Amazon Web Services default\n recommendation.
" } } }, @@ -4221,30 +4368,30 @@ "NetworkInBytesPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The network ingress throughput utilization measured in Bytes per second.\n
" + "smithy.api#documentation": "The network inbound throughput utilization measured in Bytes per second.
" } }, "NetworkOutBytesPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The network outgress throughput utilization measured in Bytes per second.\n
" + "smithy.api#documentation": "The network outbound throughput utilization measured in Bytes per second.
" } }, "NetworkPacketsInPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The network ingress packets measured in packets per second.\n
" + "smithy.api#documentation": "The network ingress packets that are measured in packets per second.
" } }, "NetworkPacketsOutPerSecond": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The network outgress packets measured in packets per second.\n
" + "smithy.api#documentation": "The network outgress packets that are measured in packets per second.
" } } }, "traits": { - "smithy.api#documentation": "\n The network field that contains a list of network metrics associated with the current instance.\n
" + "smithy.api#documentation": "The network field that contains a list of network metrics that are associated with\n the current instance.
" } }, "com.amazonaws.costexplorer#NextPageToken": { @@ -4254,7 +4401,7 @@ "min": 0, "max": 8192 }, - "smithy.api#pattern": "[\\S\\s]*" + "smithy.api#pattern": "^[\\S\\s]*$" } }, "com.amazonaws.costexplorer#NonNegativeInteger": { @@ -4435,7 +4582,7 @@ "AnomalyId": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n A cost anomaly ID.\n
", + "smithy.api#documentation": "A cost anomaly ID.
", "smithy.api#required": {} } }, @@ -4454,7 +4601,7 @@ "AnomalyId": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The ID of the modified cost anomaly.\n
", + "smithy.api#documentation": "The ID of the modified cost anomaly.
", "smithy.api#required": {} } } @@ -4478,13 +4625,13 @@ "InstanceType": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The type of instance that AWS recommends.
" + "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" } }, "Region": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The AWS Region of the recommended reservation.
" + "smithy.api#documentation": "The Amazon Web Services Region of the recommended reservation.
" } }, "DatabaseEngine": { @@ -4502,7 +4649,7 @@ "DeploymentOption": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Whether the recommendation is for a reservation in a single Availability Zone or a\n reservation with a backup in a second Availability Zone.
" + "smithy.api#documentation": "Determines whether the recommendation is for a reservation in a single Availability\n Zone or a reservation with a backup in a second Availability Zone.
" } }, "LicenseModel": { @@ -4514,18 +4661,18 @@ "CurrentGeneration": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommendation is for a current-generation instance.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current-generation instance.
" } }, "SizeFlexEligible": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommended reservation is size flexible.
" + "smithy.api#documentation": "Determines whether the recommended reservation is size flexible.
" } } }, "traits": { - "smithy.api#documentation": "Details about the Amazon RDS instances that AWS recommends that you\n purchase.
" + "smithy.api#documentation": "Details about the Amazon RDS instances that Amazon Web Services recommends that you\n purchase.
" } }, "com.amazonaws.costexplorer#RICostForUnusedHours": { @@ -4561,30 +4708,30 @@ "NodeType": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The type of node that AWS recommends.
" + "smithy.api#documentation": "The type of node that Amazon Web Services recommends.
" } }, "Region": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The AWS Region of the recommended reservation.
" + "smithy.api#documentation": "The Amazon Web Services Region of the recommended reservation.
" } }, "CurrentGeneration": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommendation is for a current-generation instance.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current-generation instance.
" } }, "SizeFlexEligible": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Whether the recommended reservation is size flexible.
" + "smithy.api#documentation": "Determines whether the recommended reservation is size flexible.
" } } }, "traits": { - "smithy.api#documentation": "Details about the Amazon Redshift instances that AWS recommends that you\n purchase.
" + "smithy.api#documentation": "Details about the Amazon Redshift instances that Amazon Web Services recommends that\n you purchase.
" } }, "com.amazonaws.costexplorer#RequestChangedException": { @@ -4611,7 +4758,7 @@ "UtilizationPercentageInUnits": { "target": "com.amazonaws.costexplorer#UtilizationPercentageInUnits", "traits": { - "smithy.api#documentation": "The percentage of Amazon EC2 reservation time that you used, converted to normalized\n units. Normalized units are available only for Amazon EC2 usage after November 11,\n 2017.
" + "smithy.api#documentation": "The percentage of Amazon EC2 reservation time that you used. It's converted to\n normalized units. Normalized units are available only for Amazon EC2 usage after\n November 11, 2017.
" } }, "PurchasedHours": { @@ -4623,7 +4770,7 @@ "PurchasedUnits": { "target": "com.amazonaws.costexplorer#PurchasedUnits", "traits": { - "smithy.api#documentation": "How many Amazon EC2 reservation hours that you purchased, converted to normalized units.\n Normalized units are available only for Amazon EC2 usage after November 11, 2017.
" + "smithy.api#documentation": "The number of Amazon EC2 reservation hours that you purchased. It's converted to\n normalized units. Normalized units are available only for Amazon EC2 usage after\n November 11, 2017.
" } }, "TotalActualHours": { @@ -4635,7 +4782,7 @@ "TotalActualUnits": { "target": "com.amazonaws.costexplorer#TotalActualUnits", "traits": { - "smithy.api#documentation": "The total number of Amazon EC2 reservation hours that you used, converted to normalized\n units. Normalized units are available only for Amazon EC2 usage after November 11,\n 2017.
" + "smithy.api#documentation": "The total number of Amazon EC2 reservation hours that you used. It's converted to\n normalized units. Normalized units are available only for Amazon EC2 usage after\n November 11, 2017.
" } }, "UnusedHours": { @@ -4647,43 +4794,43 @@ "UnusedUnits": { "target": "com.amazonaws.costexplorer#UnusedUnits", "traits": { - "smithy.api#documentation": "The number of Amazon EC2 reservation hours that you didn't use, converted to normalized\n units. Normalized units are available only for Amazon EC2 usage after November 11,\n 2017.
" + "smithy.api#documentation": "The number of Amazon EC2 reservation hours that you didn't use. It's converted to\n normalized units. Normalized units are available only for Amazon EC2 usage after\n November 11, 2017.
" } }, "OnDemandCostOfRIHoursUsed": { "target": "com.amazonaws.costexplorer#OnDemandCostOfRIHoursUsed", "traits": { - "smithy.api#documentation": "How much your reservation would cost if charged On-Demand rates.
" + "smithy.api#documentation": "How much your reservation costs if charged On-Demand rates.
" } }, "NetRISavings": { "target": "com.amazonaws.costexplorer#NetRISavings", "traits": { - "smithy.api#documentation": "How much you saved due to purchasing and utilizing reservation. AWS calculates this\n by subtracting TotalAmortizedFee
from\n OnDemandCostOfRIHoursUsed
.
How much you saved due to purchasing and utilizing reservation. Amazon Web Services\n calculates this by subtracting TotalAmortizedFee
from\n OnDemandCostOfRIHoursUsed
.
How much you could save if you use your entire reservation.
" + "smithy.api#documentation": "How much you might save if you use your entire reservation.
" } }, "AmortizedUpfrontFee": { "target": "com.amazonaws.costexplorer#AmortizedUpfrontFee", "traits": { - "smithy.api#documentation": "The upfront cost of your reservation, amortized over the reservation\n period.
" + "smithy.api#documentation": "The upfront cost of your reservation. It's amortized over the reservation\n period.
" } }, "AmortizedRecurringFee": { "target": "com.amazonaws.costexplorer#AmortizedRecurringFee", "traits": { - "smithy.api#documentation": "The monthly cost of your reservation, amortized over the reservation\n period.
" + "smithy.api#documentation": "The monthly cost of your reservation. It's amortized over the reservation\n period.
" } }, "TotalAmortizedFee": { "target": "com.amazonaws.costexplorer#TotalAmortizedFee", "traits": { - "smithy.api#documentation": "The total cost of your reservation, amortized over the reservation\n period.
" + "smithy.api#documentation": "The total cost of your reservation. It's amortized over the reservation period.
" } }, "RICostForUnusedHours": { @@ -4695,13 +4842,13 @@ "RealizedSavings": { "target": "com.amazonaws.costexplorer#RealizedSavings", "traits": { - "smithy.api#documentation": "The realized savings due to purchasing and using a reservation.
" + "smithy.api#documentation": "The realized savings because of purchasing and using a reservation.
" } }, "UnrealizedSavings": { "target": "com.amazonaws.costexplorer#UnrealizedSavings", "traits": { - "smithy.api#documentation": "The unrealized savings due to purchasing and using a reservation.
" + "smithy.api#documentation": "The unrealized savings because of purchasing and using a reservation.
" } } }, @@ -4726,7 +4873,7 @@ } }, "traits": { - "smithy.api#documentation": "A\n group of reservations that share a set of attributes.
" + "smithy.api#documentation": "A group of reservations that share a set of attributes.
" } }, "com.amazonaws.costexplorer#ReservationCoverageGroups": { @@ -4747,13 +4894,13 @@ "AccountScope": { "target": "com.amazonaws.costexplorer#AccountScope", "traits": { - "smithy.api#documentation": "The account scope that AWS recommends that you purchase this instance for. For\n example, you can purchase this reservation for an entire organization in AWS\n Organizations.
" + "smithy.api#documentation": "The account scope that Amazon Web Services recommends that you purchase this instance\n for. For example, you can purchase this reservation for an entire organization in\n Amazon Web Services Organizations.
" } }, "LookbackPeriodInDays": { "target": "com.amazonaws.costexplorer#LookbackPeriodInDays", "traits": { - "smithy.api#documentation": "How many days of previous usage that AWS considers when making this\n recommendation.
" + "smithy.api#documentation": "How many days of previous usage that Amazon Web Services considers when making this\n recommendation.
" } }, "TermInYears": { @@ -4765,13 +4912,13 @@ "PaymentOption": { "target": "com.amazonaws.costexplorer#PaymentOption", "traits": { - "smithy.api#documentation": "The payment option for the reservation. For example, AllUpfront
or\n NoUpfront
.
The payment option for the reservation (for example, AllUpfront
or\n NoUpfront
).
Hardware specifications for the service that you want recommendations\n for.
" + "smithy.api#documentation": "Hardware specifications for the service that you want recommendations for.
" } }, "RecommendationDetails": { @@ -4788,7 +4935,7 @@ } }, "traits": { - "smithy.api#documentation": "A specific reservation that AWS recommends for purchase.
" + "smithy.api#documentation": "A specific reservation that Amazon Web Services recommends for purchase.
" } }, "com.amazonaws.costexplorer#ReservationPurchaseRecommendationDetail": { @@ -4803,97 +4950,97 @@ "InstanceDetails": { "target": "com.amazonaws.costexplorer#InstanceDetails", "traits": { - "smithy.api#documentation": "Details about the instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "Details about the instances that Amazon Web Services recommends that you\n purchase.
" } }, "RecommendedNumberOfInstancesToPurchase": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The number of instances that AWS recommends that you purchase.
" + "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" } }, "RecommendedNormalizedUnitsToPurchase": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The number of normalized units that AWS recommends that you purchase.
" + "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" } }, "MinimumNumberOfInstancesUsedPerHour": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The minimum number of instances that you used in an hour during the historical\n period. AWS uses this to calculate your recommended reservation purchases.
" + "smithy.api#documentation": "The minimum number of instances that you used in an hour during the historical period.\n Amazon Web Services uses this to calculate your recommended reservation\n purchases.
" } }, "MinimumNormalizedUnitsUsedPerHour": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The minimum number of normalized units that you used in an hour during the\n historical period. AWS uses this to calculate your recommended reservation\n purchases.
" + "smithy.api#documentation": "The minimum number of normalized units that you used in an hour during the historical\n period. Amazon Web Services uses this to calculate your recommended reservation\n purchases.
" } }, "MaximumNumberOfInstancesUsedPerHour": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The maximum number of instances that you used in an hour during the historical\n period. AWS uses this to calculate your recommended reservation purchases.
" + "smithy.api#documentation": "The maximum number of instances that you used in an hour during the historical period.\n Amazon Web Services uses this to calculate your recommended reservation\n purchases.
" } }, "MaximumNormalizedUnitsUsedPerHour": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The maximum number of normalized units that you used in an hour during the\n historical period. AWS uses this to calculate your recommended reservation\n purchases.
" + "smithy.api#documentation": "The maximum number of normalized units that you used in an hour during the historical\n period. Amazon Web Services uses this to calculate your recommended reservation\n purchases.
" } }, "AverageNumberOfInstancesUsedPerHour": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The average number of instances that you used in an hour during the historical\n period. AWS uses this to calculate your recommended reservation purchases.
" + "smithy.api#documentation": "The average number of instances that you used in an hour during the historical period.\n Amazon Web Services uses this to calculate your recommended reservation\n purchases.
" } }, "AverageNormalizedUnitsUsedPerHour": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The average number of normalized units that you used in an hour during the\n historical period. AWS uses this to calculate your recommended reservation\n purchases.
" + "smithy.api#documentation": "The average number of normalized units that you used in an hour during the historical\n period. Amazon Web Services uses this to calculate your recommended reservation\n purchases.
" } }, "AverageUtilization": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The average utilization of your instances. AWS uses this to calculate your\n recommended reservation purchases.
" + "smithy.api#documentation": "The average utilization of your instances. Amazon Web Services uses this to calculate\n your recommended reservation purchases.
" } }, "EstimatedBreakEvenInMonths": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "How long AWS estimates that it takes for this instance to start saving you money,\n in months.
" + "smithy.api#documentation": "How long Amazon Web Services estimates that it takes for this instance to start saving\n you money, in months.
" } }, "CurrencyCode": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The currency code that AWS used to calculate the costs for this instance.
" + "smithy.api#documentation": "The currency code that Amazon Web Services used to calculate the costs for this\n instance.
" } }, "EstimatedMonthlySavingsAmount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "How much AWS estimates that this specific recommendation could save you in a\n month.
" + "smithy.api#documentation": "How much Amazon Web Services estimates that this specific recommendation could save you\n in a month.
" } }, "EstimatedMonthlySavingsPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "How much AWS estimates that this specific recommendation could save you in a month,\n as a percentage of your overall costs.
" + "smithy.api#documentation": "How much Amazon Web Services estimates that this specific recommendation could save you\n in a month, as a percentage of your overall costs.
" } }, "EstimatedMonthlyOnDemandCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "How much AWS estimates that you spend on On-Demand Instances in a month.
" + "smithy.api#documentation": "How much Amazon Web Services estimates that you spend on On-Demand Instances in a\n month.
" } }, "EstimatedReservationCostForLookbackPeriod": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "How much AWS estimates that you would have spent for all usage during the specified\n historical period if you had\n a\n reservation.
" + "smithy.api#documentation": "How much Amazon Web Services estimates that you would have spent for all usage during\n the specified historical period if you had a reservation.
" } }, "UpfrontCost": { @@ -4931,12 +5078,12 @@ "GenerationTimestamp": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The timestamp for when AWS made this recommendation.
" + "smithy.api#documentation": "The timestamp for when Amazon Web Services made this recommendation.
" } } }, "traits": { - "smithy.api#documentation": "Information about this specific recommendation, such as the timestamp for when AWS\n made a specific recommendation.
" + "smithy.api#documentation": "Information about this specific recommendation, such as the timestamp for when Amazon Web Services made a specific recommendation.
" } }, "com.amazonaws.costexplorer#ReservationPurchaseRecommendationSummary": { @@ -4945,13 +5092,13 @@ "TotalEstimatedMonthlySavingsAmount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The total amount that AWS estimates that this recommendation could save you in a\n month.
" + "smithy.api#documentation": "The total amount that Amazon Web Services estimates that this recommendation could save\n you in a month.
" } }, "TotalEstimatedMonthlySavingsPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The total amount that AWS estimates that this recommendation could save you in a\n month, as a percentage of your costs.
" + "smithy.api#documentation": "The total amount that Amazon Web Services estimates that this recommendation could save\n you in a month, as a percentage of your costs.
" } }, "CurrencyCode": { @@ -4962,7 +5109,7 @@ } }, "traits": { - "smithy.api#documentation": "A summary about this recommendation, such as the currency code, the amount that AWS\n estimates that you could save, and the total amount of reservation to\n purchase.
" + "smithy.api#documentation": "A summary about this recommendation, such as the currency code, the amount that\n Amazon Web Services estimates that you could save, and the total amount of\n reservation to purchase.
" } }, "com.amazonaws.costexplorer#ReservationPurchaseRecommendations": { @@ -5048,12 +5195,12 @@ "EC2ResourceUtilization": { "target": "com.amazonaws.costexplorer#EC2ResourceUtilization", "traits": { - "smithy.api#documentation": "Utilization of current Amazon EC2 instance.
" + "smithy.api#documentation": "The utilization of current Amazon EC2 instance.
" } } }, "traits": { - "smithy.api#documentation": "Resource utilization of current resource.
" + "smithy.api#documentation": "Resource utilization of current resource.
" } }, "com.amazonaws.costexplorer#ResultByTime": { @@ -5080,12 +5227,12 @@ "Estimated": { "target": "com.amazonaws.costexplorer#Estimated", "traits": { - "smithy.api#documentation": "Whether\n the result is estimated.
" + "smithy.api#documentation": "Determines whether the result is estimated.
" } } }, "traits": { - "smithy.api#documentation": "The result that is associated with a time period.
" + "smithy.api#documentation": "The result that's associated with a time period.
" } }, "com.amazonaws.costexplorer#ResultsByTime": { @@ -5106,31 +5253,31 @@ "CurrentInstance": { "target": "com.amazonaws.costexplorer#CurrentInstance", "traits": { - "smithy.api#documentation": "Context regarding the current instance.
" + "smithy.api#documentation": "Context regarding the current instance.
" } }, "RightsizingType": { "target": "com.amazonaws.costexplorer#RightsizingType", "traits": { - "smithy.api#documentation": "Recommendation to either terminate or modify the resource.
" + "smithy.api#documentation": "A recommendation to either terminate or modify the resource.
" } }, "ModifyRecommendationDetail": { "target": "com.amazonaws.costexplorer#ModifyRecommendationDetail", "traits": { - "smithy.api#documentation": "Details for modification recommendations.
" + "smithy.api#documentation": "The details for the modification recommendations.
" } }, "TerminateRecommendationDetail": { "target": "com.amazonaws.costexplorer#TerminateRecommendationDetail", "traits": { - "smithy.api#documentation": "Details for termination recommendations.
" + "smithy.api#documentation": "The details for termination recommendations.
" } }, "FindingReasonCodes": { "target": "com.amazonaws.costexplorer#FindingReasonCodes", "traits": { - "smithy.api#documentation": "\n The list of possible reasons why the recommendation is generated such as under or over utilization of specific metrics (for example, CPU, Memory, Network).\n
" + "smithy.api#documentation": "The list of possible reasons why the recommendation is generated such as under or\n over utilization of specific metrics (for example, CPU, Memory, Network).
" } } }, @@ -5144,20 +5291,20 @@ "RecommendationTarget": { "target": "com.amazonaws.costexplorer#RecommendationTarget", "traits": { - "smithy.api#documentation": "\n The option to see recommendations within the same instance family, or recommendations for instances across other families. The default value is SAME_INSTANCE_FAMILY
.\n
The option to see recommendations within the same instance family or recommendations\n for instances across other families. The default value is\n SAME_INSTANCE_FAMILY
.
\n The option to consider RI or Savings Plans discount benefits in your savings calculation. The default value is TRUE
.\n
The option to consider RI or Savings Plans discount benefits in your savings\n calculation. The default value is TRUE
.
Enables you to customize recommendations across two attributes. You can choose to view\n recommendations for instances within the same instance families or across different instance\n families. You can also choose to view your estimated savings associated with recommendations\n with consideration of existing Savings Plans or RI benefits, or\n neither.
" + "smithy.api#documentation": "You can use RightsizingRecommendationConfiguration
to customize\n recommendations across two attributes. You can choose to view recommendations for\n instances within the same instance families or across different instance families. You\n can also choose to view your estimated savings that are associated with recommendations\n with consideration of existing Savings Plans or RI benefits, or neither.
The ID for this specific recommendation.
" + "smithy.api#documentation": "The ID for this specific recommendation.
" } }, "GenerationTimestamp": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The timestamp for when AWS made this recommendation.
" + "smithy.api#documentation": "The timestamp for when Amazon Web Services made this recommendation.
" } }, "LookbackPeriodInDays": { "target": "com.amazonaws.costexplorer#LookbackPeriodInDays", "traits": { - "smithy.api#documentation": "How many days of previous usage that AWS considers when making this\n recommendation.
" + "smithy.api#documentation": "The number of days of previous usage that Amazon Web Services considers when making\n this recommendation.
" } }, "AdditionalMetadata": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Additional metadata that may be applicable to the recommendation.
" + "smithy.api#documentation": "Additional metadata that might be applicable to the recommendation.
" } } }, @@ -5204,30 +5351,30 @@ "TotalRecommendationCount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Total number of instance recommendations.
" + "smithy.api#documentation": "The total number of instance recommendations.
" } }, "EstimatedTotalMonthlySavingsAmount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Estimated total savings resulting from modifications, on a monthly basis.
" + "smithy.api#documentation": "The estimated total savings resulting from modifications, on a monthly basis.
" } }, "SavingsCurrencyCode": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The currency code that AWS used to calculate the savings.
" + "smithy.api#documentation": "The currency code that Amazon Web Services used to calculate the savings.
" } }, "SavingsPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Savings percentage based on the recommended modifications, relative to the total\n On-Demand\n costs associated with these instances.
" + "smithy.api#documentation": "The savings percentage based on the recommended modifications. It's relative to the\n total On-Demand costs that are associated with these instances.
" } } }, "traits": { - "smithy.api#documentation": "Summary of rightsizing recommendations
" + "smithy.api#documentation": "The summary of rightsizing recommendations
" } }, "com.amazonaws.costexplorer#RightsizingType": { @@ -5251,30 +5398,30 @@ "Service": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The AWS service name associated with the cost anomaly.\n
" + "smithy.api#documentation": "The Amazon Web Services service name that's associated with the cost anomaly.
" } }, "Region": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The AWS Region associated with the cost anomaly.\n
" + "smithy.api#documentation": "The Amazon Web Services Region that's associated with the cost anomaly.
" } }, "LinkedAccount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The linked account value associated with the cost anomaly.\n
" + "smithy.api#documentation": "The member account value that's associated with the cost anomaly.
" } }, "UsageType": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The UsageType
value associated with the cost anomaly.\n
The UsageType
value that's associated with the cost anomaly.
\n The combination of AWS service, linked account, Region, and usage type where a cost anomaly is observed.\n
" + "smithy.api#documentation": "The combination of Amazon Web Services service, linked account, Region, and usage type\n where a cost anomaly is observed.
" } }, "com.amazonaws.costexplorer#RootCauses": { @@ -5292,24 +5439,24 @@ "AmortizedRecurringCommitment": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The amortized amount of your Savings Plans commitment that was purchased with either a Partial
or a NoUpfront
.
The amortized amount of your Savings Plans commitment that was purchased with either a\n Partial
or a NoUpfront
.
The amortized amount of your Savings Plans commitment that was purchased with an Upfront
or PartialUpfront
Savings Plans.
The amortized amount of your Savings Plans commitment that was purchased with an\n Upfront
or PartialUpfront
Savings Plans.
The total amortized amount of your Savings Plans commitment, regardless of your Savings Plans purchase method.
" + "smithy.api#documentation": "The total amortized amount of your Savings Plans commitment, regardless of your\n Savings Plans purchase method.
" } } }, "traits": { - "smithy.api#documentation": "The amortized amount of Savings Plans purchased in a specific account during a specific time interval.
" + "smithy.api#documentation": "The amortized amount of Savings Plans purchased in a specific account during a\n specific time interval.
" } }, "com.amazonaws.costexplorer#SavingsPlansCoverage": { @@ -5332,7 +5479,7 @@ } }, "traits": { - "smithy.api#documentation": "The amount of Savings Plans eligible usage that is covered by Savings Plans. All calculations consider the On-Demand equivalent of your Savings Plans usage.
" + "smithy.api#documentation": "The amount of Savings Plans eligible usage that is covered by Savings Plans. All\n calculations consider the On-Demand equivalent of your Savings Plans usage.
" } }, "com.amazonaws.costexplorer#SavingsPlansCoverageData": { @@ -5341,30 +5488,30 @@ "SpendCoveredBySavingsPlans": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The amount of your AWS usage that is covered by a Savings Plans.
" + "smithy.api#documentation": "The amount of your Amazon Web Services usage that is covered by a Savings Plans.
" } }, "OnDemandCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The cost of your AWS usage at the public On-Demand rate.
" + "smithy.api#documentation": "The cost of your Amazon Web Services usage at the public On-Demand rate.
" } }, "TotalCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The total cost of your AWS usage, regardless of your purchase option.
" + "smithy.api#documentation": "The total cost of your Amazon Web Services usage, regardless of your purchase\n option.
" } }, "CoveragePercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The percentage of your existing Savings Plans covered usage, divided by all of your\n eligible Savings Plans usage in an account(or set of accounts).
" + "smithy.api#documentation": "The percentage of your existing Savings Plans covered usage, divided by all of your\n eligible Savings Plans usage in an account (or set of accounts).
" } } }, "traits": { - "smithy.api#documentation": "Specific coverage percentage, On-Demand costs, and spend covered by Savings Plans, and total Savings Plans costs for an account.
" + "smithy.api#documentation": "Specific coverage percentage, On-Demand costs, and spend covered by Savings Plans, and\n total Savings Plans costs for an account.
" } }, "com.amazonaws.costexplorer#SavingsPlansCoverages": { @@ -5408,7 +5555,7 @@ "Region": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "A collection of AWS resources in a geographic area. Each AWS Region is isolated and independent of the other Regions.
" + "smithy.api#documentation": "A collection of Amazon Web Services resources in a geographic area. Each Amazon Web Services Region is isolated and independent of the other Regions.
" } }, "InstanceFamily": { @@ -5420,12 +5567,12 @@ "OfferingId": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The unique ID used to distinguish Savings Plans from one another.
" + "smithy.api#documentation": "The unique ID that's used to distinguish Savings Plans from one another.
" } } }, "traits": { - "smithy.api#documentation": "Attribute details on a specific Savings Plan.
" + "smithy.api#documentation": "The attribute details on a specific Savings Plan.
" } }, "com.amazonaws.costexplorer#SavingsPlansPurchaseRecommendation": { @@ -5434,7 +5581,7 @@ "AccountScope": { "target": "com.amazonaws.costexplorer#AccountScope", "traits": { - "smithy.api#documentation": "The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the management account and member accounts if the value is set to PAYER
. If the value is LINKED
, recommendations are calculated for individual member accounts only.
The account scope that you want your recommendations for. Amazon Web Services\n calculates recommendations that include the management account and member accounts if\n the value is set to PAYER
. If the value is LINKED
,\n recommendations are calculated for individual member accounts only.
The Savings Plans recommendation term in years, used to generate the recommendation.
" + "smithy.api#documentation": "The Savings Plans recommendation term in years. It's used to generate the\n recommendation.
" } }, "PaymentOption": { @@ -5475,7 +5622,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains your request parameters, Savings Plan Recommendations Summary, and Details.
" + "smithy.api#documentation": "Contains your request parameters, Savings Plan Recommendations Summary, and\n Details.
" } }, "com.amazonaws.costexplorer#SavingsPlansPurchaseRecommendationDetail": { @@ -5496,55 +5643,55 @@ "UpfrontCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The upfront cost of the recommended Savings Plans, based on the selected payment option.
" + "smithy.api#documentation": "The upfront cost of the recommended Savings Plans, based on the selected payment\n option.
" } }, "EstimatedROI": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The estimated return on investment based on the recommended Savings Plans purchased. This is calculated as estimatedSavingsAmount
/ estimatedSPCost
*100.
The estimated return on investment that's based on the recommended Savings Plans that\n you purchased. This is calculated as estimatedSavingsAmount
/\n estimatedSPCost
*100.
The currency code AWS used to generate the recommendations and present potential\n savings.
" + "smithy.api#documentation": "The currency code that Amazon Web Services used to generate the recommendations and\n present potential savings.
" } }, "EstimatedSPCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The cost of the recommended Savings Plans over the length of the lookback period.
" + "smithy.api#documentation": "The cost of the recommended Savings Plans over the length of the lookback\n period.
" } }, "EstimatedOnDemandCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The remaining On-Demand cost estimated to not be covered by the recommended Savings Plans, over the length of the lookback period.
" + "smithy.api#documentation": "The remaining On-Demand cost estimated to not be covered by the recommended Savings\n Plans, over the length of the lookback period.
" } }, "EstimatedOnDemandCostWithCurrentCommitment": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The estimated On-Demand costs you would expect with no additional commitment, based on your usage of the selected time period and the Savings Plans you own.\n
" + "smithy.api#documentation": "The estimated On-Demand costs you would expect with no additional commitment, based\n on your usage of the selected time period and the Savings Plans you own.
" } }, "EstimatedSavingsAmount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The estimated savings amount based on the recommended Savings Plans over the length of the lookback period.
" + "smithy.api#documentation": "The estimated savings amount that's based on the recommended Savings Plans over the\n length of the lookback period.
" } }, "EstimatedSavingsPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The estimated savings percentage relative to the total cost of applicable On-Demand usage over the lookback period.
" + "smithy.api#documentation": "The estimated savings percentage relative to the total cost of applicable On-Demand\n usage over the lookback period.
" } }, "HourlyCommitmentToPurchase": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The recommended hourly commitment level for the Savings Plans type, and configuration based on the usage during the lookback period.
" + "smithy.api#documentation": "The recommended hourly commitment level for the Savings Plans type and the\n configuration that's based on the usage during the lookback period.
" } }, "EstimatedAverageUtilization": { @@ -5556,25 +5703,25 @@ "EstimatedMonthlySavingsAmount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The estimated monthly savings amount, based on the recommended Savings Plans.
" + "smithy.api#documentation": "The estimated monthly savings amount based on the recommended Savings Plans.
" } }, "CurrentMinimumHourlyOnDemandSpend": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The lowest value of hourly On-Demand spend over the lookback period of the applicable usage type.
" + "smithy.api#documentation": "The lowest value of hourly On-Demand spend over the lookback period of the applicable\n usage type.
" } }, "CurrentMaximumHourlyOnDemandSpend": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The highest value of hourly On-Demand spend over the lookback period of the applicable usage type.
" + "smithy.api#documentation": "The highest value of hourly On-Demand spend over the lookback period of the applicable\n usage type.
" } }, "CurrentAverageHourlyOnDemandSpend": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The average value of hourly On-Demand spend over the lookback period of the applicable usage type.
" + "smithy.api#documentation": "The average value of hourly On-Demand spend over the lookback period of the applicable\n usage type.
" } } }, @@ -5606,7 +5753,7 @@ "AdditionalMetadata": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Additional metadata that may be applicable to the recommendation.
" + "smithy.api#documentation": "Additional metadata that might be applicable to the recommendation.
" } } }, @@ -5620,37 +5767,37 @@ "EstimatedROI": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The estimated return on investment based on the recommended Savings Plans and estimated savings.
" + "smithy.api#documentation": "The estimated return on investment that's based on the recommended Savings Plans and\n estimated savings.
" } }, "CurrencyCode": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The currency code AWS used to generate the recommendations and present potential\n savings.
" + "smithy.api#documentation": "The currency code that Amazon Web Services used to generate the recommendations and\n present potential savings.
" } }, "EstimatedTotalCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The estimated total cost of the usage after purchasing the recommended Savings Plans. This is a sum of the cost of Savings Plans during this term, and the remaining On-Demand usage.
" + "smithy.api#documentation": "The estimated total cost of the usage after purchasing the recommended Savings Plans.\n This is a sum of the cost of Savings Plans during this term, and the remaining On-Demand\n usage.
" } }, "CurrentOnDemandSpend": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The current total on demand spend of the applicable usage types over the lookback period.
" + "smithy.api#documentation": "The current total on demand spend of the applicable usage types over the lookback\n period.
" } }, "EstimatedSavingsAmount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The estimated total savings over the lookback period, based on the purchase of the recommended Savings Plans.
" + "smithy.api#documentation": "The estimated total savings over the lookback period, based on the purchase of the\n recommended Savings Plans.
" } }, "TotalRecommendationCount": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The aggregate number of Savings Plans recommendations that exist for your account.
" + "smithy.api#documentation": "The aggregate number of Savings Plans recommendations that exist for your\n account.
" } }, "DailyCommitmentToPurchase": { @@ -5662,25 +5809,25 @@ "HourlyCommitmentToPurchase": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The recommended hourly commitment based on the recommendation parameters.
" + "smithy.api#documentation": "The recommended hourly commitment that's based on the recommendation\n parameters.
" } }, "EstimatedSavingsPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The estimated savings relative to the total cost of On-Demand usage, over the lookback period. This is calculated as estimatedSavingsAmount
/ CurrentOnDemandSpend
*100.
The estimated savings relative to the total cost of On-Demand usage, over the lookback\n period. This is calculated as estimatedSavingsAmount
/\n CurrentOnDemandSpend
*100.
The estimated monthly savings amount, based on the recommended Savings Plans purchase.
" + "smithy.api#documentation": "The estimated monthly savings amount that's based on the recommended Savings Plans\n purchase.
" } }, "EstimatedOnDemandCostWithCurrentCommitment": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The estimated On-Demand costs you would expect with no additional commitment, based on your usage of the selected time period and the Savings Plans you own.\n
" + "smithy.api#documentation": "The estimated On-Demand costs you would expect with no additional commitment. It's\n based on your usage of the selected time period and the Savings Plans you own.
" } } }, @@ -5694,18 +5841,18 @@ "NetSavings": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The savings amount that you are accumulating for the usage that is covered by a Savings Plans, when compared to the On-Demand equivalent of the same usage.
" + "smithy.api#documentation": "The savings amount that you're accumulating for the usage that's covered by a Savings\n Plans, when compared to the On-Demand equivalent of the same usage.
" } }, "OnDemandCostEquivalent": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "How much the amount that the usage would have cost if it was\n accrued\n at the On-Demand rate.
" + "smithy.api#documentation": "How much the amount that the usage would have cost if it was accrued at the On-Demand\n rate.
" } } }, "traits": { - "smithy.api#documentation": "The amount of savings you're accumulating, against the public On-Demand rate of the usage accrued in an account.
" + "smithy.api#documentation": "The amount of savings that you're accumulating, against the public On-Demand rate of\n the usage accrued in an account.
" } }, "com.amazonaws.costexplorer#SavingsPlansUtilization": { @@ -5714,30 +5861,30 @@ "TotalCommitment": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The total amount of Savings Plans commitment that's been purchased in an account (or set of accounts).
" + "smithy.api#documentation": "The total amount of Savings Plans commitment that's been purchased in an account (or\n set of accounts).
" } }, "UsedCommitment": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The amount of your Savings Plans commitment that was consumed from Savings Plans eligible usage in a specific period.
" + "smithy.api#documentation": "The amount of your Savings Plans commitment that was consumed from Savings Plans\n eligible usage in a specific period.
" } }, "UnusedCommitment": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The amount of your Savings Plans commitment that was not consumed from Savings Plans eligible usage in a specific period.
" + "smithy.api#documentation": "The amount of your Savings Plans commitment that wasn't consumed from Savings Plans\n eligible usage in a specific period.
" } }, "UtilizationPercentage": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The amount of UsedCommitment
divided by the TotalCommitment
for your Savings Plans.
The amount of UsedCommitment
divided by the TotalCommitment
\n for your Savings Plans.
The measurement of how well you are using your existing Savings Plans.
" + "smithy.api#documentation": "The measurement of how well you're using your existing Savings Plans.
" } }, "com.amazonaws.costexplorer#SavingsPlansUtilizationAggregates": { @@ -5746,20 +5893,20 @@ "Utilization": { "target": "com.amazonaws.costexplorer#SavingsPlansUtilization", "traits": { - "smithy.api#documentation": "A ratio of your effectiveness of using existing Savings Plans to apply to workloads that are Savings Plans eligible.
", + "smithy.api#documentation": "A ratio of your effectiveness of using existing Savings Plans to apply to workloads\n that are Savings Plans eligible.
", "smithy.api#required": {} } }, "Savings": { "target": "com.amazonaws.costexplorer#SavingsPlansSavings", "traits": { - "smithy.api#documentation": "The amount saved by using existing Savings Plans. Savings returns both net savings from Savings Plans, as well as the onDemandCostEquivalent
of the Savings Plans when considering the utilization rate.
The amount saved by using existing Savings Plans. Savings returns both net savings\n from Savings Plans, as well as the onDemandCostEquivalent
of the Savings\n Plans when considering the utilization rate.
The total amortized commitment for a Savings Plans. This includes the sum of the upfront and recurring Savings Plans fees.
" + "smithy.api#documentation": "The total amortized commitment for a Savings Plans. This includes the sum of the\n upfront and recurring Savings Plans fees.
" } } }, @@ -5779,20 +5926,20 @@ "Utilization": { "target": "com.amazonaws.costexplorer#SavingsPlansUtilization", "traits": { - "smithy.api#documentation": "A ratio of your effectiveness of using existing Savings Plans to apply to workloads that are Savings Plans eligible.
", + "smithy.api#documentation": "A ratio of your effectiveness of using existing Savings Plans to apply to workloads\n that are Savings Plans eligible.
", "smithy.api#required": {} } }, "Savings": { "target": "com.amazonaws.costexplorer#SavingsPlansSavings", "traits": { - "smithy.api#documentation": "The amount saved by using existing Savings Plans. Savings returns both net savings from Savings Plans as well as the onDemandCostEquivalent
of the Savings Plans when considering the utilization rate.
The amount saved by using existing Savings Plans. Savings returns both net savings\n from Savings Plans as well as the onDemandCostEquivalent
of the Savings\n Plans when considering the utilization rate.
The total amortized commitment for a Savings Plans. This includes the sum of the upfront and recurring Savings Plans fees.
" + "smithy.api#documentation": "The total amortized commitment for a Savings Plans. This includes the sum of the\n upfront and recurring Savings Plans fees.
" } } }, @@ -5818,24 +5965,24 @@ "Utilization": { "target": "com.amazonaws.costexplorer#SavingsPlansUtilization", "traits": { - "smithy.api#documentation": "A ratio of your effectiveness of using existing Savings Plans to apply to workloads that are Savings Plans eligible.
" + "smithy.api#documentation": "A ratio of your effectiveness of using existing Savings Plans to apply to workloads\n that are Savings Plans eligible.
" } }, "Savings": { "target": "com.amazonaws.costexplorer#SavingsPlansSavings", "traits": { - "smithy.api#documentation": "The amount saved by using existing Savings Plans. Savings returns both net savings from savings plans as well as the onDemandCostEquivalent
of the Savings Plans when considering the utilization rate.
The amount saved by using existing Savings Plans. Savings returns both net savings\n from savings plans as well as the onDemandCostEquivalent
of the Savings\n Plans when considering the utilization rate.
The total amortized commitment for a Savings Plans. Includes the sum of the upfront and recurring Savings Plans fees.
" + "smithy.api#documentation": "The total amortized commitment for a Savings Plans. Includes the sum of the upfront\n and recurring Savings Plans fees.
" } } }, "traits": { - "smithy.api#documentation": "A single daily or monthly Savings Plans utilization rate, and details for your account. A management account in an organization have access to member accounts. You can use GetDimensionValues
to determine the possible dimension values.
A single daily or monthly Savings Plans utilization rate, and details for your\n account. A management account in an organization has access to member accounts. You can\n use GetDimensionValues
to determine the possible dimension values.
The Amazon EC2 hardware specifications that you want AWS to provide recommendations\n for.
" + "smithy.api#documentation": "The Amazon EC2 hardware specifications that you want Amazon Web Services to provide\n recommendations for.
" } } }, "traits": { - "smithy.api#documentation": "Hardware specifications for the service that you want recommendations\n for.
" + "smithy.api#documentation": "Hardware specifications for the service that you want recommendations for.
" } }, "com.amazonaws.costexplorer#SortDefinition": { @@ -5893,14 +6040,14 @@ "Key": { "target": "com.amazonaws.costexplorer#SortDefinitionKey", "traits": { - "smithy.api#documentation": "The key by which to sort the data.
", + "smithy.api#documentation": "The key that's used to sort the data.
", "smithy.api#required": {} } }, "SortOrder": { "target": "com.amazonaws.costexplorer#SortOrder", "traits": { - "smithy.api#documentation": "The order in which to sort the data.
" + "smithy.api#documentation": "The order that's used to sort the data.
" } } }, @@ -5915,7 +6062,7 @@ "min": 0, "max": 1024 }, - "smithy.api#pattern": "[\\S\\s]*" + "smithy.api#pattern": "^[\\S\\s]*$" } }, "com.amazonaws.costexplorer#SortDefinitions": { @@ -5945,24 +6092,24 @@ "Address": { "target": "com.amazonaws.costexplorer#SubscriberAddress", "traits": { - "smithy.api#documentation": " The email address or SNS Amazon Resource Name (ARN), depending on the Type
.
The email address or SNS Amazon Resource Name (ARN). This depends on the\n Type
.
\n The notification delivery channel.\n
" + "smithy.api#documentation": "The notification delivery channel.
" } }, "Status": { "target": "com.amazonaws.costexplorer#SubscriberStatus", "traits": { - "smithy.api#documentation": "Indicates if the subscriber accepts the notifications.
" + "smithy.api#documentation": "Indicates if the subscriber accepts the notifications.
" } } }, "traits": { - "smithy.api#documentation": "\n The recipient of AnomalySubscription
notifications.\n
The recipient of AnomalySubscription
notifications.
The match options that you can use to filter your results.\n MatchOptions
is only applicable for actions related to Cost Category.\n The default values for MatchOptions
are EQUALS
and\n CASE_SENSITIVE
.
The match options that you can use to filter your results. MatchOptions
\n is only applicable for actions related to Cost Category. The default values for\n MatchOptions
are EQUALS
and\n CASE_SENSITIVE
.
The values that are available for a tag.
\n\t\tIf Values
and Key
are not specified, the ABSENT
\n MatchOption
is applied to all tags. That is, filtering on resources with no tags.
If Values
is provided and Key
is not specified, the ABSENT
\n MatchOption
is applied to the tag Key
only. That is, filtering on resources without the given tag key.
The values that are available for a tag.
\nIf Values
and Key
aren't specified, the ABSENT
\n MatchOption
is applied to all tags. That is, it's filtered on resources\n with no tags.
If Values
is provided and Key
isn't specified, the\n ABSENT
\n MatchOption
is applied to the tag Key
only. That is, it's\n filtered on resources without the given tag key.
Expected cost to operate this instance type on a monthly basis.
" + "smithy.api#documentation": "The expected cost to operate this instance type on a monthly basis.
" } }, "EstimatedMonthlySavings": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Estimated savings resulting from modification, on a monthly basis.
" + "smithy.api#documentation": "The estimated savings that result from modification, on a monthly basis.
" } }, "CurrencyCode": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The currency code that AWS used to calculate the costs for this instance.
" + "smithy.api#documentation": "The currency code that Amazon Web Services used to calculate the costs for this\n instance.
" } }, "DefaultTargetInstance": { "target": "com.amazonaws.costexplorer#GenericBoolean", "traits": { - "smithy.api#documentation": "Indicates whether this recommendation is the defaulted AWS recommendation.
" + "smithy.api#documentation": "Determines whether this recommendation is the defaulted Amazon Web Services\n recommendation.
" } }, "ResourceDetails": { "target": "com.amazonaws.costexplorer#ResourceDetails", "traits": { - "smithy.api#documentation": "Details on the target instance type.
" + "smithy.api#documentation": "Details on the target instance type.
" } }, "ExpectedResourceUtilization": { "target": "com.amazonaws.costexplorer#ResourceUtilization", "traits": { - "smithy.api#documentation": "Expected utilization metrics for target instance type.
" + "smithy.api#documentation": "The expected utilization metrics for target instance type.
" } }, "PlatformDifferences": { "target": "com.amazonaws.costexplorer#PlatformDifferences", "traits": { - "smithy.api#documentation": "\n Explains the actions you might need to take in order to successfully migrate your workloads from the current instance type to the recommended instance type.\n
" + "smithy.api#documentation": "Explains the actions you might need to take in order to successfully migrate your\n workloads from the current instance type to the recommended instance type.
" } } }, "traits": { - "smithy.api#documentation": "Details on recommended instance.
" + "smithy.api#documentation": "Details on recommended instance.
" } }, "com.amazonaws.costexplorer#TargetInstancesList": { @@ -6155,18 +6302,18 @@ "EstimatedMonthlySavings": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Estimated savings resulting from modification, on a monthly basis.
" + "smithy.api#documentation": "The estimated savings that result from modification, on a monthly basis.
" } }, "CurrencyCode": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "The currency code that AWS used to calculate the costs for this instance.
" + "smithy.api#documentation": "The currency code that Amazon Web Services used to calculate the costs for this\n instance.
" } } }, "traits": { - "smithy.api#documentation": "Details on termination recommendation.
" + "smithy.api#documentation": "Details on termination recommendation.
" } }, "com.amazonaws.costexplorer#TotalActualHours": { @@ -6184,26 +6331,26 @@ "NumericOperator": { "target": "com.amazonaws.costexplorer#NumericOperator", "traits": { - "smithy.api#documentation": "\n The comparing value used in the filter.\n
", + "smithy.api#documentation": "The comparing value that's used in the filter.
", "smithy.api#required": {} } }, "StartValue": { "target": "com.amazonaws.costexplorer#GenericDouble", "traits": { - "smithy.api#documentation": "\n The lower bound dollar value used in the filter.\n
", + "smithy.api#documentation": "The lower bound dollar value that's used in the filter.
", "smithy.api#required": {} } }, "EndValue": { "target": "com.amazonaws.costexplorer#GenericDouble", "traits": { - "smithy.api#documentation": "The upper bound dollar value used in the filter.
" + "smithy.api#documentation": "The upper bound dollar value that's used in the filter.
" } } }, "traits": { - "smithy.api#documentation": "Filters cost anomalies based on the total impact.
" + "smithy.api#documentation": "Filters cost anomalies based on the total impact.
" } }, "com.amazonaws.costexplorer#TotalPotentialRISavings": { @@ -6279,7 +6426,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing cost anomaly monitor. The changes made are applied going forward, and\n does not change anomalies detected in the past.
" + "smithy.api#documentation": "Updates an existing cost anomaly monitor. The changes made are applied going forward, and\n doesn'tt change anomalies detected in the past.
" } }, "com.amazonaws.costexplorer#UpdateAnomalyMonitorRequest": { @@ -6288,14 +6435,14 @@ "MonitorArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "Cost anomaly monitor Amazon Resource Names (ARNs).
", + "smithy.api#documentation": "Cost anomaly monitor Amazon Resource Names (ARNs).
", "smithy.api#required": {} } }, "MonitorName": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The new name for the cost anomaly monitor.\n
" + "smithy.api#documentation": "The new name for the cost anomaly monitor.
" } } } @@ -6306,7 +6453,7 @@ "MonitorArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n A cost anomaly monitor ARN.\n
", + "smithy.api#documentation": "A cost anomaly monitor ARN.
", "smithy.api#required": {} } } @@ -6332,7 +6479,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing cost anomaly monitor subscription.
" + "smithy.api#documentation": "Updates an existing cost anomaly monitor subscription.
" } }, "com.amazonaws.costexplorer#UpdateAnomalySubscriptionRequest": { @@ -6341,38 +6488,38 @@ "SubscriptionArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "A cost anomaly subscription Amazon Resource Name (ARN).
", + "smithy.api#documentation": "A cost anomaly subscription Amazon Resource Name (ARN).
", "smithy.api#required": {} } }, "Threshold": { "target": "com.amazonaws.costexplorer#NullableNonNegativeDouble", "traits": { - "smithy.api#documentation": "\n The update to the threshold value for receiving notifications.\n
" + "smithy.api#documentation": "The update to the threshold value for receiving notifications.
" } }, "Frequency": { "target": "com.amazonaws.costexplorer#AnomalySubscriptionFrequency", "traits": { - "smithy.api#documentation": "\n The update to the frequency value at which subscribers will receive notifications.\n
" + "smithy.api#documentation": "The update to the frequency value that subscribers receive notifications.
" } }, "MonitorArnList": { "target": "com.amazonaws.costexplorer#MonitorArnList", "traits": { - "smithy.api#documentation": "\n A list of cost anomaly monitor ARNs.\n
" + "smithy.api#documentation": "A list of cost anomaly monitor ARNs.
" } }, "Subscribers": { "target": "com.amazonaws.costexplorer#Subscribers", "traits": { - "smithy.api#documentation": "\n The update to the subscriber list.\n
" + "smithy.api#documentation": "The update to the subscriber list.
" } }, "SubscriptionName": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n The subscription's new name.\n
" + "smithy.api#documentation": "The new name of the subscription.
" } } } @@ -6383,7 +6530,7 @@ "SubscriptionArn": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "\n A cost anomaly subscription ARN.\n
", + "smithy.api#documentation": "A cost anomaly subscription ARN.
", "smithy.api#required": {} } } @@ -6437,6 +6584,12 @@ }, "DefaultValue": { "target": "com.amazonaws.costexplorer#CostCategoryValue" + }, + "SplitChargeRules": { + "target": "com.amazonaws.costexplorer#CostCategorySplitChargeRulesList", + "traits": { + "smithy.api#documentation": "\n The split charge rules used to allocate your charges between your Cost Category values.\n
" + } } } }, @@ -6502,7 +6655,7 @@ "min": 0, "max": 1024 }, - "smithy.api#pattern": "[\\S\\s]*" + "smithy.api#pattern": "^[\\S\\s]*$" } }, "com.amazonaws.costexplorer#Values": { @@ -6518,7 +6671,7 @@ "min": 0, "max": 40 }, - "smithy.api#pattern": "(\\d{4}-\\d{2}-\\d{2})(T\\d{2}:\\d{2}:\\d{2}Z)?" + "smithy.api#pattern": "^(\\d{4}-\\d{2}-\\d{2})(T\\d{2}:\\d{2}:\\d{2}Z)?$" } }, "com.amazonaws.costexplorer#ZonedDateTime": { diff --git a/codegen/sdk-codegen/aws-models/customerprofiles.2020-08-15.json b/codegen/sdk-codegen/aws-models/customerprofiles.2020-08-15.json index 0efb35024e6..95673a44778 100644 --- a/codegen/sdk-codegen/aws-models/customerprofiles.2020-08-15.json +++ b/codegen/sdk-codegen/aws-models/customerprofiles.2020-08-15.json @@ -233,7 +233,7 @@ "min": 3, "max": 63 }, - "smithy.api#pattern": "\\S+" + "smithy.api#pattern": "^\\S+$" } }, "com.amazonaws.customerprofiles#BucketPrefix": { @@ -291,7 +291,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "[\\w/!@#+=.-]+" + "smithy.api#pattern": "^[\\w/!@#+=.-]+$" } }, "com.amazonaws.customerprofiles#CreateDomain": { @@ -320,7 +320,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a domain, which is a container for all customer data, such as customer profile\n attributes, object types, profile keys, and encryption keys. You can create multiple\n domains, and each domain can have multiple third-party integrations.
\nEach Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can\n be associated with one domain.
", + "smithy.api#documentation": "Creates a domain, which is a container for all customer data, such as customer profile\n attributes, object types, profile keys, and encryption keys. You can create multiple\n domains, and each domain can have multiple third-party integrations.
\nEach Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can\n be associated with one domain.
\nUse this API or UpdateDomain to\n enable identity\n resolution: set Matching
to true.
The process of matching duplicate profiles. This process runs every Saturday at 12AM.
" + "smithy.api#documentation": "The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. \nAfter that batch process completes, use the \nGetMatches\nAPI to return and review the results.
" } }, "Tags": { @@ -404,7 +404,7 @@ "Matching": { "target": "com.amazonaws.customerprofiles#MatchingResponse", "traits": { - "smithy.api#documentation": "The process of matching duplicate profiles. This process runs every Saturday at 12AM.
" + "smithy.api#documentation": "The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. \nAfter that batch process completes, use the \nGetMatches\nAPI to return and review the results.
" } }, "CreatedAt": { @@ -1448,7 +1448,7 @@ "min": 0, "max": 2048 }, - "smithy.api#pattern": "[\\w!@#\\-.?,\\s]*" + "smithy.api#pattern": "^[\\w!@#\\-.?,\\s]*$" } }, "com.amazonaws.customerprofiles#FlowName": { @@ -1458,7 +1458,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "[a-zA-Z0-9][\\w!@#.-]+" + "smithy.api#pattern": "^[a-zA-Z0-9][\\w!@#.-]+$" } }, "com.amazonaws.customerprofiles#Gender": { @@ -1564,7 +1564,7 @@ "Matching": { "target": "com.amazonaws.customerprofiles#MatchingResponse", "traits": { - "smithy.api#documentation": "The process of matching duplicate profiles. This process runs every Saturday at 12AM.
" + "smithy.api#documentation": "The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. \nAfter that batch process completes, use the \nGetMatches\nAPI to return and review the results.
" } }, "CreatedAt": { @@ -1715,7 +1715,7 @@ } ], "traits": { - "smithy.api#documentation": "This API is in preview release for Amazon Connect and subject to change.
\nBefore calling this API, use CreateDomain or\n UpdateDomain to\n enable identity resolution: set Matching
to true.
GetMatches returns potentially matching profiles, based on the results of the latest run\n of a machine learning process.
\nAmazon Connect runs a batch process every Saturday at 12AM UTC to identify matching profiles.\n The results are returned up to seven days after the Saturday run.
\nAmazon Connect uses the following profile attributes to identify matches:
\nPhoneNumber
\nHomePhoneNumber
\nBusinessPhoneNumber
\nMobilePhoneNumber
\nEmailAddress
\nPersonalEmailAddress
\nBusinessEmailAddress
\nFullName
\nBusinessName
\nThis API is in preview release for Amazon Connect and subject to change.
\nBefore calling this API, use CreateDomain or\n UpdateDomain to\n enable identity resolution: set Matching
to true.
GetMatches returns potentially matching profiles, based on the results of the latest run\n of a machine learning process.
\nAmazon Connect starts a batch process every Saturday at 12AM UTC to identify matching profiles.\n The results are returned up to seven days after the Saturday run.
\nAmazon Connect uses the following profile attributes to identify matches:
\nPhoneNumber
\nHomePhoneNumber
\nBusinessPhoneNumber
\nMobilePhoneNumber
\nEmailAddress
\nPersonalEmailAddress
\nBusinessEmailAddress
\nFullName
\nBusinessName
\nFor example, two or more profiles—with spelling mistakes such as John Doe and Jhn Doe, or different casing\n email addresses such as JOHN_DOE@ANYCOMPANY.COM and\n johndoe@anycompany.com, or different phone number\n formats such as 555-010-0000 and +1-555-010-0000—can be detected as belonging to the same customer John Doe and merged into a unified profile.
", "smithy.api#http": { "method": "GET", "uri": "/domains/{DomainName}/matches", @@ -2044,7 +2044,7 @@ "min": 20, "max": 2048 }, - "smithy.api#pattern": "arn:aws:kms:.*:[0-9]+:.*" + "smithy.api#pattern": "^arn:aws:kms:.*:[0-9]+:" } }, "com.amazonaws.customerprofiles#ListAccountIntegrations": { @@ -2668,6 +2668,12 @@ "smithy.api#documentation": "The unique identifier of a customer profile.
", "smithy.api#required": {} } + }, + "ObjectFilter": { + "target": "com.amazonaws.customerprofiles#ObjectFilter", + "traits": { + "smithy.api#documentation": "Applies a filter to the response to include profile objects with the specified index values.\n This filter is only supported for ObjectTypeName _asset and _case.
" + } } } }, @@ -2940,7 +2946,7 @@ "FieldSourceProfileIds": { "target": "com.amazonaws.customerprofiles#FieldSourceProfileIds", "traits": { - "smithy.api#documentation": "The identifiers of the fields in the profile that has the information you want to apply to the\n merge. For example, say you want to merge EmailAddress from Profile1 into MainProfile. This would be the \n identifier of the EmailAddress field in Profile1.
" + "smithy.api#documentation": "The identifiers of the fields in the profile that has the information you want to apply\n to the merge. For example, say you want to merge EmailAddress from Profile1 into\n MainProfile. This would be the identifier of the EmailAddress field in Profile1.
" } } } @@ -2963,7 +2969,29 @@ "min": 0, "max": 512 }, - "smithy.api#pattern": "\\S+" + "smithy.api#pattern": "^\\S+$" + } + }, + "com.amazonaws.customerprofiles#ObjectFilter": { + "type": "structure", + "members": { + "KeyName": { + "target": "com.amazonaws.customerprofiles#name", + "traits": { + "smithy.api#documentation": "A searchable identifier of a standard profile object. The predefined keys you can use to search for _asset include: _assetId, _assetName, _serialNumber.\n The predefined keys you can use to search for _case include: _caseId.
", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.customerprofiles#requestValueList", + "traits": { + "smithy.api#documentation": "A list of key values.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The filter applied to ListProfileObjects response to include profile objects with the specified index values.\n This filter is only supported for ObjectTypeName _asset and _case.
" } }, "com.amazonaws.customerprofiles#ObjectTypeField": { @@ -2998,7 +3026,7 @@ "StandardIdentifiers": { "target": "com.amazonaws.customerprofiles#StandardIdentifierList", "traits": { - "smithy.api#documentation": "The types of keys that a ProfileObject can have. Each ProfileObject can have only 1\n UNIQUE key but multiple PROFILE keys. PROFILE means that this key can be used to tie an\n object to a PROFILE. UNIQUE means that it can be used to uniquely identify an object. If a\n key a is marked as SECONDARY, it will be used to search for profiles after all other\n PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a profile but is\n not persisted to be used for searching of the profile. A NEW_ONLY key is only used if the\n profile does not already exist before the object is ingested, otherwise it is only used for\n matching objects to profiles.
" + "smithy.api#documentation": "The types of keys that a ProfileObject can have. Each ProfileObject can have only 1\n UNIQUE key but multiple PROFILE keys. PROFILE, ASSET or CASE means that this key can be used to tie an\n object to a PROFILE, ASSET or CASE respectively. UNIQUE means that it can be used to uniquely identify an object.\n If a key a is marked as SECONDARY, it will be used to search for profiles after all other\n PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a profile but is\n not persisted to be used for searching of the profile. A NEW_ONLY key is only used if the\n profile does not already exist before the object is ingested, otherwise it is only used for\n matching objects to profiles.
" } }, "FieldNames": { @@ -3289,7 +3317,7 @@ "min": 0, "max": 2048 }, - "smithy.api#pattern": ".+" + "smithy.api#pattern": "^.+$" } }, "com.amazonaws.customerprofiles#PutIntegration": { @@ -4280,6 +4308,14 @@ "value": "PROFILE", "name": "PROFILE" }, + { + "value": "ASSET", + "name": "ASSET" + }, + { + "value": "CASE", + "name": "CASE" + }, { "value": "UNIQUE", "name": "UNIQUE" @@ -4312,7 +4348,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "^arn:[a-z0-9]{1,10}:profile" + "smithy.api#pattern": "^arn:[a-z0-9]{1,10}:profile$" } }, "com.amazonaws.customerprofiles#TagKey": { @@ -4736,7 +4772,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the properties of a domain, including creating or selecting a dead letter queue\n or an encryption key.
\nAfter a domain is created, the name can’t be changed.
", + "smithy.api#documentation": "Updates the properties of a domain, including creating or selecting a dead letter queue\n or an encryption key.
\nAfter a domain is created, the name can’t be changed.
\nUse this API or CreateDomain to\n enable identity\n resolution: set Matching
to true.
The process of matching duplicate profiles. This process runs every Saturday at 12AM.
" + "smithy.api#documentation": "The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. \nAfter that batch process completes, use the \nGetMatches\nAPI to return and review the results.
" } }, "Tags": { @@ -4818,7 +4854,7 @@ "Matching": { "target": "com.amazonaws.customerprofiles#MatchingResponse", "traits": { - "smithy.api#documentation": "The process of matching duplicate profiles. This process runs every Saturday at 12AM.
" + "smithy.api#documentation": "The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. \nAfter that batch process completes, use the \nGetMatches\nAPI to return and review the results.
" } }, "CreatedAt": { @@ -5280,7 +5316,7 @@ "com.amazonaws.customerprofiles#uuid": { "type": "string", "traits": { - "smithy.api#pattern": "[a-f0-9]{32}" + "smithy.api#pattern": "^[a-f0-9]{32}$" } } } diff --git a/codegen/sdk-codegen/aws-models/databrew.2017-07-25.json b/codegen/sdk-codegen/aws-models/databrew.2017-07-25.json index 65356fd3c3e..6c2d803237b 100644 --- a/codegen/sdk-codegen/aws-models/databrew.2017-07-25.json +++ b/codegen/sdk-codegen/aws-models/databrew.2017-07-25.json @@ -3989,6 +3989,10 @@ { "value": "XML", "name": "XML" + }, + { + "value": "TABLEAUHYPER", + "name": "TABLEAUHYPER" } ] } diff --git a/codegen/sdk-codegen/aws-models/directoryservice.2015-04-16.json b/codegen/sdk-codegen/aws-models/directoryservice.2015-04-16.json index ac22439b941..49f84c048fa 100644 --- a/codegen/sdk-codegen/aws-models/directoryservice.2015-04-16.json +++ b/codegen/sdk-codegen/aws-models/directoryservice.2015-04-16.json @@ -137,7 +137,7 @@ } ], "traits": { - "smithy.api#documentation": "If the DNS server for your on-premises domain uses a publicly addressable IP address, you must add a CIDR address block to correctly route traffic to and from your Microsoft AD on Amazon Web Services. AddIpRoutes adds this address block. You can also use AddIpRoutes to facilitate routing traffic that uses public IP ranges from your Microsoft AD on AWS to a peer VPC.
\nBefore you call AddIpRoutes, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the AddIpRoutes operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.
" + "smithy.api#documentation": "If the DNS server for your self-managed domain uses a publicly addressable IP address,\n you must add a CIDR address block to correctly route traffic to and from your Microsoft AD\n on Amazon Web Services. AddIpRoutes adds this address block. You can\n also use AddIpRoutes to facilitate routing traffic that uses public IP\n ranges from your Microsoft AD on Amazon Web Services to a peer VPC.
\nBefore you call AddIpRoutes, ensure that all of the required\n permissions have been explicitly granted through a policy. For details about what\n permissions are required to run the AddIpRoutes operation, see Directory Service API Permissions: Actions, Resources, and Conditions Reference.
" } }, "com.amazonaws.directoryservice#AddIpRoutesRequest": { @@ -153,14 +153,14 @@ "IpRoutes": { "target": "com.amazonaws.directoryservice#IpRoutes", "traits": { - "smithy.api#documentation": "IP address blocks, using CIDR format, of the traffic to route. This is often the IP address block of the DNS server used for your on-premises domain.
", + "smithy.api#documentation": "IP address blocks, using CIDR format, of the traffic to route. This is often the IP\n address block of the DNS server used for your self-managed domain.
", "smithy.api#required": {} } }, "UpdateSecurityGroupForDirectoryControllers": { "target": "com.amazonaws.directoryservice#UpdateSecurityGroupForDirectoryControllers", "traits": { - "smithy.api#documentation": "If set to true, updates the inbound and outbound rules of the security group that has the description: \"AWS created security group for directory ID directory controllers.\"\n Following are the new rules:\n
\nInbound:
\nType: Custom UDP Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 123, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 138, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 389, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 464, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 445, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 88, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 135, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 445, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 464, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 636, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: 0.0.0.0/0
\nType: DNS (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0
\nType: DNS (TCP), Protocol: TCP, Range: 53, Source: 0.0.0.0/0
\nType: LDAP, Protocol: TCP, Range: 389, Source: 0.0.0.0/0
\nType: All ICMP, Protocol: All, Range: N/A, Source: 0.0.0.0/0
\nOutbound:
\nType: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0
\nThese security rules impact an internal network interface that is not exposed publicly.
" + "smithy.api#documentation": "If set to true, updates the inbound and outbound rules of the security group that has\n the description: \"Amazon Web Services created security group for directory ID\n directory controllers.\" Following are the new rules:
\nInbound:
\nType: Custom UDP Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 123, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 138, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 389, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 464, Source: 0.0.0.0/0
\nType: Custom UDP Rule, Protocol: UDP, Range: 445, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 88, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 135, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 445, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 464, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 636, Source: 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source:\n 0.0.0.0/0
\nType: Custom TCP Rule, Protocol: TCP, Range: 3268-3269, Source:\n 0.0.0.0/0
\nType: DNS (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0
\nType: DNS (TCP), Protocol: TCP, Range: 53, Source: 0.0.0.0/0
\nType: LDAP, Protocol: TCP, Range: 389, Source: 0.0.0.0/0
\nType: All ICMP, Protocol: All, Range: N/A, Source: 0.0.0.0/0
\nOutbound:
\nType: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0
\nThese security rules impact an internal network interface that is not exposed\n publicly.
" } } } @@ -268,7 +268,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds or overwrites one or more tags for the specified directory. Each directory can have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be unique to each resource.
" + "smithy.api#documentation": "Adds or overwrites one or more tags for the specified directory. Each directory can\n have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be\n unique to each resource.
" } }, "com.amazonaws.directoryservice#AddTagsToResourceRequest": { @@ -310,7 +310,7 @@ "min": 1, "max": 62 }, - "smithy.api#pattern": "^(?!d-)([\\da-zA-Z]+)([-]*[\\da-zA-Z])*" + "smithy.api#pattern": "^(?!D-|d-)([\\da-zA-Z]+)([-]*[\\da-zA-Z])*$" } }, "com.amazonaws.directoryservice#Attribute": { @@ -400,7 +400,7 @@ } ], "traits": { - "smithy.api#documentation": "Cancels an in-progress schema extension to a Microsoft AD directory. Once a schema extension has started replicating to all domain controllers, the task can no longer be canceled. A schema extension can be canceled during any of the following states; Initializing
, CreatingSnapshot
, and UpdatingSchema
.
Cancels an in-progress schema extension to a Microsoft AD directory. Once a schema\n extension has started replicating to all domain controllers, the task can no longer be\n canceled. A schema extension can be canceled during any of the following states;\n Initializing
, CreatingSnapshot
, and\n UpdatingSchema
.
The type of client authentication for the specified directory. If no type is specified, a list of all client authentication types that are supported for the directory is retrieved.
" + } + }, + "Status": { + "target": "com.amazonaws.directoryservice#ClientAuthenticationStatus", + "traits": { + "smithy.api#documentation": "Whether the client authentication type is enabled or disabled for the specified directory.
" + } + }, + "LastUpdatedDateTime": { + "target": "com.amazonaws.directoryservice#LastUpdatedDateTime", + "traits": { + "smithy.api#documentation": "The date and time when the status of the client authentication type was last updated.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains information about a client authentication method for a directory.
" + } + }, + "com.amazonaws.directoryservice#ClientAuthenticationSettingsInfo": { + "type": "list", + "member": { + "target": "com.amazonaws.directoryservice#ClientAuthenticationSettingInfo" + } + }, + "com.amazonaws.directoryservice#ClientAuthenticationStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Enabled", + "name": "ENABLED" + }, + { + "value": "Disabled", + "name": "DISABLED" + } + ] + } + }, "com.amazonaws.directoryservice#ClientAuthenticationType": { "type": "string", "traits": { @@ -756,7 +803,7 @@ "min": 8, "max": 64 }, - "smithy.api#pattern": "[\\u0020-\\u00FF]+", + "smithy.api#pattern": "^[\\u0020-\\u00FF]+$", "smithy.api#sensitive": {} } }, @@ -766,24 +813,24 @@ "RemoteDomainName": { "target": "com.amazonaws.directoryservice#RemoteDomainName", "traits": { - "smithy.api#documentation": "The fully qualified domain name (FQDN) of the remote domains pointed to by the conditional forwarder.
" + "smithy.api#documentation": "The fully qualified domain name (FQDN) of the remote domains pointed to by the\n conditional forwarder.
" } }, "DnsIpAddrs": { "target": "com.amazonaws.directoryservice#DnsIpAddrs", "traits": { - "smithy.api#documentation": "The IP addresses of the remote DNS server associated with RemoteDomainName. This is the IP address of the DNS server that your conditional forwarder points to.
" + "smithy.api#documentation": "The IP addresses of the remote DNS server associated with RemoteDomainName. This is the\n IP address of the DNS server that your conditional forwarder points to.
" } }, "ReplicationScope": { "target": "com.amazonaws.directoryservice#ReplicationScope", "traits": { - "smithy.api#documentation": "The replication scope of the conditional forwarder. The only allowed value is\n Domain
, which will replicate the conditional forwarder to all of the\n domain controllers for your AWS directory.
The replication scope of the conditional forwarder. The only allowed value is\n Domain
, which will replicate the conditional forwarder to all of the domain\n controllers for your Amazon Web Services directory.
Points to a remote domain with which you are setting up a trust relationship. Conditional forwarders are required in order to set up a trust relationship with another domain.
" + "smithy.api#documentation": "Points to a remote domain with which you are setting up a trust relationship.\n Conditional forwarders are required in order to set up a trust relationship with another\n domain.
" } }, "com.amazonaws.directoryservice#ConditionalForwarders": { @@ -815,7 +862,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an AD Connector to connect to an on-premises directory.
\nBefore you call ConnectDirectory
, ensure that all of the required permissions\n have been explicitly granted through a policy. For details about what permissions are required\n to run the ConnectDirectory
operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions\n Reference.
Creates an AD Connector to connect to a self-managed directory.
\nBefore you call ConnectDirectory
, ensure that all of the required permissions\n have been explicitly granted through a policy. For details about what permissions are required\n to run the ConnectDirectory
operation, see Directory Service API Permissions: Actions, Resources, and Conditions\n Reference.
The fully qualified name of the on-premises directory, such as\n corp.example.com
.
The fully qualified name of your self-managed directory, such as\n corp.example.com
.
The NetBIOS name of the on-premises directory, such as CORP
.
The NetBIOS name of your self-managed directory, such as CORP
.
The password for the on-premises user account.
", + "smithy.api#documentation": "The password for your self-managed user account.
", "smithy.api#required": {} } }, @@ -941,7 +988,7 @@ "Alias": { "target": "com.amazonaws.directoryservice#AliasName", "traits": { - "smithy.api#documentation": "The requested alias.
\nThe alias must be unique amongst all aliases in AWS. This operation throws an\n EntityAlreadyExistsException
error if the alias already exists.
The requested alias.
\nThe alias must be unique amongst all aliases in Amazon Web Services. This operation throws an\n EntityAlreadyExistsException
error if the alias already exists.
Creates a conditional forwarder associated with your AWS directory. Conditional forwarders are required in order to set up a trust relationship with another domain. The conditional forwarder points to the trusted domain.
" + "smithy.api#documentation": "Creates a conditional forwarder associated with your Amazon Web Services directory. Conditional\n forwarders are required in order to set up a trust relationship with another domain. The\n conditional forwarder points to the trusted domain.
" } }, "com.amazonaws.directoryservice#CreateConditionalForwarderRequest": { @@ -1104,14 +1151,14 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The directory ID of the AWS directory for which you are creating the conditional forwarder.
", + "smithy.api#documentation": "The directory ID of the Amazon Web Services directory for which you are creating the conditional\n forwarder.
", "smithy.api#required": {} } }, "RemoteDomainName": { "target": "com.amazonaws.directoryservice#RemoteDomainName", "traits": { - "smithy.api#documentation": "The fully qualified domain name (FQDN) of the remote domain with which you will set up a trust relationship.
", + "smithy.api#documentation": "The fully qualified domain name (FQDN) of the remote domain with which you will set up\n a trust relationship.
", "smithy.api#required": {} } }, @@ -1124,7 +1171,7 @@ } }, "traits": { - "smithy.api#documentation": "Initiates the creation of a conditional forwarder for your AWS Directory Service for Microsoft Active Directory. Conditional forwarders are required in order to set up a trust relationship with another domain.
" + "smithy.api#documentation": "Initiates the creation of a conditional forwarder for your Directory Service for Microsoft Active\n Directory. Conditional forwarders are required in order to set up a trust relationship with\n another domain.
" } }, "com.amazonaws.directoryservice#CreateConditionalForwarderResult": { @@ -1157,7 +1204,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a Simple AD directory. For more information, see Simple Active Directory in the AWS Directory Service Admin\n Guide.
\nBefore you call CreateDirectory
, ensure that all of the required permissions\n have been explicitly granted through a policy. For details about what permissions are required\n to run the CreateDirectory
operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions\n Reference.
Creates a Simple AD directory. For more information, see Simple Active Directory in the Directory Service Admin\n Guide.
\nBefore you call CreateDirectory
, ensure that all of the required permissions\n have been explicitly granted through a policy. For details about what permissions are required\n to run the CreateDirectory
operation, see Directory Service API Permissions: Actions, Resources, and Conditions\n Reference.
Creates a subscription to forward real-time Directory Service domain controller security\n logs to the specified Amazon CloudWatch log group in your AWS account.
" + "smithy.api#documentation": "Creates a subscription to forward real-time Directory Service domain controller security\n logs to the specified Amazon CloudWatch log group in your Amazon Web Services account.
" } }, "com.amazonaws.directoryservice#CreateLogSubscriptionRequest": { @@ -1308,7 +1355,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a Microsoft AD directory in the AWS Cloud. For more information, see AWS Managed Microsoft AD in the AWS Directory Service Admin Guide.
\nBefore you call CreateMicrosoftAD, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the CreateMicrosoftAD operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.
" + "smithy.api#documentation": "Creates a Microsoft AD directory in the Amazon Web Services Cloud. For more information, see Managed Microsoft AD in the Directory Service Admin Guide.
\nBefore you call CreateMicrosoftAD, ensure that all of the required\n permissions have been explicitly granted through a policy. For details about what permissions\n are required to run the CreateMicrosoftAD operation, see Directory Service API Permissions: Actions, Resources, and Conditions Reference.
" } }, "com.amazonaws.directoryservice#CreateMicrosoftADRequest": { @@ -1317,14 +1364,14 @@ "Name": { "target": "com.amazonaws.directoryservice#DirectoryName", "traits": { - "smithy.api#documentation": "The fully qualified domain name for the AWS Managed Microsoft AD directory, such as\n corp.example.com
. This name will resolve inside your VPC only. It does not need\n to be publicly resolvable.
The fully qualified domain name for the Managed Microsoft AD directory, such as\n corp.example.com
. This name will resolve inside your VPC only. It does not need\n to be publicly resolvable.
The NetBIOS name for your domain, such as CORP
. If you don't specify a NetBIOS name, it will default to the first part of your directory DNS. For example, CORP
for the directory DNS corp.example.com
.
The NetBIOS name for your domain, such as CORP
. If you don't specify a\n NetBIOS name, it will default to the first part of your directory DNS. For example,\n CORP
for the directory DNS corp.example.com
.
A description for the directory. This label will appear on the AWS console Directory Details
page after the directory is created.
A description for the directory. This label will appear on the Amazon Web Services console\n Directory Details
page after the directory is created.
AWS Managed Microsoft AD is available in two editions: Standard
and Enterprise
. Enterprise
is the default.
Managed Microsoft AD is available in two editions: Standard
and\n Enterprise
. Enterprise
is the default.
The tags to be assigned to the AWS Managed Microsoft AD directory.
" + "smithy.api#documentation": "The tags to be assigned to the Managed Microsoft AD directory.
" } } }, "traits": { - "smithy.api#documentation": "Creates an AWS Managed Microsoft AD directory.
" + "smithy.api#documentation": "Creates a Managed Microsoft AD directory.
" } }, "com.amazonaws.directoryservice#CreateMicrosoftADResult": { @@ -1404,7 +1451,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a snapshot of a Simple AD or Microsoft AD directory in the AWS cloud.
\nYou cannot take snapshots of AD Connector directories.
\nCreates a snapshot of a Simple AD or Microsoft AD directory in the Amazon Web Services cloud.
\nYou cannot take snapshots of AD Connector directories.
\nAWS Directory Service for Microsoft Active Directory allows you to configure trust relationships. For example, you can establish a trust between your AWS Managed Microsoft AD directory, and your existing on-premises Microsoft Active Directory. This would allow you to provide users and groups access to resources in either domain, with a single set of credentials.
\nThis action initiates the creation of the AWS side of a trust relationship between an AWS Managed Microsoft AD directory and an external domain. You can create either a forest trust or an external trust.
" + "smithy.api#documentation": "Directory Service for Microsoft Active Directory allows you to configure trust relationships. For\n example, you can establish a trust between your Managed Microsoft AD directory, and your existing\n self-managed Microsoft Active Directory. This would allow you to provide users and groups\n access to resources in either domain, with a single set of credentials.
\nThis action initiates the creation of the Amazon Web Services side of a trust relationship between a\n Managed Microsoft AD directory and an external domain. You can create either a forest trust or an\n external trust.
" } }, "com.amazonaws.directoryservice#CreateTrustRequest": { @@ -1483,21 +1530,21 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The Directory ID of the AWS Managed Microsoft AD directory for which to establish the trust relationship.
", + "smithy.api#documentation": "The Directory ID of the Managed Microsoft AD directory for which to establish the trust\n relationship.
", "smithy.api#required": {} } }, "RemoteDomainName": { "target": "com.amazonaws.directoryservice#RemoteDomainName", "traits": { - "smithy.api#documentation": "The Fully Qualified Domain Name (FQDN) of the external domain for which to create the trust relationship.
", + "smithy.api#documentation": "The Fully Qualified Domain Name (FQDN) of the external domain for which to create the\n trust relationship.
", "smithy.api#required": {} } }, "TrustPassword": { "target": "com.amazonaws.directoryservice#TrustPassword", "traits": { - "smithy.api#documentation": "The trust password. The must be the same password that was used when creating the trust relationship on the external domain.
", + "smithy.api#documentation": "The trust password. This must be the same password that was used when creating the trust\n relationship on the external domain.
", "smithy.api#required": {} } }, @@ -1528,7 +1575,7 @@ } }, "traits": { - "smithy.api#documentation": "AWS Directory Service for Microsoft Active Directory allows you to configure trust relationships. For example, you can establish a trust between your AWS Managed Microsoft AD directory, and your existing on-premises Microsoft Active Directory. This would allow you to provide users and groups access to resources in either domain, with a single set of credentials.
\nThis action initiates the creation of the AWS side of a trust relationship between an AWS Managed Microsoft AD directory and an external domain.
" + "smithy.api#documentation": "Directory Service for Microsoft Active Directory allows you to configure trust relationships. For\n example, you can establish a trust between your Managed Microsoft AD directory, and your existing\n self-managed Microsoft Active Directory. This would allow you to provide users and groups\n access to resources in either domain, with a single set of credentials.
\nThis action initiates the creation of the Amazon Web Services side of a trust relationship between a\n Managed Microsoft AD directory and an external domain.
" } }, "com.amazonaws.directoryservice#CreateTrustResult": { @@ -1596,7 +1643,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a conditional forwarder that has been set up for your AWS directory.
" + "smithy.api#documentation": "Deletes a conditional forwarder that has been set up for your Amazon Web Services\n directory.
" } }, "com.amazonaws.directoryservice#DeleteConditionalForwarderRequest": { @@ -1612,7 +1659,7 @@ "RemoteDomainName": { "target": "com.amazonaws.directoryservice#RemoteDomainName", "traits": { - "smithy.api#documentation": "The fully qualified domain name (FQDN) of the remote domain with which you are deleting the conditional forwarder.
", + "smithy.api#documentation": "The fully qualified domain name (FQDN) of the remote domain with which you are deleting\n the conditional forwarder.
", "smithy.api#required": {} } } @@ -1648,7 +1695,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an AWS Directory Service directory.
\nBefore you call DeleteDirectory
, ensure that all of the required permissions\n have been explicitly granted through a policy. For details about what permissions are required\n to run the DeleteDirectory
operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions\n Reference.
Deletes an Directory Service directory.
\nBefore you call DeleteDirectory
, ensure that all of the required permissions\n have been explicitly granted through a policy. For details about what permissions are required\n to run the DeleteDirectory
operation, see Directory Service API Permissions: Actions, Resources, and Conditions\n Reference.
Deletes an existing trust relationship between your AWS Managed Microsoft AD directory and an external domain.
" + "smithy.api#documentation": "Deletes an existing trust relationship between your Managed Microsoft AD directory and an external\n domain.
" } }, "com.amazonaws.directoryservice#DeleteTrustRequest": { @@ -1824,7 +1871,7 @@ } }, "traits": { - "smithy.api#documentation": "Deletes the local side of an existing trust relationship between the AWS Managed Microsoft AD directory and the external domain.
" + "smithy.api#documentation": "Deletes the local side of an existing trust relationship between the Managed Microsoft AD\n directory and the external domain.
" } }, "com.amazonaws.directoryservice#DeleteTrustResult": { @@ -1925,7 +1972,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes the specified directory as a publisher to the specified SNS topic.
" + "smithy.api#documentation": "Removes the specified directory as a publisher to the specified Amazon SNS topic.
" } }, "com.amazonaws.directoryservice#DeregisterEventTopicRequest": { @@ -1934,20 +1981,20 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The Directory ID to remove as a publisher. This directory will no longer send messages to the specified SNS topic.
", + "smithy.api#documentation": "The Directory ID to remove as a publisher. This directory will no longer send messages\n to the specified Amazon SNS topic.
", "smithy.api#required": {} } }, "TopicName": { "target": "com.amazonaws.directoryservice#TopicName", "traits": { - "smithy.api#documentation": "The name of the SNS topic from which to remove the directory as a publisher.
", + "smithy.api#documentation": "The name of the Amazon SNS topic from which to remove the directory as a\n publisher.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Removes the specified directory as a publisher to the specified SNS topic.
" + "smithy.api#documentation": "Removes the specified directory as a publisher to the specified Amazon SNS topic.
" } }, "com.amazonaws.directoryservice#DeregisterEventTopicResult": { @@ -2019,6 +2066,85 @@ } } }, + "com.amazonaws.directoryservice#DescribeClientAuthenticationSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservice#DescribeClientAuthenticationSettingsRequest" + }, + "output": { + "target": "com.amazonaws.directoryservice#DescribeClientAuthenticationSettingsResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservice#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservice#ClientException" + }, + { + "target": "com.amazonaws.directoryservice#DirectoryDoesNotExistException" + }, + { + "target": "com.amazonaws.directoryservice#InvalidParameterException" + }, + { + "target": "com.amazonaws.directoryservice#ServiceException" + }, + { + "target": "com.amazonaws.directoryservice#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieves information about the type of client authentication for the specified directory, if the type is specified. If no type is specified, information about all client authentication types that are supported for the specified directory is retrieved. Currently, only SmartCard
is supported.\n
The identifier of the directory for which to retrieve information.
", + "smithy.api#required": {} + } + }, + "Type": { + "target": "com.amazonaws.directoryservice#ClientAuthenticationType", + "traits": { + "smithy.api#documentation": "The type of client authentication for which to retrieve information. If no type is specified, a list of all client authentication types that are supported for the specified directory is retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservice#NextToken", + "traits": { + "smithy.api#documentation": "The DescribeClientAuthenticationSettingsResult.NextToken value from a previous call to DescribeClientAuthenticationSettings. Pass null if this is the first call.
" + } + }, + "Limit": { + "target": "com.amazonaws.directoryservice#PageLimit", + "traits": { + "smithy.api#documentation": "The maximum number of items to return. If this value is zero, the maximum number of items is specified by the limitations of the operation.
" + } + } + } + }, + "com.amazonaws.directoryservice#DescribeClientAuthenticationSettingsResult": { + "type": "structure", + "members": { + "ClientAuthenticationSettingsInfo": { + "target": "com.amazonaws.directoryservice#ClientAuthenticationSettingsInfo", + "traits": { + "smithy.api#documentation": "Information about the type of client authentication for the specified directory. The following information is retrieved: The date and time when the status of the client authentication type was last updated, whether the client authentication type is enabled or disabled, and the type of client authentication.
" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservice#NextToken", + "traits": { + "smithy.api#documentation": "The next token used to retrieve the client authentication settings if the number of setting types exceeds the\n page limit and there is another page.
" + } + } + } + }, "com.amazonaws.directoryservice#DescribeConditionalForwarders": { "type": "operation", "input": { @@ -2048,7 +2174,7 @@ } ], "traits": { - "smithy.api#documentation": "Obtains information about the conditional forwarders for this account.
\nIf no input parameters are provided for RemoteDomainNames, this request describes all conditional forwarders for the specified directory ID.
" + "smithy.api#documentation": "Obtains information about the conditional forwarders for this account.
\nIf no input parameters are provided for RemoteDomainNames, this request describes all\n conditional forwarders for the specified directory ID.
" } }, "com.amazonaws.directoryservice#DescribeConditionalForwardersRequest": { @@ -2057,14 +2183,14 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The directory ID for which to get the list of associated conditional forwarders.
", + "smithy.api#documentation": "The directory ID for which to get the list of associated conditional\n forwarders.
", "smithy.api#required": {} } }, "RemoteDomainNames": { "target": "com.amazonaws.directoryservice#RemoteDomainNames", "traits": { - "smithy.api#documentation": "The fully qualified domain names (FQDN) of the remote domains for which to get the list of associated conditional forwarders. If this member is null, all conditional forwarders are returned.
" + "smithy.api#documentation": "The fully qualified domain names (FQDN) of the remote domains for which to get the list\n of associated conditional forwarders. If this member is null, all conditional forwarders are\n returned.
" } } }, @@ -2204,20 +2330,20 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "Identifier of the directory for which to retrieve the domain controller information.
", + "smithy.api#documentation": "Identifier of the directory for which to retrieve the domain controller\n information.
", "smithy.api#required": {} } }, "DomainControllerIds": { "target": "com.amazonaws.directoryservice#DomainControllerIds", "traits": { - "smithy.api#documentation": "A list of identifiers for the domain controllers whose information will be provided.
" + "smithy.api#documentation": "A list of identifiers for the domain controllers whose information will be\n provided.
" } }, "NextToken": { "target": "com.amazonaws.directoryservice#NextToken", "traits": { - "smithy.api#documentation": "The DescribeDomainControllers.NextToken value from a previous call to DescribeDomainControllers. Pass null if this is the first call.
" + "smithy.api#documentation": "The DescribeDomainControllers.NextToken value from a previous call\n to DescribeDomainControllers. Pass null if this is the first call.\n
" } }, "Limit": { @@ -2240,7 +2366,7 @@ "NextToken": { "target": "com.amazonaws.directoryservice#NextToken", "traits": { - "smithy.api#documentation": "If not null, more results are available. Pass this value for the NextToken
parameter in a subsequent call to DescribeDomainControllers retrieve the next set of items.
If not null, more results are available. Pass this value for the NextToken
\n parameter in a subsequent call to DescribeDomainControllers retrieve the\n next set of items.
Obtains information about which SNS topics receive status messages from the specified directory.
\nIf no input parameters are provided, such as DirectoryId or TopicName, this request describes all of the associations in the account.
" + "smithy.api#documentation": "Obtains information about which Amazon SNS topics receive status messages from the specified\n directory.
\nIf no input parameters are provided, such as DirectoryId or TopicName, this request\n describes all of the associations in the account.
" } }, "com.amazonaws.directoryservice#DescribeEventTopicsRequest": { @@ -2277,13 +2403,13 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The Directory ID for which to get the list of associated SNS topics. If this member is null, associations for all Directory IDs are returned.
" + "smithy.api#documentation": "The Directory ID for which to get the list of associated Amazon SNS topics. If this member\n is null, associations for all Directory IDs are returned.
" } }, "TopicNames": { "target": "com.amazonaws.directoryservice#TopicNames", "traits": { - "smithy.api#documentation": "A list of SNS topic names for which to obtain the information. If this member is null, all associations for the specified Directory ID are returned.
\nAn empty list results in an InvalidParameterException
being thrown.
A list of Amazon SNS topic names for which to obtain the information. If this member is\n null, all associations for the specified Directory ID are returned.
\nAn empty list results in an InvalidParameterException
being\n thrown.
A list of SNS topic names that receive status messages from the specified Directory ID.
" + "smithy.api#documentation": "A list of Amazon SNS topic names that receive status messages from the specified Directory\n ID.
" } } }, @@ -2649,7 +2775,7 @@ } ], "traits": { - "smithy.api#documentation": "Obtains information about the trust relationships for this account.
\nIf no input parameters are provided, such as DirectoryId or TrustIds, this request describes all the trust relationships belonging to the account.
" + "smithy.api#documentation": "Obtains information about the trust relationships for this account.
\nIf no input parameters are provided, such as DirectoryId or TrustIds, this request\n describes all the trust relationships belonging to the account.
" } }, "com.amazonaws.directoryservice#DescribeTrustsRequest": { @@ -2658,19 +2784,19 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The Directory ID of the AWS directory that is a part of the requested trust relationship.
" + "smithy.api#documentation": "The Directory ID of the Amazon Web Services directory that is a part of the requested trust\n relationship.
" } }, "TrustIds": { "target": "com.amazonaws.directoryservice#TrustIds", "traits": { - "smithy.api#documentation": "A list of identifiers of the trust relationships for which to obtain the information. If this member is null, all trust relationships that belong to the current account are returned.
\nAn empty list results in an InvalidParameterException
being thrown.
A list of identifiers of the trust relationships for which to obtain the information. If\n this member is null, all trust relationships that belong to the current account are\n returned.
\nAn empty list results in an InvalidParameterException
being thrown.
The DescribeTrustsResult.NextToken value from a previous call to\n DescribeTrusts. Pass null if this is the first call.
" + "smithy.api#documentation": "The DescribeTrustsResult.NextToken value from a previous call to\n DescribeTrusts. Pass null if this is the first call.
" } }, "Limit": { @@ -2681,7 +2807,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes the trust relationships for a particular AWS Managed Microsoft AD directory. If no input parameters are are provided, such as directory ID or trust ID, this request describes all the trust relationships.
" + "smithy.api#documentation": "Describes the trust relationships for a particular Managed Microsoft AD directory. If no input\n parameters are provided, such as directory ID or trust ID, this request describes all the\n trust relationships.
" } }, "com.amazonaws.directoryservice#DescribeTrustsResult": { @@ -2690,13 +2816,13 @@ "Trusts": { "target": "com.amazonaws.directoryservice#Trusts", "traits": { - "smithy.api#documentation": "The list of Trust objects that were retrieved.
\nIt is possible that this list contains less than the number of items specified in the\n Limit member of the request. This occurs if there are less than the requested\n number of items left to retrieve, or if the limitations of the operation have been\n exceeded.
" + "smithy.api#documentation": "The list of Trust objects that were retrieved.
\nIt is possible that this list contains less than the number of items specified in the\n Limit member of the request. This occurs if there are less than the\n requested number of items left to retrieve, or if the limitations of the operation have been\n exceeded.
" } }, "NextToken": { "target": "com.amazonaws.directoryservice#NextToken", "traits": { - "smithy.api#documentation": "If not null, more results are available. Pass this value for the NextToken parameter\n in a subsequent call to DescribeTrusts to retrieve the next set of items.
" + "smithy.api#documentation": "If not null, more results are available. Pass this value for the\n NextToken parameter in a subsequent call to DescribeTrusts to retrieve the next set of items.
" } } }, @@ -2733,7 +2859,7 @@ } }, "traits": { - "smithy.api#documentation": "The Region you specified is the same Region where the AWS Managed Microsoft AD directory\n was created. Specify a different Region and try again.
", + "smithy.api#documentation": "The Region you specified is the same Region where the Managed Microsoft AD directory\n was created. Specify a different Region and try again.
", "smithy.api#error": "client" } }, @@ -2748,7 +2874,7 @@ } }, "traits": { - "smithy.api#documentation": "The specified directory has already been shared with this AWS account.
", + "smithy.api#documentation": "The specified directory has already been shared with this Amazon Web Services account.
", "smithy.api#error": "client" } }, @@ -2772,14 +2898,14 @@ "CustomerDnsIps": { "target": "com.amazonaws.directoryservice#DnsIpAddrs", "traits": { - "smithy.api#documentation": "A list of one or more IP addresses of DNS servers or domain controllers in the on-premises\n directory.
", + "smithy.api#documentation": "A list of one or more IP addresses of DNS servers or domain controllers in your self-managed\n directory.
", "smithy.api#required": {} } }, "CustomerUserName": { "target": "com.amazonaws.directoryservice#UserName", "traits": { - "smithy.api#documentation": "The user name of an account in the on-premises directory that is used to connect to the\n directory. This account must have the following permissions:
\nRead users and groups
\nCreate computer objects
\nJoin computers to the domain
\nThe user name of an account in your self-managed directory that is used to connect to the\n directory. This account must have the following permissions:
\nRead users and groups
\nCreate computer objects
\nJoin computers to the domain
\nThe user name of the service account in the on-premises directory.
" + "smithy.api#documentation": "The user name of the service account in your self-managed directory.
" } }, "SecurityGroupId": { @@ -2886,7 +3012,7 @@ "DnsIpAddrs": { "target": "com.amazonaws.directoryservice#DnsIpAddrs", "traits": { - "smithy.api#documentation": "The IP addresses of the DNS servers for the directory. For a Simple AD or Microsoft AD\n directory, these are the IP addresses of the Simple AD or Microsoft AD directory servers. For\n an AD Connector directory, these are the IP addresses of the DNS servers or domain controllers\n in the on-premises directory to which the AD Connector is connected.
" + "smithy.api#documentation": "The IP addresses of the DNS servers for the directory. For a Simple AD or Microsoft AD\n directory, these are the IP addresses of the Simple AD or Microsoft AD directory servers. For\n an AD Connector directory, these are the IP addresses of the DNS servers or domain controllers\n in your self-managed directory to which the AD Connector is connected.
" } }, "Stage": { @@ -2898,13 +3024,13 @@ "ShareStatus": { "target": "com.amazonaws.directoryservice#ShareStatus", "traits": { - "smithy.api#documentation": "Current directory status of the shared AWS Managed Microsoft AD directory.
" + "smithy.api#documentation": "Current directory status of the shared Managed Microsoft AD directory.
" } }, "ShareMethod": { "target": "com.amazonaws.directoryservice#ShareMethod", "traits": { - "smithy.api#documentation": "The method used when sharing a directory to determine whether the directory should be\n shared within your AWS organization (ORGANIZATIONS
) or with any AWS account by\n sending a shared directory request (HANDSHAKE
).
The method used when sharing a directory to determine whether the directory should be\n shared within your Amazon Web Services organization (ORGANIZATIONS
) or with any Amazon Web Services account by\n sending a shared directory request (HANDSHAKE
).
A DirectoryVpcSettingsDescription object that contains additional\n information about a directory. This member is only present if the directory is a Simple AD or\n Managed AD directory.
" + "smithy.api#documentation": "A DirectoryVpcSettingsDescription object that contains additional\n information about a directory. This member is only present if the directory is a Simple AD or\n Managed Microsoft AD directory.
" } }, "ConnectSettings": { @@ -2970,13 +3096,13 @@ "DesiredNumberOfDomainControllers": { "target": "com.amazonaws.directoryservice#DesiredNumberOfDomainControllers", "traits": { - "smithy.api#documentation": "The desired number of domain controllers in the directory if the directory is Microsoft AD.
" + "smithy.api#documentation": "The desired number of domain controllers in the directory if the directory is Microsoft\n AD.
" } }, "OwnerDirectoryDescription": { "target": "com.amazonaws.directoryservice#OwnerDirectoryDescription", "traits": { - "smithy.api#documentation": "Describes the AWS Managed Microsoft AD directory in the directory owner account.
" + "smithy.api#documentation": "Describes the Managed Microsoft AD directory in the directory owner account.
" } }, "RegionsInfo": { @@ -2987,7 +3113,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains information about an AWS Directory Service directory.
" + "smithy.api#documentation": "Contains information about an Directory Service directory.
" } }, "com.amazonaws.directoryservice#DirectoryDescriptions": { @@ -3055,7 +3181,7 @@ } }, "traits": { - "smithy.api#documentation": "The maximum number of directories in the region has been reached. You can use the\n GetDirectoryLimits operation to determine your directory limits in the\n region.
", + "smithy.api#documentation": "The maximum number of directories in the region has been reached. You can use the\n GetDirectoryLimits operation to determine your directory limits in\n the region.
", "smithy.api#error": "client" } }, @@ -3083,19 +3209,19 @@ "CloudOnlyMicrosoftADLimit": { "target": "com.amazonaws.directoryservice#Limit", "traits": { - "smithy.api#documentation": "The maximum number of AWS Managed Microsoft AD directories allowed in the region.
" + "smithy.api#documentation": "The maximum number of Managed Microsoft AD directories allowed in the region.
" } }, "CloudOnlyMicrosoftADCurrentCount": { "target": "com.amazonaws.directoryservice#Limit", "traits": { - "smithy.api#documentation": "The current number of AWS Managed Microsoft AD directories in the region.
" + "smithy.api#documentation": "The current number of Managed Microsoft AD directories in the region.
" } }, "CloudOnlyMicrosoftADLimitReached": { "target": "com.amazonaws.directoryservice#CloudOnlyDirectoriesLimitReached", "traits": { - "smithy.api#documentation": "Indicates if the AWS Managed Microsoft AD directory limit has been reached.
" + "smithy.api#documentation": "Indicates if the Managed Microsoft AD directory limit has been reached.
" } }, "ConnectedDirectoriesLimit": { @@ -3138,7 +3264,7 @@ } }, "traits": { - "smithy.api#documentation": "The specified directory has not been shared with this AWS account.
", + "smithy.api#documentation": "The specified directory has not been shared with this Amazon Web Services account.
", "smithy.api#error": "client" } }, @@ -3212,6 +3338,9 @@ { "target": "com.amazonaws.directoryservice#DescribeCertificate" }, + { + "target": "com.amazonaws.directoryservice#DescribeClientAuthenticationSettings" + }, { "target": "com.amazonaws.directoryservice#DescribeConditionalForwarders" }, @@ -3345,7 +3474,7 @@ "name": "ds" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "AWS Directory Service is a web service that makes it easy for you to setup and run directories in the AWS cloud, or connect your AWS resources with an existing on-premises Microsoft Active Directory. This guide provides detailed information about AWS Directory Service operations, data types, parameters, and errors. For information about AWS Directory Services features, see AWS Directory Service and the AWS Directory Service Administration Guide.
\nAWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS Directory Service and other AWS services. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web\n Services.
\nDirectory Service is a web service that makes it easy for you to setup and run directories in the\n Amazon Web Services cloud, or connect your Amazon Web Services resources with an existing self-managed Microsoft Active\n Directory. This guide provides detailed information about Directory Service operations, data types,\n parameters, and errors. For information about Directory Services features, see Directory Service and the Directory Service\n Administration Guide.
\nAmazon Web Services provides SDKs that consist of libraries and sample code for various\n programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs\n provide a convenient way to create programmatic access to Directory Service and other Amazon Web Services\n services. For more information about the Amazon Web Services SDKs, including how to download and\n install them, see Tools for Amazon Web\n Services.
\nThe identifiers of the subnets for the directory servers. The two subnets must be in\n different Availability Zones. AWS Directory Service creates a directory server and a DNS\n server in each of these subnets.
", + "smithy.api#documentation": "The identifiers of the subnets for the directory servers. The two subnets must be in\n different Availability Zones. Directory Service creates a directory server and a DNS\n server in each of these subnets.
", "smithy.api#required": {} } } @@ -3649,7 +3778,7 @@ } ], "traits": { - "smithy.api#documentation": "Disables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector or Microsoft AD directory.
" + "smithy.api#documentation": "Disables multi-factor authentication (MFA) with the Remote Authentication Dial In\n User Service (RADIUS) server for an AD Connector or Microsoft AD directory.
" } }, "com.amazonaws.directoryservice#DisableRadiusRequest": { @@ -3716,13 +3845,13 @@ "UserName": { "target": "com.amazonaws.directoryservice#UserName", "traits": { - "smithy.api#documentation": "The username of an alternate account to use to disable single-sign on. This is only used for AD Connector directories. This account must have privileges to remove a service principal name.
\nIf the AD Connector service account does not have privileges to remove a service principal\n name, you can specify an alternate account with the UserName and Password\n parameters. These credentials are only used to disable single sign-on and are not stored by\n the service. The AD Connector service account is not changed.
" + "smithy.api#documentation": "The username of an alternate account to use to disable single-sign on. This is only used\n for AD Connector directories. This account must have privileges to remove a service\n principal name.
\nIf the AD Connector service account does not have privileges to remove a service\n principal name, you can specify an alternate account with the UserName\n and Password parameters. These credentials are only used to disable\n single sign-on and are not stored by the service. The AD Connector service account is not\n changed.
" } }, "Password": { "target": "com.amazonaws.directoryservice#ConnectPassword", "traits": { - "smithy.api#documentation": "The password of an alternate account to use to disable single-sign on. This is only used\n for AD Connector directories. For more information, see the UserName parameter.
" + "smithy.api#documentation": "The password of an alternate account to use to disable single-sign on. This is only used\n for AD Connector directories. For more information, see the UserName\n parameter.
" } } }, @@ -3834,7 +3963,7 @@ } }, "traits": { - "smithy.api#documentation": "The maximum allowed number of domain controllers per directory was exceeded. The default limit per directory is 20 domain controllers.
", + "smithy.api#documentation": "The maximum allowed number of domain controllers per directory was exceeded. The\n default limit per directory is 20 domain controllers.
", "smithy.api#error": "client" } }, @@ -3930,7 +4059,7 @@ "Type": { "target": "com.amazonaws.directoryservice#ClientAuthenticationType", "traits": { - "smithy.api#documentation": "The type of client authentication to enable. Currently only the value SmartCard
is\n supported. Smart card authentication in AD Connector requires that you enable Kerberos\n Constrained Delegation for the Service User to the LDAP service in the on-premises AD.\n
The type of client authentication to enable. Currently only the value SmartCard
is\n supported. Smart card authentication in AD Connector requires that you enable Kerberos\n Constrained Delegation for the Service User to the LDAP service in your self-managed AD.\n
Enables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector or Microsoft AD directory.
" + "smithy.api#documentation": "Enables multi-factor authentication (MFA) with the Remote Authentication Dial In User\n Service (RADIUS) server for an AD Connector or Microsoft AD directory.
" } }, "com.amazonaws.directoryservice#EnableRadiusRequest": { @@ -4043,7 +4172,7 @@ "RadiusSettings": { "target": "com.amazonaws.directoryservice#RadiusSettings", "traits": { - "smithy.api#documentation": "A RadiusSettings object that contains information about the RADIUS server.
", + "smithy.api#documentation": "A RadiusSettings object that contains information about the RADIUS\n server.
", "smithy.api#required": {} } } @@ -4085,7 +4214,7 @@ } ], "traits": { - "smithy.api#documentation": "Enables single sign-on for a directory. Single sign-on allows users in your directory to access certain AWS services from a computer joined to the directory without having to enter their credentials separately.
" + "smithy.api#documentation": "Enables single sign-on for a directory. Single sign-on allows users in your directory to\n access certain Amazon Web Services services from a computer joined to the directory without having to enter\n their credentials separately.
" } }, "com.amazonaws.directoryservice#EnableSsoRequest": { @@ -4101,13 +4230,13 @@ "UserName": { "target": "com.amazonaws.directoryservice#UserName", "traits": { - "smithy.api#documentation": "The username of an alternate account to use to enable single-sign on. This is only used for AD Connector directories. This account must have privileges to add a service principal name.
\nIf the AD Connector service account does not have privileges to add a service principal\n name, you can specify an alternate account with the UserName and Password\n parameters. These credentials are only used to enable single sign-on and are not stored by\n the service. The AD Connector service account is not changed.
" + "smithy.api#documentation": "The username of an alternate account to use to enable single-sign on. This is only used\n for AD Connector directories. This account must have privileges to add a service principal\n name.
\nIf the AD Connector service account does not have privileges to add a service principal\n name, you can specify an alternate account with the UserName and\n Password parameters. These credentials are only used to enable single\n sign-on and are not stored by the service. The AD Connector service account is not\n changed.
" } }, "Password": { "target": "com.amazonaws.directoryservice#ConnectPassword", "traits": { - "smithy.api#documentation": "The password of an alternate account to use to enable single-sign on. This is only used for\n AD Connector directories. For more information, see the UserName parameter.
" + "smithy.api#documentation": "The password of an alternate account to use to enable single-sign on. This is only used\n for AD Connector directories. For more information, see the UserName\n parameter.
" } } }, @@ -4161,25 +4290,25 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The Directory ID of an AWS Directory Service directory that will publish status messages to an SNS topic.
" + "smithy.api#documentation": "The Directory ID of an Directory Service directory that will publish status messages to an Amazon SNS\n topic.
" } }, "TopicName": { "target": "com.amazonaws.directoryservice#TopicName", "traits": { - "smithy.api#documentation": "The name of an AWS SNS topic the receives status messages from the directory.
" + "smithy.api#documentation": "The name of an Amazon SNS topic the receives status messages from the directory.
" } }, "TopicArn": { "target": "com.amazonaws.directoryservice#TopicArn", "traits": { - "smithy.api#documentation": "The SNS topic ARN (Amazon Resource Name).
" + "smithy.api#documentation": "The Amazon SNS topic ARN (Amazon Resource Name).
" } }, "CreatedDateTime": { "target": "com.amazonaws.directoryservice#CreatedDateTime", "traits": { - "smithy.api#documentation": "The date and time of when you associated your directory with the SNS topic.
" + "smithy.api#documentation": "The date and time of when you associated your directory with the Amazon SNS topic.
" } }, "Status": { @@ -4190,7 +4319,7 @@ } }, "traits": { - "smithy.api#documentation": "Information about SNS topic and AWS Directory Service directory associations.
" + "smithy.api#documentation": "Information about Amazon SNS topic and Directory Service directory associations.
" } }, "com.amazonaws.directoryservice#EventTopics": { @@ -4241,7 +4370,7 @@ "DirectoryLimits": { "target": "com.amazonaws.directoryservice#DirectoryLimits", "traits": { - "smithy.api#documentation": "A DirectoryLimits object that contains the directory limits for the\n current rRegion.
" + "smithy.api#documentation": "A DirectoryLimits object that contains the directory limits for the\n current Region.
" } } }, @@ -4402,7 +4531,7 @@ } }, "traits": { - "smithy.api#documentation": "The new password provided by the user does not meet the password complexity requirements defined in your directory.
", + "smithy.api#documentation": "The new password provided by the user does not meet the password complexity\n requirements defined in your directory.
", "smithy.api#error": "client" } }, @@ -4439,7 +4568,7 @@ "CidrIp": { "target": "com.amazonaws.directoryservice#CidrIp", "traits": { - "smithy.api#documentation": "IP address block using CIDR format, for example 10.0.0.0/24. This is often the address block of the DNS server used for your on-premises domain. For a single IP address use a CIDR address block with /32. For example 10.0.0.0/32.
" + "smithy.api#documentation": "IP address block using CIDR format, for example 10.0.0.0/24. This is often the\n address block of the DNS server used for your self-managed domain. For a single IP address\n use a CIDR address block with /32. For example 10.0.0.0/32.
" } }, "Description": { @@ -4450,7 +4579,7 @@ } }, "traits": { - "smithy.api#documentation": "IP address block. This is often the address block of the DNS server used for your on-premises domain.
" + "smithy.api#documentation": "IP address block. This is often the address block of the DNS server used for your\n self-managed domain.
" } }, "com.amazonaws.directoryservice#IpRouteInfo": { @@ -4508,7 +4637,7 @@ } }, "traits": { - "smithy.api#documentation": "The maximum allowed number of IP addresses was exceeded. The default limit is 100 IP address blocks.
", + "smithy.api#documentation": "The maximum allowed number of IP addresses was exceeded. The default limit is 100 IP\n address blocks.
", "smithy.api#error": "client" } }, @@ -4759,20 +4888,20 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "Identifier (ID) of the directory for which you want to retrieve the IP addresses.
", + "smithy.api#documentation": "Identifier (ID) of the directory for which you want to retrieve the IP\n addresses.
", "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.directoryservice#NextToken", "traits": { - "smithy.api#documentation": "The ListIpRoutes.NextToken value from a previous call to\n ListIpRoutes. Pass null if this is the first call.
" + "smithy.api#documentation": "The ListIpRoutes.NextToken value from a previous call to ListIpRoutes. Pass null if this is the first call.
" } }, "Limit": { "target": "com.amazonaws.directoryservice#Limit", "traits": { - "smithy.api#documentation": "Maximum number of items to return. If this value is zero, the maximum number of items is specified by the limitations of the operation.
" + "smithy.api#documentation": "Maximum number of items to return. If this value is zero, the maximum number of items\n is specified by the limitations of the operation.
" } } } @@ -4789,7 +4918,7 @@ "NextToken": { "target": "com.amazonaws.directoryservice#NextToken", "traits": { - "smithy.api#documentation": "If not null, more results are available. Pass this value for the NextToken parameter\n in a subsequent call to ListIpRoutes to retrieve the next set of items.
" + "smithy.api#documentation": "If not null, more results are available. Pass this value for the\n NextToken parameter in a subsequent call to ListIpRoutes to retrieve the next set of items.
" } } } @@ -4817,7 +4946,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the active log subscriptions for the AWS account.
" + "smithy.api#documentation": "Lists the active log subscriptions for the Amazon Web Services account.
" } }, "com.amazonaws.directoryservice#ListLogSubscriptionsRequest": { @@ -4826,7 +4955,7 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "If a DirectoryID is provided, lists only the log subscription\n associated with that directory. If no DirectoryId is provided, lists all\n log subscriptions associated with your AWS account. If there are no log subscriptions for the\n AWS account or the directory, an empty list will be returned.
" + "smithy.api#documentation": "If a DirectoryID is provided, lists only the log subscription\n associated with that directory. If no DirectoryId is provided, lists all\n log subscriptions associated with your Amazon Web Services account. If there are no log subscriptions for the\n Amazon Web Services account or the directory, an empty list will be returned.
" } }, "NextToken": { @@ -4849,7 +4978,7 @@ "LogSubscriptions": { "target": "com.amazonaws.directoryservice#LogSubscriptions", "traits": { - "smithy.api#documentation": "A list of active LogSubscription objects for calling the AWS\n account.
" + "smithy.api#documentation": "A list of active LogSubscription objects for calling the Amazon Web Services account.
" } }, "NextToken": { @@ -4892,14 +5021,14 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The identifier of the directory from which to retrieve the schema extension information.
", + "smithy.api#documentation": "The identifier of the directory from which to retrieve the schema extension\n information.
", "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.directoryservice#NextToken", "traits": { - "smithy.api#documentation": "The ListSchemaExtensions.NextToken
value from a previous call to ListSchemaExtensions
. Pass null if this is the first call.
The ListSchemaExtensions.NextToken
value from a previous call to\n ListSchemaExtensions
. Pass null if this is the first call.
If not null, more results are available. Pass this value for the NextToken
parameter in a subsequent call to ListSchemaExtensions
to retrieve the next set of items.
If not null, more results are available. Pass this value for the NextToken
\n parameter in a subsequent call to ListSchemaExtensions
to retrieve the next set\n of items.
Exception encountered while trying to access your AWS organization.
", + "smithy.api#documentation": "Exception encountered while trying to access your Amazon Web Services organization.
", "smithy.api#error": "client" } }, @@ -5110,7 +5239,7 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "Identifier of the AWS Managed Microsoft AD directory in the directory owner\n account.
" + "smithy.api#documentation": "Identifier of the Managed Microsoft AD directory in the directory owner\n account.
" } }, "AccountId": { @@ -5161,7 +5290,7 @@ "com.amazonaws.directoryservice#Password": { "type": "string", "traits": { - "smithy.api#pattern": "(?=^.{8,64}$)((?=.*\\d)(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[^A-Za-z0-9\\s])(?=.*[a-z])|(?=.*[^A-Za-z0-9\\s])(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\\s]))^.*", + "smithy.api#pattern": "^(?=^.{8,64}$)((?=.*\\d)(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[^A-Za-z0-9\\s])(?=.*[a-z])|(?=.*[^A-Za-z0-9\\s])(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\\s]))^", "smithy.api#sensitive": {} } }, @@ -5221,13 +5350,13 @@ "RadiusServers": { "target": "com.amazonaws.directoryservice#Servers", "traits": { - "smithy.api#documentation": "An array of strings that contains the fully qualified domain name (FQDN) or IP addresses of the RADIUS server endpoints, or the FQDN or IP addresses of your RADIUS server load balancer.
" + "smithy.api#documentation": "An array of strings that contains the fully qualified domain name (FQDN) or IP\n addresses of the RADIUS server endpoints, or the FQDN or IP addresses of your RADIUS server\n load balancer.
" } }, "RadiusPort": { "target": "com.amazonaws.directoryservice#PortNumber", "traits": { - "smithy.api#documentation": "The port that your RADIUS server is using for communications. Your on-premises network must allow inbound traffic over this port from the AWS Directory Service servers.
" + "smithy.api#documentation": "The port that your RADIUS server is using for communications. Your self-managed\n network must allow inbound traffic over this port from the Directory Service servers.
" } }, "RadiusTimeout": { @@ -5239,7 +5368,7 @@ "RadiusRetries": { "target": "com.amazonaws.directoryservice#RadiusRetries", "traits": { - "smithy.api#documentation": "The maximum number of times that communication with the RADIUS server is attempted.
" + "smithy.api#documentation": "The maximum number of times that communication with the RADIUS server is\n attempted.
" } }, "SharedSecret": { @@ -5268,7 +5397,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains information about a Remote Authentication Dial In User Service (RADIUS) server.
" + "smithy.api#documentation": "Contains information about a Remote Authentication Dial In User Service (RADIUS)\n server.
" } }, "com.amazonaws.directoryservice#RadiusSharedSecret": { @@ -5278,6 +5407,7 @@ "min": 8, "max": 512 }, + "smithy.api#pattern": "^(\\p{LD}|\\p{Punct}| )+$", "smithy.api#sensitive": {} } }, @@ -5419,7 +5549,7 @@ "PrimaryRegion": { "target": "com.amazonaws.directoryservice#RegionName", "traits": { - "smithy.api#documentation": "The Region where the AWS Managed Microsoft AD directory was originally created.
" + "smithy.api#documentation": "The Region where the Managed Microsoft AD directory was originally created.
" } }, "AdditionalRegions": { @@ -5539,7 +5669,7 @@ } ], "traits": { - "smithy.api#documentation": "Associates a directory with an SNS topic. This establishes the directory as a publisher to the specified SNS topic. You can then receive email or text (SMS) messages when the status of your directory changes. You get notified if your directory goes from an Active status to an Impaired or Inoperable status. You also receive a notification when the directory returns to an Active status.
" + "smithy.api#documentation": "Associates a directory with an Amazon SNS topic. This establishes the directory as a\n publisher to the specified Amazon SNS topic. You can then receive email or text (SMS) messages when\n the status of your directory changes. You get notified if your directory goes from an Active\n status to an Impaired or Inoperable status. You also receive a notification when the directory\n returns to an Active status.
" } }, "com.amazonaws.directoryservice#RegisterEventTopicRequest": { @@ -5548,14 +5678,14 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The Directory ID that will publish status messages to the SNS topic.
", + "smithy.api#documentation": "The Directory ID that will publish status messages to the Amazon SNS topic.
", "smithy.api#required": {} } }, "TopicName": { "target": "com.amazonaws.directoryservice#TopicName", "traits": { - "smithy.api#documentation": "The SNS topic name to which the directory will publish status messages. This SNS topic must be in the same region as the specified Directory ID.
", + "smithy.api#documentation": "The Amazon SNS topic name to which the directory will publish status messages. This Amazon SNS\n topic must be in the same region as the specified Directory ID.
", "smithy.api#required": {} } } @@ -5670,7 +5800,7 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "Identifier (ID) of the directory from which you want to remove the IP addresses.
", + "smithy.api#documentation": "Identifier (ID) of the directory from which you want to remove the IP\n addresses.
", "smithy.api#required": {} } }, @@ -5798,7 +5928,7 @@ "com.amazonaws.directoryservice#RequestId": { "type": "string", "traits": { - "smithy.api#documentation": "The AWS request identifier.
", + "smithy.api#documentation": "The Amazon Web Services request identifier.
", "smithy.api#pattern": "^([A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12})$" } }, @@ -5834,7 +5964,7 @@ } ], "traits": { - "smithy.api#documentation": "Resets the password for any user in your AWS Managed Microsoft AD or Simple AD\n directory.
\nYou can reset the password for any user in your directory with the following\n exceptions:
\nFor Simple AD, you cannot reset the password for any user that is a member of either\n the Domain Admins or Enterprise\n Admins group except for the administrator user.
\nFor AWS Managed Microsoft AD, you can only reset the password for a user that is in an\n OU based off of the NetBIOS name that you typed when you created your directory. For\n example, you cannot reset the password for a user in the AWS\n Reserved OU. For more information about the OU structure for an AWS Managed\n Microsoft AD directory, see What Gets Created in the AWS Directory Service Administration\n Guide.
\nResets the password for any user in your Managed Microsoft AD or Simple AD\n directory.
\nYou can reset the password for any user in your directory with the following\n exceptions:
\nFor Simple AD, you cannot reset the password for any user that is a member of either\n the Domain Admins or Enterprise\n Admins group except for the administrator user.
\nFor Managed Microsoft AD, you can only reset the password for a user that is in an\n OU based off of the NetBIOS name that you typed when you created your directory. For\n example, you cannot reset the password for a user in the Amazon Web Services\n Reserved OU. For more information about the OU structure for a Managed Microsoft AD directory, see What Gets Created in the Directory Service Administration\n Guide.
\nIdentifier of the AWS Managed Microsoft AD or Simple AD directory in which the user\n resides.
", + "smithy.api#documentation": "Identifier of the Managed Microsoft AD or Simple AD directory in which the user\n resides.
", "smithy.api#required": {} } }, @@ -5928,7 +6058,7 @@ "min": 1, "max": 256 }, - "smithy.api#pattern": "[&\\w+-.@]+" + "smithy.api#pattern": "^[&\\w+-.@]+$" } }, "com.amazonaws.directoryservice#SchemaExtensionId": { @@ -5973,7 +6103,7 @@ "StartDateTime": { "target": "com.amazonaws.directoryservice#StartDateTime", "traits": { - "smithy.api#documentation": "The date and time that the schema extension started being applied to the directory.
" + "smithy.api#documentation": "The date and time that the schema extension started being applied to the\n directory.
" } }, "EndDateTime": { @@ -6086,7 +6216,7 @@ } }, "traits": { - "smithy.api#documentation": "An exception has occurred in AWS Directory Service.
", + "smithy.api#documentation": "An exception has occurred in Directory Service.
", "smithy.api#error": "server" } }, @@ -6131,7 +6261,7 @@ } ], "traits": { - "smithy.api#documentation": "Shares a specified directory (DirectoryId
) in your AWS account (directory\n owner) with another AWS account (directory consumer). With this operation you can use your\n directory from any AWS account and from any Amazon VPC within an AWS Region.
When you share your AWS Managed Microsoft AD directory, AWS Directory Service creates a\n shared directory in the directory consumer account. This shared directory contains the\n metadata to provide access to the directory within the directory owner account. The shared\n directory is visible in all VPCs in the directory consumer account.
\nThe ShareMethod
parameter determines whether the specified directory can be\n shared between AWS accounts inside the same AWS organization (ORGANIZATIONS
). It\n also determines whether you can share the directory with any other AWS account either inside\n or outside of the organization (HANDSHAKE
).
The ShareNotes
parameter is only used when HANDSHAKE
is called,\n which sends a directory sharing request to the directory consumer.
Shares a specified directory (DirectoryId
) in your Amazon Web Services account (directory\n owner) with another Amazon Web Services account (directory consumer). With this operation you can use your\n directory from any Amazon Web Services account and from any Amazon VPC within an Amazon Web Services Region.
When you share your Managed Microsoft AD directory, Directory Service creates a\n shared directory in the directory consumer account. This shared directory contains the\n metadata to provide access to the directory within the directory owner account. The shared\n directory is visible in all VPCs in the directory consumer account.
\nThe ShareMethod
parameter determines whether the specified directory can be\n shared between Amazon Web Services accounts inside the same Amazon Web Services organization (ORGANIZATIONS
). It\n also determines whether you can share the directory with any other Amazon Web Services account either inside\n or outside of the organization (HANDSHAKE
).
The ShareNotes
parameter is only used when HANDSHAKE
is called,\n which sends a directory sharing request to the directory consumer.
Identifier of the AWS Managed Microsoft AD directory that you want to share with other AWS\n accounts.
", + "smithy.api#documentation": "Identifier of the Managed Microsoft AD directory that you want to share with other Amazon Web Services accounts.
", "smithy.api#required": {} } }, @@ -6160,7 +6290,7 @@ "ShareMethod": { "target": "com.amazonaws.directoryservice#ShareMethod", "traits": { - "smithy.api#documentation": "The method used when sharing a directory to determine whether the directory should be\n shared within your AWS organization (ORGANIZATIONS
) or with any AWS account by\n sending a directory sharing request (HANDSHAKE
).
The method used when sharing a directory to determine whether the directory should be\n shared within your Amazon Web Services organization (ORGANIZATIONS
) or with any Amazon Web Services account by\n sending a directory sharing request (HANDSHAKE
).
The maximum number of AWS accounts that you can share with this directory has been reached.
", + "smithy.api#documentation": "The maximum number of Amazon Web Services accounts that you can share with this directory has been\n reached.
", "smithy.api#error": "client" } }, @@ -6296,7 +6426,7 @@ "ShareMethod": { "target": "com.amazonaws.directoryservice#ShareMethod", "traits": { - "smithy.api#documentation": "The method used when sharing a directory to determine whether the directory should be\n shared within your AWS organization (ORGANIZATIONS
) or with any AWS account by\n sending a shared directory request (HANDSHAKE
).
The method used when sharing a directory to determine whether the directory should be\n shared within your Amazon Web Services organization (ORGANIZATIONS
) or with any Amazon Web Services account by\n sending a shared directory request (HANDSHAKE
).
Current directory status of the shared AWS Managed Microsoft AD directory.
" + "smithy.api#documentation": "Current directory status of the shared Managed Microsoft AD directory.
" } }, "ShareNotes": { @@ -6410,7 +6540,7 @@ } }, "traits": { - "smithy.api#documentation": "The maximum number of manual snapshots for the directory has been reached. You can\n use the GetSnapshotLimits operation to determine the snapshot limits for a\n directory.
", + "smithy.api#documentation": "The maximum number of manual snapshots for the directory has been reached. You can\n use the GetSnapshotLimits operation to determine the snapshot limits\n for a directory.
", "smithy.api#error": "client" } }, @@ -6540,21 +6670,21 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The identifier of the directory for which the schema extension will be applied to.
", + "smithy.api#documentation": "The identifier of the directory for which the schema extension will be applied\n to.
", "smithy.api#required": {} } }, "CreateSnapshotBeforeSchemaExtension": { "target": "com.amazonaws.directoryservice#CreateSnapshotBeforeSchemaExtension", "traits": { - "smithy.api#documentation": "If true, creates a snapshot of the directory before applying the schema extension.
", + "smithy.api#documentation": "If true, creates a snapshot of the directory before applying the schema\n extension.
", "smithy.api#required": {} } }, "LdifContent": { "target": "com.amazonaws.directoryservice#LdifContent", "traits": { - "smithy.api#documentation": "The LDIF file represented as a string. To construct the LdifContent string, precede each line as it would be formatted in an ldif file with \\n. See the example request below for more details. The file size can be no larger than 1MB.
", + "smithy.api#documentation": "The LDIF file represented as a string. To construct the LdifContent string, precede\n each line as it would be formatted in an ldif file with \\n. See the example request below for\n more details. The file size can be no larger than 1MB.
", "smithy.api#required": {} } }, @@ -6605,14 +6735,14 @@ "Key": { "target": "com.amazonaws.directoryservice#TagKey", "traits": { - "smithy.api#documentation": "Required name of the tag. The string value can be Unicode characters and cannot be prefixed with \"aws:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", + "smithy.api#documentation": "Required name of the tag. The string value can be Unicode characters and cannot be\n prefixed with \"aws:\". The string can contain only the set of Unicode letters, digits,\n white-space, '_', '.', '/', '=', '+', '-' (Java regex:\n \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.directoryservice#TagValue", "traits": { - "smithy.api#documentation": "The optional value of the tag. The string value can be Unicode characters. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", + "smithy.api#documentation": "The optional value of the tag. The string value can be Unicode characters. The string\n can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-'\n (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", "smithy.api#required": {} } } @@ -6698,7 +6828,7 @@ "min": 1, "max": 256 }, - "smithy.api#pattern": "[a-zA-Z0-9_-]+" + "smithy.api#pattern": "^[a-zA-Z0-9_-]+$" } }, "com.amazonaws.directoryservice#TopicNames": { @@ -6736,7 +6866,7 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The Directory ID of the AWS directory involved in the trust relationship.
" + "smithy.api#documentation": "The Directory ID of the Amazon Web Services directory involved in the trust relationship.
" } }, "TrustId": { @@ -6748,7 +6878,7 @@ "RemoteDomainName": { "target": "com.amazonaws.directoryservice#RemoteDomainName", "traits": { - "smithy.api#documentation": "The Fully Qualified Domain Name (FQDN) of the external domain involved in the trust relationship.
" + "smithy.api#documentation": "The Fully Qualified Domain Name (FQDN) of the external domain involved in the trust\n relationship.
" } }, "TrustType": { @@ -6801,7 +6931,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes a trust relationship between an AWS Managed Microsoft AD directory and an external domain.
" + "smithy.api#documentation": "Describes a trust relationship between a Managed Microsoft AD directory and an external\n domain.
" } }, "com.amazonaws.directoryservice#TrustDirection": { @@ -6842,7 +6972,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "(.|\\s)*\\S(.|\\s)*", + "smithy.api#pattern": "^(.|\\s)*\\S(.|\\s)*$", "smithy.api#sensitive": {} } }, @@ -6956,7 +7086,7 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The identifier of the AWS Managed Microsoft AD directory that you want to stop\n sharing.
", + "smithy.api#documentation": "The identifier of the Managed Microsoft AD directory that you want to stop\n sharing.
", "smithy.api#required": {} } }, @@ -7046,7 +7176,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a conditional forwarder that has been set up for your AWS directory.
" + "smithy.api#documentation": "Updates a conditional forwarder that has been set up for your Amazon Web Services\n directory.
" } }, "com.amazonaws.directoryservice#UpdateConditionalForwarderRequest": { @@ -7055,21 +7185,21 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The directory ID of the AWS directory for which to update the conditional forwarder.
", + "smithy.api#documentation": "The directory ID of the Amazon Web Services directory for which to update the conditional\n forwarder.
", "smithy.api#required": {} } }, "RemoteDomainName": { "target": "com.amazonaws.directoryservice#RemoteDomainName", "traits": { - "smithy.api#documentation": "The fully qualified domain name (FQDN) of the remote domain with which you will set up a trust relationship.
", + "smithy.api#documentation": "The fully qualified domain name (FQDN) of the remote domain with which you will set up\n a trust relationship.
", "smithy.api#required": {} } }, "DnsIpAddrs": { "target": "com.amazonaws.directoryservice#DnsIpAddrs", "traits": { - "smithy.api#documentation": "The updated IP addresses of the remote DNS server associated with the conditional forwarder.
", + "smithy.api#documentation": "The updated IP addresses of the remote DNS server associated with the conditional\n forwarder.
", "smithy.api#required": {} } } @@ -7117,7 +7247,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds or removes domain controllers to or from the directory. Based on the difference between current value and new value (provided through this API call), domain controllers will be added or removed. It may take up to 45 minutes for any new domain controllers to become fully active once the requested number of domain controllers is updated. During this time, you cannot make another update request.
" + "smithy.api#documentation": "Adds or removes domain controllers to or from the directory. Based on the difference\n between current value and new value (provided through this API call), domain controllers will\n be added or removed. It may take up to 45 minutes for any new domain controllers to become\n fully active once the requested number of domain controllers is updated. During this time, you\n cannot make another update request.
" } }, "com.amazonaws.directoryservice#UpdateNumberOfDomainControllersRequest": { @@ -7126,7 +7256,7 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "Identifier of the directory to which the domain controllers will be added or removed.
", + "smithy.api#documentation": "Identifier of the directory to which the domain controllers will be added or\n removed.
", "smithy.api#required": {} } }, @@ -7166,7 +7296,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the Remote Authentication Dial In User Service (RADIUS) server information for an AD Connector or Microsoft AD directory.
" + "smithy.api#documentation": "Updates the Remote Authentication Dial In User Service (RADIUS) server information\n for an AD Connector or Microsoft AD directory.
" } }, "com.amazonaws.directoryservice#UpdateRadiusRequest": { @@ -7175,14 +7305,14 @@ "DirectoryId": { "target": "com.amazonaws.directoryservice#DirectoryId", "traits": { - "smithy.api#documentation": "The identifier of the directory for which to update the RADIUS server information.
", + "smithy.api#documentation": "The identifier of the directory for which to update the RADIUS server\n information.
", "smithy.api#required": {} } }, "RadiusSettings": { "target": "com.amazonaws.directoryservice#RadiusSettings", "traits": { - "smithy.api#documentation": "A RadiusSettings object that contains information about the RADIUS server.
", + "smithy.api#documentation": "A RadiusSettings object that contains information about the RADIUS\n server.
", "smithy.api#required": {} } } @@ -7224,7 +7354,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the trust that has been set up between your AWS Managed Microsoft AD directory and an on-premises Active Directory.
" + "smithy.api#documentation": "Updates the trust that has been set up between your Managed Microsoft AD directory and a\n self-managed Active Directory.
" } }, "com.amazonaws.directoryservice#UpdateTrustRequest": { @@ -7283,7 +7413,7 @@ "smithy.api#length": { "min": 1 }, - "smithy.api#pattern": "[a-zA-Z0-9._-]+" + "smithy.api#pattern": "^[a-zA-Z0-9._-]+$" } }, "com.amazonaws.directoryservice#UserPassword": { @@ -7322,7 +7452,7 @@ } ], "traits": { - "smithy.api#documentation": "AWS Directory Service for Microsoft Active Directory allows you to configure and verify trust relationships.
\nThis action verifies a trust relationship between your AWS Managed Microsoft AD directory and an external domain.
" + "smithy.api#documentation": "Directory Service for Microsoft Active Directory allows you to configure and verify trust\n relationships.
\nThis action verifies a trust relationship between your Managed Microsoft AD directory and an\n external domain.
" } }, "com.amazonaws.directoryservice#VerifyTrustRequest": { @@ -7337,7 +7467,7 @@ } }, "traits": { - "smithy.api#documentation": "Initiates the verification of an existing trust relationship between an AWS Managed Microsoft AD directory and an external domain.
" + "smithy.api#documentation": "Initiates the verification of an existing trust relationship between a Managed Microsoft AD\n directory and an external domain.
" } }, "com.amazonaws.directoryservice#VerifyTrustResult": { diff --git a/codegen/sdk-codegen/aws-models/ec2.2016-11-15.json b/codegen/sdk-codegen/aws-models/ec2.2016-11-15.json index 2b7ddf961d6..01c6a0169db 100644 --- a/codegen/sdk-codegen/aws-models/ec2.2016-11-15.json +++ b/codegen/sdk-codegen/aws-models/ec2.2016-11-15.json @@ -10080,7 +10080,7 @@ "target": "com.amazonaws.ec2#KeyPair" }, "traits": { - "smithy.api#documentation": "Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public\n key and displays the private key for you to save to a file. The private key is returned\n as an unencrypted PEM encoded PKCS#1 private key. If a key with the specified name\n already exists, Amazon EC2 returns an error.
\nYou can have up to five thousand key pairs per Region.
\n\t\tThe key pair returned to you is available only in the Region in which you create it.\n If you prefer, you can create your own key pair using a third-party tool and upload it\n to any Region using ImportKeyPair.
\nFor more information, see Key Pairs in the\n Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Creates an ED25519 or 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public\n key and displays the private key for you to save to a file. The private key is returned\n as an unencrypted PEM encoded PKCS#1 private key. If a key with the specified name\n already exists, Amazon EC2 returns an error.
\n\t\tThe key pair returned to you is available only in the Amazon Web Services Region in which you create it.\n If you prefer, you can create your own key pair using a third-party tool and upload it\n to any Region using ImportKeyPair.
\nYou can have up to 5,000 key pairs per Amazon Web Services Region.
\nFor more information, see Amazon EC2 key pairs in the\n Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CreateKeyPairRequest": { @@ -10101,6 +10101,12 @@ "smithy.api#xmlName": "dryRun" } }, + "KeyType": { + "target": "com.amazonaws.ec2#KeyType", + "traits": { + "smithy.api#documentation": "The type of key pair. Note that ED25519 keys are not supported for Windows instances, EC2 Instance Connect, and EC2 Serial Console.
\nDefault: rsa
\n
Creates a security group.
\nA security group acts as a virtual firewall for your instance to control inbound and outbound traffic.\n For more information, see\n\t\t\t\tAmazon EC2 Security Groups in \n\t\t\t\tthe Amazon Elastic Compute Cloud User Guide and \n\t\t\t\tSecurity Groups for Your VPC in the\n\t\t\t\tAmazon Virtual Private Cloud User Guide.
\nWhen you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.
\nYou have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.
\nYou can add or remove rules from your security groups using \n\t\t\t\t\tAuthorizeSecurityGroupIngress,\n\t\t\t\t\tAuthorizeSecurityGroupEgress,\n\t\t\t\t\tRevokeSecurityGroupIngress, and\n\t\t\t\t\tRevokeSecurityGroupEgress.
\nFor more information about VPC security group limits, see Amazon VPC Limits.
" + "smithy.api#documentation": "Creates a security group.
\nA security group acts as a virtual firewall for your instance to control inbound and outbound traffic.\n For more information, see\n\t\t\t\tAmazon EC2 security groups in \n\t\t\t\tthe Amazon Elastic Compute Cloud User Guide and \n\t\t\t\tSecurity groups for your VPC in the\n\t\t\t\tAmazon Virtual Private Cloud User Guide.
\nWhen you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.
\nYou have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.
\nYou can add or remove rules from your security groups using \n\t\t\t\t\tAuthorizeSecurityGroupIngress,\n\t\t\t\t\tAuthorizeSecurityGroupEgress,\n\t\t\t\t\tRevokeSecurityGroupIngress, and\n\t\t\t\t\tRevokeSecurityGroupEgress.
\nFor more information about VPC security group limits, see Amazon VPC Limits.
" } }, "com.amazonaws.ec2#CreateSecurityGroupRequest": { @@ -21062,7 +21068,7 @@ "target": "com.amazonaws.ec2#DescribeKeyPairsResult" }, "traits": { - "smithy.api#documentation": "Describes the specified key pairs or all of your key pairs.
\nFor more information about key pairs, see Key Pairs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.
", + "smithy.api#documentation": "Describes the specified key pairs or all of your key pairs.
\nFor more information about key pairs, see Amazon EC2 key pairs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], @@ -23856,7 +23862,7 @@ "target": "com.amazonaws.ec2#DescribeSecurityGroupsResult" }, "traits": { - "smithy.api#documentation": "Describes the specified security groups or all of your security groups.
\nA security group is for use with instances either in the EC2-Classic platform \n\t\t\t\tor in a specific VPC. For more information, see\n\t\t\t\tAmazon EC2 Security Groups in \n\t\t\t\tthe Amazon Elastic Compute Cloud User Guide and \n\t\t\t\tSecurity Groups for Your VPC in the\n\t\t\t\tAmazon Virtual Private Cloud User Guide.
", + "smithy.api#documentation": "Describes the specified security groups or all of your security groups.
\nA security group is for use with instances either in the EC2-Classic platform \n\t\t\t\tor in a specific VPC. For more information, see\n\t\t\t\tAmazon EC2 security groups in \n\t\t\t\tthe Amazon Elastic Compute Cloud User Guide and \n\t\t\t\tSecurity groups for your VPC in the\n\t\t\t\tAmazon Virtual Private Cloud User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -31286,7 +31292,7 @@ "S3ExportLocation": { "target": "com.amazonaws.ec2#ExportTaskS3LocationRequest", "traits": { - "smithy.api#documentation": "Information about the destination Amazon S3 bucket. The bucket must exist and grant WRITE\n and READ_ACP permissions to the AWS account vm-import-export@amazon.com.
", + "smithy.api#documentation": "Information about the destination Amazon S3 bucket. The bucket must exist and grant WRITE\n and READ_ACP permissions to the Amazon Web Services account vm-import-export@amazon.com.
", "smithy.api#required": {} } }, @@ -31659,7 +31665,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "S3Bucket", - "smithy.api#documentation": "The Amazon S3 bucket for the destination image. The destination bucket must exist and grant\n WRITE and READ_ACP permissions to the AWS account\n vm-import-export@amazon.com
.
The Amazon S3 bucket for the destination image. The destination bucket must exist and grant\n WRITE and READ_ACP permissions to the Amazon Web Services account vm-import-export@amazon.com
.
The Amazon S3 bucket for the destination image. The destination bucket must exist and grant\n WRITE and READ_ACP permissions to the AWS account\n vm-import-export@amazon.com
.
The Amazon S3 bucket for the destination image. The destination bucket must exist and grant\n WRITE and READ_ACP permissions to the Amazon Web Services account vm-import-export@amazon.com
.
Specifies whether the destination AMI of the imported image should be encrypted. The default CMK for EBS is used\n unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId
. For more information, see Amazon EBS Encryption in the\n Amazon Elastic Compute Cloud User Guide.
Specifies whether the destination AMI of the imported image should be encrypted. The default KMS key for EBS is used\n unless you specify a non-default KMS key using KmsKeyId
. For more information, see Amazon EBS Encryption in the\n Amazon Elastic Compute Cloud User Guide.
An identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the\n encrypted AMI. This parameter is only required if you want to use a non-default CMK; if this\n parameter is not specified, the default CMK for EBS is used. If a KmsKeyId
is\n specified, the Encrypted
flag must also be set.
The CMK identifier may be provided in any of the following formats:
\nKey ID
\nKey alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even\n though you provided an invalid identifier. This action will eventually report failure.
The specified CMK must exist in the Region that the AMI is being copied to.
\nAmazon EBS does not support asymmetric CMKs.
" + "smithy.api#documentation": "An identifier for the symmetric KMS key to use when creating the\n encrypted AMI. This parameter is only required if you want to use a non-default KMS key; if this\n parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId
is\n specified, the Encrypted
flag must also be set.
The KMS key identifier may be provided in any of the following formats:
\nKey ID
\nKey alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the alias
namespace, and then the key alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the key
namespace, and then the key ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the alias
namespace, and then the key alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
Amazon Web Services parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even\n though you provided an invalid identifier. This action will eventually report failure.
The specified KMS key must exist in the Region that the AMI is being copied to.
\nAmazon EBS does not support asymmetric KMS keys.
" } }, "LicenseType": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The license type to be used for the Amazon Machine Image (AMI) after importing.
\nBy default, we detect the source-system operating system (OS) and apply the appropriate license. Specify\n AWS
to replace the source-system license with an AWS license, if appropriate. Specify BYOL
\n to retain the source-system license, if appropriate.
To use BYOL
, you must have existing licenses with rights to use these licenses in a third party\n cloud, such as AWS. For more information, see Prerequisites in the\n VM Import/Export User Guide.
The license type to be used for the Amazon Machine Image (AMI) after importing.
\nBy default, we detect the source-system operating system (OS) and apply the appropriate license. Specify\n AWS
to replace the source-system license with an Amazon Web Services license, if appropriate. Specify BYOL
\n to retain the source-system license, if appropriate.
To use BYOL
, you must have existing licenses with rights to use these licenses in a third party\n cloud, such as Amazon Web Services. For more information, see Prerequisites in the\n VM Import/Export User Guide.
The tags to apply to the import image task during creation.
", "smithy.api#xmlName": "TagSpecification" } + }, + "UsageOperation": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "The usage operation value. For more information, see AMI billing information fields in the Amazon Elastic Compute Cloud User Guide.
" + } } } }, @@ -36595,7 +36607,7 @@ "target": "com.amazonaws.ec2#KmsKeyId", "traits": { "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "The identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key\n (CMK) that was used to create the encrypted AMI.
", + "smithy.api#documentation": "The identifier for the symmetric KMS key that was used to create the encrypted AMI.
", "smithy.api#xmlName": "kmsKeyId" } }, @@ -36662,6 +36674,14 @@ "smithy.api#documentation": "Any tags assigned to the import image task.
", "smithy.api#xmlName": "tagSet" } + }, + "UsageOperation": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "UsageOperation", + "smithy.api#documentation": "The usage operation value.
", + "smithy.api#xmlName": "usageOperation" + } } } }, @@ -36720,7 +36740,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "The identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to create\n the encrypted image.
", + "smithy.api#documentation": "The identifier for the KMS key that was used to create the encrypted image.
", "smithy.api#xmlName": "kmsKeyId" } }, @@ -36787,6 +36807,14 @@ "smithy.api#documentation": "The ARNs of the license configurations that are associated with the import image task.
", "smithy.api#xmlName": "licenseSpecifications" } + }, + "UsageOperation": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "UsageOperation", + "smithy.api#documentation": "The usage operation value.
", + "smithy.api#xmlName": "usageOperation" + } } }, "traits": { @@ -36814,7 +36842,7 @@ "target": "com.amazonaws.ec2#ImportInstanceResult" }, "traits": { - "smithy.api#documentation": "Creates an import instance task using metadata from the specified disk image.
\nThis API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage\n instead.
\nThis API action is not supported by the AWS Command Line Interface (AWS CLI). For \n information about using the Amazon EC2 CLI, which is deprecated, see\n Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file.
\nFor information about the import manifest referenced by this API action, see VM Import Manifest.
" + "smithy.api#documentation": "Creates an import instance task using metadata from the specified disk image.
\nThis API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage\n instead.
\nThis API action is not supported by the Command Line Interface (CLI). For \n information about using the Amazon EC2 CLI, which is deprecated, see\n Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file.
\nFor information about the import manifest referenced by this API action, see VM Import Manifest.
" } }, "com.amazonaws.ec2#ImportInstanceLaunchSpecification": { @@ -37092,7 +37120,7 @@ "target": "com.amazonaws.ec2#ImportKeyPairResult" }, "traits": { - "smithy.api#documentation": "Imports the public key from an RSA key pair that you created with a third-party tool. \n Compare this with CreateKeyPair, in which Amazon Web Services creates the key pair and gives the keys to you \n (Amazon Web Services keeps a copy of the public key). With ImportKeyPair, you create the key pair and give Amazon Web Services just the public key. \n The private key is never transferred between you and Amazon Web Services.
\nFor more information about key pairs, see Key Pairs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Imports the public key from an RSA or ED25519 key pair that you created with a third-party tool. \n Compare this with CreateKeyPair, in which Amazon Web Services creates the key pair and gives the keys to you \n (Amazon Web Services keeps a copy of the public key). With ImportKeyPair, you create the key pair and give Amazon Web Services just the public key. \n The private key is never transferred between you and Amazon Web Services.
\nFor more information about key pairs, see Amazon EC2 key pairs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#ImportKeyPairRequest": { @@ -37218,13 +37246,13 @@ "Encrypted": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "Specifies whether the destination snapshot of the imported image should be encrypted. The default CMK for EBS is\n used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId
. For more information, see Amazon EBS Encryption in the\n Amazon Elastic Compute Cloud User Guide.
Specifies whether the destination snapshot of the imported image should be encrypted. The default KMS key for EBS is\n used unless you specify a non-default KMS key using KmsKeyId
. For more information, see Amazon EBS Encryption in the\n Amazon Elastic Compute Cloud User Guide.
An identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the\n encrypted snapshot. This parameter is only required if you want to use a non-default CMK; if this\n parameter is not specified, the default CMK for EBS is used. If a KmsKeyId
is\n specified, the Encrypted
flag must also be set.
The CMK identifier may be provided in any of the following formats:
\nKey ID
\nKey alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even\n though you provided an invalid identifier. This action will eventually report failure.
The specified CMK must exist in the Region that the snapshot is being copied to.
\nAmazon EBS does not support asymmetric CMKs.
" + "smithy.api#documentation": "An identifier for the symmetric KMS key to use when creating the\n encrypted snapshot. This parameter is only required if you want to use a non-default KMS key; if this\n parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId
is\n specified, the Encrypted
flag must also be set.
The KMS key identifier may be provided in any of the following formats:
\nKey ID
\nKey alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the alias
namespace, and then the key alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the key
namespace, and then the key ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the alias
namespace, and then the key alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
Amazon Web Services parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even\n though you provided an invalid identifier. This action will eventually report failure.
The specified KMS key must exist in the Region that the snapshot is being copied to.
\nAmazon EBS does not support asymmetric KMS keys.
" } }, "RoleName": { @@ -37361,7 +37389,7 @@ "target": "com.amazonaws.ec2#ImportVolumeResult" }, "traits": { - "smithy.api#documentation": "Creates an import volume task using metadata from the specified disk image.
\nThis API action supports only single-volume VMs. To import multi-volume VMs, use \n ImportImage instead. To import a disk to a snapshot, use\n ImportSnapshot instead.
\nThis API action is not supported by the AWS Command Line Interface (AWS CLI). For \n information about using the Amazon EC2 CLI, which is deprecated, see Importing Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file.
\nFor information about the import manifest referenced by this API action, see VM Import Manifest.
" + "smithy.api#documentation": "Creates an import volume task using metadata from the specified disk image.
\nThis API action supports only single-volume VMs. To import multi-volume VMs, use \n ImportImage instead. To import a disk to a snapshot, use\n ImportSnapshot instead.
\nThis API action is not supported by the Command Line Interface (CLI). For \n information about using the Amazon EC2 CLI, which is deprecated, see Importing Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file.
\nFor information about the import manifest referenced by this API action, see VM Import Manifest.
" } }, "com.amazonaws.ec2#ImportVolumeRequest": { @@ -42547,7 +42575,7 @@ "target": "com.amazonaws.ec2#SensitiveUserData", "traits": { "aws.protocols#ec2QueryName": "KeyMaterial", - "smithy.api#documentation": "An unencrypted PEM encoded RSA private key.
", + "smithy.api#documentation": "An unencrypted PEM encoded RSA or ED25519 private key.
", "smithy.api#xmlName": "keyMaterial" } }, @@ -42607,7 +42635,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "KeyFingerprint", - "smithy.api#documentation": "If you used CreateKeyPair to create the key pair, this is the SHA-1 digest of the DER encoded private key. \n If you used ImportKeyPair to provide Amazon Web Services the public key, this is the MD5 public key fingerprint as specified in section 4 of RFC4716.
", + "smithy.api#documentation": "If you used CreateKeyPair to create the key pair:
\nFor RSA key pairs, the key fingerprint is the SHA-1 digest of the DER encoded private key. \n
\nFor ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256 digest, which \n is the default for OpenSSH, starting with OpenSSH 6.8.
\nIf you used ImportKeyPair to provide Amazon Web Services the public key:
\nFor RSA key pairs, the key fingerprint is the MD5 public key fingerprint as specified in section 4 of RFC4716.
\nFor ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256\n digest, which is the default for OpenSSH, starting with OpenSSH 6.8.
\nThe type of key pair.
", + "smithy.api#xmlName": "keyType" + } + }, "Tags": { "target": "com.amazonaws.ec2#TagList", "traits": { @@ -42644,6 +42680,21 @@ "com.amazonaws.ec2#KeyPairName": { "type": "string" }, + "com.amazonaws.ec2#KeyType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "rsa", + "name": "rsa" + }, + { + "value": "ed25519", + "name": "ed25519" + } + ] + } + }, "com.amazonaws.ec2#KmsKeyId": { "type": "string" }, @@ -60255,7 +60306,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "The identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to create\n the encrypted snapshot.
", + "smithy.api#documentation": "The identifier for the KMS key that was used to create the encrypted snapshot.
", "smithy.api#xmlName": "kmsKeyId" } }, @@ -66706,7 +66757,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Data", - "smithy.api#documentation": "The user data. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you\n can load the text from a file. Otherwise, you must provide Base64-encoded text.
", + "smithy.api#documentation": "The user data. If you are using an Amazon Web Services SDK or command line tool, Base64-encoding is performed for you, and you\n can load the text from a file. Otherwise, you must provide Base64-encoded text.
", "smithy.api#xmlName": "data" } } diff --git a/codegen/sdk-codegen/aws-models/elasticache.2015-02-02.json b/codegen/sdk-codegen/aws-models/elasticache.2015-02-02.json index 4370369d29a..e37ffd0d5e9 100644 --- a/codegen/sdk-codegen/aws-models/elasticache.2015-02-02.json +++ b/codegen/sdk-codegen/aws-models/elasticache.2015-02-02.json @@ -114,7 +114,7 @@ } ], "traits": { - "smithy.api#documentation": "A tag is a key-value pair where the key and value are case-sensitive. \n You can use tags to categorize and track all your ElastiCache resources, with the exception of global replication group. When you add or remove tags on replication groups, those actions will be replicated to all nodes in the replication group. \n For more information, see Resource-level permissions.
\n\n For example, you can use cost-allocation tags to your ElastiCache resources, \n AWS generates a cost allocation report as a comma-separated value (CSV) file \n with your usage and costs aggregated by your tags. \n You can apply tags that represent business categories (such as cost centers, application names, or owners) \n to organize your costs across multiple services.
\nFor more information, \n see Using Cost Allocation Tags in Amazon ElastiCache \n in the ElastiCache User Guide.
" + "smithy.api#documentation": "A tag is a key-value pair where the key and value are case-sensitive. \n You can use tags to categorize and track all your ElastiCache resources, with the exception of global replication group. When you add or remove tags on replication groups, those actions will be replicated to all nodes in the replication group. \n For more information, see Resource-level permissions.
\n\n For example, you can use cost-allocation tags to your ElastiCache resources, \n Amazon generates a cost allocation report as a comma-separated value (CSV) file \n with your usage and costs aggregated by your tags. \n You can apply tags that represent business categories (such as cost centers, application names, or owners) \n to organize your costs across multiple services.
\nFor more information, \n see Using Cost Allocation Tags in Amazon ElastiCache \n in the ElastiCache User Guide.
" } }, "com.amazonaws.elasticache#AddTagsToResourceMessage": { @@ -123,7 +123,7 @@ "ResourceName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource to which the tags are to be added, \n for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
\n or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot
.\n ElastiCache resources are cluster and snapshot.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource to which the tags are to be added, \n for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
\n or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot
.\n ElastiCache resources are cluster and snapshot.
For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Service Namespaces.
", "smithy.api#required": {} } }, @@ -540,7 +540,7 @@ "EC2SecurityGroupOwnerId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The AWS account number of the Amazon EC2 security group owner. \n Note that this is not the same thing as an AWS access key ID - \n you must provide a valid AWS account number for this parameter.
", + "smithy.api#documentation": "The Amazon account number of the Amazon EC2 security group owner. \n Note that this is not the same thing as an Amazon access key ID - \n you must provide a valid Amazon account number for this parameter.
", "smithy.api#required": {} } } @@ -1036,7 +1036,7 @@ "CacheNodeId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The cache node identifier. A node ID is a numeric identifier (0001, 0002, etc.). The combination of cluster ID and node ID uniquely identifies every cache node used in a customer's AWS account.
" + "smithy.api#documentation": "The cache node identifier. A node ID is a numeric identifier (0001, 0002, etc.). The combination of cluster ID and node ID uniquely identifies every cache node used in a customer's Amazon account.
" } }, "CacheNodeStatus": { @@ -1459,7 +1459,7 @@ "OwnerId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The AWS account ID of the cache security group owner.
" + "smithy.api#documentation": "The Amazon account ID of the cache security group owner.
" } }, "CacheSecurityGroupName": { @@ -2467,7 +2467,7 @@ "GlobalReplicationGroupIdSuffix": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The suffix name of a Global datastore. Amazon ElastiCache automatically applies a prefix \n to the Global datastore ID when it is created. Each AWS Region has its own prefix. For instance, a Global datastore ID created in the US-West-1 region will begin with \"dsdfu\" along with the suffix name you provide. The suffix, combined with the auto-generated prefix, guarantees uniqueness of the Global datastore name across multiple regions.
\nFor a full list of AWS Regions and their respective Global datastore iD prefixes, see Using the AWS CLI with Global datastores .
", + "smithy.api#documentation": "The suffix name of a Global datastore. Amazon ElastiCache automatically applies a prefix \n to the Global datastore ID when it is created. Each Amazon Region has its own prefix. For instance, a Global datastore ID created in the US-West-1 region will begin with \"dsdfu\" along with the suffix name you provide. The suffix, combined with the auto-generated prefix, guarantees uniqueness of the Global datastore name across multiple regions.
\nFor a full list of Amazon Regions and their respective Global datastore ID prefixes, see Using the Amazon CLI with Global datastores .
", "smithy.api#required": {} } }, @@ -2562,7 +2562,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
\nThis API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore.
\nA Redis (cluster mode disabled) replication group is a collection of clusters, \n where one of the clusters is a read/write primary and the others are read-only replicas. \n Writes to the primary are asynchronously propagated to the replicas.
\nA Redis cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). \n Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed.\n \n
\nThe node or shard limit can be increased to a maximum of 500 per cluster if the Redis engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between \n 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. \n Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see \n Creating a Subnet Group. For versions below 5.0.6, \n the limit is 250 per cluster.
\nTo request a limit increase, see \n AWS Service Limits \n and choose the limit type Nodes per cluster per instance type.
\nWhen a Redis (cluster mode disabled) replication group has been successfully created, \n you can add one or more read replicas to it, up to a total of 5 read replicas. \n If you need to increase or decrease the number of node groups (console: shards), \n you can avail yourself of ElastiCache for Redis' scaling. For more information,\n see Scaling ElastiCache for Redis Clusters in the ElastiCache User Guide.
\n \n \nThis operation is valid for Redis only.
\nCreates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
\nThis API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore.
\nA Redis (cluster mode disabled) replication group is a collection of clusters, \n where one of the clusters is a read/write primary and the others are read-only replicas. \n Writes to the primary are asynchronously propagated to the replicas.
\nA Redis cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). \n Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed.\n \n
\nThe node or shard limit can be increased to a maximum of 500 per cluster if the Redis engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between \n 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. \n Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see \n Creating a Subnet Group. For versions below 5.0.6, \n the limit is 250 per cluster.
\nTo request a limit increase, see \n Amazon Service Limits \n and choose the limit type Nodes per cluster per instance type.
\nWhen a Redis (cluster mode disabled) replication group has been successfully created, \n you can add one or more read replicas to it, up to a total of 5 read replicas. \n If you need to increase or decrease the number of node groups (console: shards), \n you can avail yourself of ElastiCache for Redis' scaling. For more information,\n see Scaling ElastiCache for Redis Clusters in the ElastiCache User Guide.
\n \n \nThis operation is valid for Redis only.
\nRemove a secondary cluster from the Global datastore using the Global datastore name. The secondary cluster will no longer receive updates from the primary cluster, but will remain as a standalone cluster in that AWS region.
" + "smithy.api#documentation": "Remove a secondary cluster from the Global datastore using the Global datastore name. The secondary cluster will no longer receive updates from the primary cluster, but will remain as a standalone cluster in that Amazon region.
" } }, "com.amazonaws.elasticache#DisassociateGlobalReplicationGroupMessage": { @@ -5037,7 +5122,7 @@ "ReplicationGroupRegion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The AWS region of secondary cluster you wish to remove from the Global datastore
", + "smithy.api#documentation": "The Amazon region of secondary cluster you wish to remove from the Global datastore
", "smithy.api#required": {} } } @@ -5089,7 +5174,7 @@ "EC2SecurityGroupOwnerId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The AWS account ID of the Amazon EC2 security group owner.
" + "smithy.api#documentation": "The Amazon account ID of the Amazon EC2 security group owner.
" } } }, @@ -5267,7 +5352,7 @@ "PrimaryRegion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The AWS region of the primary cluster of the Global datastore
", + "smithy.api#documentation": "The Amazon region of the primary cluster of the Global datastore
", "smithy.api#required": {} } }, @@ -5460,7 +5545,7 @@ } }, "traits": { - "smithy.api#documentation": "Consists of a primary cluster that accepts writes and an associated secondary cluster that resides in a different AWS region. The secondary cluster accepts only reads. The primary\n cluster automatically replicates updates to the secondary cluster.
\n \n \nThe GlobalReplicationGroupIdSuffix represents the name of the Global datastore,\n which is what you use to associate a secondary cluster.
\nConsists of a primary cluster that accepts writes and an associated secondary cluster that resides in a different Amazon region. The secondary cluster accepts only reads. The primary\n cluster automatically replicates updates to the secondary cluster.
\n \n \nThe GlobalReplicationGroupIdSuffix represents the name of the Global datastore,\n which is what you use to associate a secondary cluster.
\nThe AWS region of the Global datastore member.
" + "smithy.api#documentation": "The Amazon region of the Global datastore member.
" } }, "Role": { @@ -5544,7 +5629,7 @@ } }, "traits": { - "smithy.api#documentation": "A member of a Global datastore. It contains the Replication Group Id, the AWS region and the role of the replication group.
" + "smithy.api#documentation": "A member of a Global datastore. It contains the Replication Group Id, the Amazon region and the role of the replication group.
" } }, "com.amazonaws.elasticache#GlobalReplicationGroupMemberList": { @@ -5616,7 +5701,7 @@ "RegionalConfigurations": { "target": "com.amazonaws.elasticache#RegionalConfigurationList", "traits": { - "smithy.api#documentation": "Describes the replication group IDs, the AWS regions where they are stored and the shard configuration for each that comprise the Global datastore
" + "smithy.api#documentation": "Describes the replication group IDs, the Amazon regions where they are stored and the shard configuration for each that comprise the Global datastore
" } }, "ApplyImmediately": { @@ -6114,7 +6199,7 @@ "ResourceName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource for which you want the list of tags, \n for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
\n or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot
.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource for which you want the list of tags, \n for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
\n or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot
.
For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
", "smithy.api#required": {} } } @@ -6754,7 +6839,7 @@ "MultiAZEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.
" + "smithy.api#documentation": "A flag to indicate MultiAZ is enabled.
" } }, "NodeGroupId": { @@ -8125,7 +8210,7 @@ "ReplicationGroupRegion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The AWS region where the cluster is stored
", + "smithy.api#documentation": "The Amazon region where the cluster is stored
", "smithy.api#required": {} } }, @@ -8212,7 +8297,7 @@ "ResourceName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource from which you want the tags removed, \n for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
\n or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot
.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource from which you want the tags removed, \n for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
\n or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot
.
For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Service Namespaces.
", "smithy.api#required": {} } }, @@ -8383,6 +8468,12 @@ "traits": { "smithy.api#documentation": "Returns the destination, format and type of the logs.
" } + }, + "ReplicationGroupCreateTime": { + "target": "com.amazonaws.elasticache#TStamp", + "traits": { + "smithy.api#documentation": "The date and time when the cluster was created.
" + } } }, "traits": { @@ -8967,7 +9058,7 @@ "EC2SecurityGroupOwnerId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The AWS account number of the Amazon EC2 security group owner. \n Note that this is not the same thing as an AWS access key ID - you must provide \n a valid AWS account number for this parameter.
", + "smithy.api#documentation": "The Amazon account number of the Amazon EC2 security group owner. \n Note that this is not the same thing as an Amazon access key ID - you must provide \n a valid Amazon account number for this parameter.
", "smithy.api#required": {} } } @@ -9814,7 +9905,7 @@ } ], "traits": { - "smithy.api#documentation": "Represents the input of a TestFailover
operation which test automatic failover on\n a specified node group (called shard in the console) in a replication group (called cluster in the console).
\n Note the following\n
\nA customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) \n in any rolling 24-hour period.
\nIf calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.
\n\n
If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, \n the first node replacement must complete before a subsequent call can be made.
\nTo determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console,\n the AWS CLI, or the ElastiCache API.\n Look for the following automatic failover related events, listed here in order of occurrance:
\nReplication group message: Test Failover API called for node group
\n
Cache cluster message: Failover from primary node
\n
Replication group message: Failover from primary node
\n
Cache cluster message: Recovering cache nodes
\n
Cache cluster message: Finished recovery for cache nodes
\n
For more information see:
\n\n Viewing ElastiCache Events\n in the ElastiCache User Guide\n
\n\n DescribeEvents in the ElastiCache API Reference
\nAlso see, Testing Multi-AZ in the ElastiCache User Guide.
" + "smithy.api#documentation": "Represents the input of a TestFailover
operation which tests automatic failover on\n a specified node group (called shard in the console) in a replication group (called cluster in the console).
\n Note the following\n
\nA customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and Amazon CLI) \n in any rolling 24-hour period.
\nIf calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.
\n\n
If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, \n the first node replacement must complete before a subsequent call can be made.
\nTo determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console,\n the Amazon CLI, or the ElastiCache API.\n Look for the following automatic failover related events, listed here in order of occurrence:
\nReplication group message: Test Failover API called for node group
\n
Cache cluster message: Failover from primary node
\n
Replication group message: Failover from primary node
\n
Cache cluster message: Recovering cache nodes
\n
Cache cluster message: Finished recovery for cache nodes
\n
For more information see:
\n\n Viewing ElastiCache Events\n in the ElastiCache User Guide\n
\n\n DescribeEvents in the ElastiCache API Reference
\nAlso see, Testing Multi-AZ in the ElastiCache User Guide.
" } }, "com.amazonaws.elasticache#TestFailoverMessage": { diff --git a/codegen/sdk-codegen/aws-models/emr.2009-03-31.json b/codegen/sdk-codegen/aws-models/emr.2009-03-31.json index 0e15e9615aa..d63257dc401 100644 --- a/codegen/sdk-codegen/aws-models/emr.2009-03-31.json +++ b/codegen/sdk-codegen/aws-models/emr.2009-03-31.json @@ -871,7 +871,7 @@ "VisibleToAllUsers": { "target": "com.amazonaws.emr#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether the cluster is visible to IAM principals in the account associated\n with the cluster. When true
, IAM principals in the\n account can perform EMR cluster actions on the cluster that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is false
if a value is not provided when creating a cluster using the EMR API RunJobFlow command or the CLI create-cluster command. The default value is true
when a cluster is created using the Management Console. IAM principals that are allowed to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
Indicates whether the cluster is visible to IAM principals in the account associated\n with the cluster. When true
, IAM principals in the\n account can perform EMR cluster actions on the cluster that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is true
if a value is not provided when creating a\n cluster using the EMR API RunJobFlow command, the CLI\n create-cluster command, or the Management Console. IAM principals that are\n allowed to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity\n Store. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference.\n Either IdentityName
or IdentityId
must be specified.
The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity\n Store. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference.\n Either IdentityName
or IdentityId
must be specified, but not both.
The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference.\n Either IdentityName
or IdentityId
must be specified.
The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference.\n Either IdentityName
or IdentityId
must be specified, but not both.
Provides cluster-level details including status, hardware and software configuration,\n VPC settings, and so on.
", + "smithy.api#documentation": "Provides cluster-level details including status, hardware and software configuration,\n VPC settings, and so on.
", "smithy.waiters#waitable": { "ClusterRunning": { "acceptors": [ @@ -2524,7 +2524,7 @@ }, "aws.protocols#awsJson1_1": {}, "smithy.api#documentation": "Amazon EMR is a web service that makes it easier to process large amounts of data\n efficiently. Amazon EMR uses Hadoop processing combined with several Amazon Web Services services to do\n tasks such as web indexing, data mining, log file analysis, machine learning, scientific\n simulation, and data warehouse management.
", - "smithy.api#title": "Amazon Elastic MapReduce", + "smithy.api#title": "Amazon EMR", "smithy.api#xmlNamespace": { "uri": "http://elasticmapreduce.amazonaws.com/doc/2009-03-31" } @@ -3349,6 +3349,12 @@ "traits": { "smithy.api#documentation": "An automatic scaling policy for a core instance group or task instance group in an\n Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically\n adds and terminates EC2 instances in response to the value of a CloudWatch metric. See\n PutAutoScalingPolicy.
" } + }, + "CustomAmiId": { + "target": "com.amazonaws.emr#XmlStringMaxLen256", + "traits": { + "smithy.api#documentation": "The custom AMI ID to use for the provisioned instance group.
" + } } }, "traits": { @@ -3414,6 +3420,12 @@ "traits": { "smithy.api#documentation": "An automatic scaling policy for a core instance group or task instance group in an\n Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically\n adds and terminates EC2 instances in response to the value of a CloudWatch metric. See\n PutAutoScalingPolicy.
" } + }, + "CustomAmiId": { + "target": "com.amazonaws.emr#XmlStringMaxLen256", + "traits": { + "smithy.api#documentation": "The custom AMI ID to use for the provisioned instance group.
" + } } }, "traits": { @@ -3519,6 +3531,12 @@ "traits": { "smithy.api#documentation": "The date/time the instance group was terminated.
" } + }, + "CustomAmiId": { + "target": "com.amazonaws.emr#XmlStringMaxLen256", + "traits": { + "smithy.api#documentation": "The custom AMI ID to use for the provisioned instance group.
" + } } }, "traits": { @@ -3997,6 +4015,12 @@ "traits": { "smithy.api#documentation": "A configuration classification that applies when provisioning cluster instances, which\n can include configurations for applications and software that run on the cluster.
" } + }, + "CustomAmiId": { + "target": "com.amazonaws.emr#XmlStringMaxLen256", + "traits": { + "smithy.api#documentation": "The custom AMI ID to use for the instance type.
" + } } }, "traits": { @@ -4053,6 +4077,12 @@ "traits": { "smithy.api#documentation": "Evaluates to TRUE
when the specified InstanceType
is\n EBS-optimized.
The custom AMI ID to use for the instance type.
" + } } }, "traits": { @@ -4190,7 +4220,7 @@ "VisibleToAllUsers": { "target": "com.amazonaws.emr#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether the cluster is visible to IAM principals in the account associated\n with the cluster. When true
, IAM principals in the\n account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is false
if a value is not provided when creating a cluster using the EMR API RunJobFlow command or the CLI create-cluster command. The default value is true
when a cluster is created using the Management Console. IAM principals that are authorized to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
Indicates whether the cluster is visible to IAM principals in the account associated\n with the cluster. When true
, IAM principals in the\n account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is true
if a value is not provided when creating a\n cluster using the EMR API RunJobFlow command, the CLI\n create-cluster command, or the Management Console. IAM principals that are authorized to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
Specifies the next page of results. If NextToken
is not specified, which is usually the case for the first request of ListReleaseLabels, the first page of results are determined by other filtering parameters or by the latest version. The ListReleaseLabels
request fails if the identity (AWS AccountID) and all filtering parameters are different from the original request, or if the NextToken
is expired or tampered with.
Specifies the next page of results. If NextToken
is not specified, which is usually the case for the first request of ListReleaseLabels, the first page of results are determined by other filtering parameters or by the latest version. The ListReleaseLabels
request fails if the identity (account ID) and all filtering parameters are different from the original request, or if the NextToken
is expired or tampered with.
Set this value to true
so that IAM principals in the account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to false
for clusters created using the EMR API or the CLI create-cluster command.
When set to false
, only the IAM principal that created the cluster and the account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
Set this value to true
so that IAM principals in the account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to true
for clusters created using the EMR API or the CLI create-cluster command.
When set to false
, only the IAM principal that created the cluster and the account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true
, IAM principals in the\n account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.
This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.
\nFor more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
" + "smithy.api#documentation": "Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true
, IAM principals in the\n account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.
This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.
\nFor more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
" } }, "com.amazonaws.emr#SetVisibleToAllUsersInput": { diff --git a/codegen/sdk-codegen/aws-models/iotsitewise.2019-12-02.json b/codegen/sdk-codegen/aws-models/iotsitewise.2019-12-02.json index c88ac189332..a5e62d6c04a 100644 --- a/codegen/sdk-codegen/aws-models/iotsitewise.2019-12-02.json +++ b/codegen/sdk-codegen/aws-models/iotsitewise.2019-12-02.json @@ -5283,10 +5283,17 @@ "type": { "target": "com.amazonaws.iotsitewise#InterpolationType", "traits": { - "smithy.api#documentation": "The interpolation type.
\nValid values: LINEAR_INTERPOLATION
\n
The interpolation type.
\nValid values: LINEAR_INTERPOLATION | LOCF_INTERPOLATION
\n
For the LOCF_INTERPOLATION
interpolation, if no data point is found for an interval, \n IoT SiteWise returns the same interpolated value calculated for the previous interval \n and carries forward this interpolated value until a new data point is found.
For example, you can get the interpolated temperature values for a wind turbine every 24 hours over a duration of 7 days. \n If the LOCF_INTERPOLATION
interpolation starts on July 1, 2021, at 9 AM, IoT SiteWise uses the data points from July 1, 2021, \n at 9 AM to July 2, 2021, at 9 AM to compute the first interpolated value. \n If no data points is found after 9 A.M. on July 2, 2021, IoT SiteWise uses the same interpolated value for the rest of the days.
The query interval for the window in seconds. IoT SiteWise computes each interpolated value by using data points \n from the timestamp of each interval minus the window to the timestamp of each interval plus the window. \n If not specified, the window is between the start time minus the interval and the end time plus the interval.
\nIf you specify a value for the intervalWindowInSeconds
parameter, \n the type
parameter must be LINEAR_INTERPOLATION
.
If no data point is found during the specified query window, \n IoT SiteWise won't return an interpolated value for the interval. \n This indicates that there's a gap in the ingested data points.
\nFor example, you can get the interpolated temperature values for a wind turbine \n every 24 hours over a duration of 7 days. If the interpolation starts on July 1, 2021, \n at 9 AM with a window of 2 hours, IoT SiteWise uses the data points from 7 AM (9 AM - 2 hours) \n to 11 AM (9 AM + 2 hours) on July 2, 2021 to compute the first interpolated value, \n uses the data points from 7 AM (9 AM - 2 hours) to 11 AM (9 AM + 2 hours) on July 3, 2021 \n to compute the second interpolated value, and so on.
", + "smithy.api#httpQuery": "intervalWindowInSeconds" + } } } }, @@ -5617,6 +5624,16 @@ } } }, + "com.amazonaws.iotsitewise#IntervalWindowInSeconds": { + "type": "long", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 320000000 + } + } + }, "com.amazonaws.iotsitewise#InvalidRequestException": { "type": "structure", "members": { @@ -7973,7 +7990,7 @@ "offset": { "target": "com.amazonaws.iotsitewise#Offset", "traits": { - "smithy.api#documentation": "The offset for the tumbling window. The offset
parameter accepts the following:
The offset time.
\nFor example, if you specify 18h
for offset
\n and 1d
for interval
, IoT SiteWise aggregates data in one of the following ways:
If you create the metric before or at 6:00 p.m. (UTC), \n you get the first aggregation result at 6 p.m. (UTC) on the day when you create the metric.
\nIf you create the metric after 6:00 p.m. (UTC), \n you get the first aggregation result at 6 p.m. (UTC) the next day.
\nThe ISO 8601 format.
\nFor example, if you specify PT18H
for offset
\n and 1d
for interval
, IoT SiteWise aggregates data in one of the following ways:
If you create the metric before or at 6:00 p.m. (UTC), \n you get the first aggregation result at 6 p.m. (UTC) on the day when you create the metric.
\nIf you create the metric after 6:00 p.m. (UTC), \n you get the first aggregation result at 6 p.m. (UTC) the next day.
\nThe 24-hour clock.
\nFor example, if you specify 00:03:00
for offset
\n and 5m
for interval
, and you create the metric at 2 p.m. (UTC), \n you get the first aggregation result at 2:03 p.m. (UTC). \n You get the second aggregation result at 2:08 p.m. (UTC).
The offset time zone.
\nFor example, if you specify 2021-07-23T18:00-08
for offset
\n and 1d
for interval
, IoT SiteWise aggregates data in one of the following ways:
If you create the metric before or at 6:00 p.m. (PST), \n you get the first aggregation result at 6 p.m. (PST) on the day when you create the metric.
\nIf you create the metric after 6:00 p.m. (PST), \n you get the first aggregation result at 6 p.m. (PST) the next day.
\nThe offset for the tumbling window. The offset
parameter accepts the following:
The offset time.
\nFor example, if you specify 18h
for offset
\n and 1d
for interval
, IoT SiteWise aggregates data in one of the following ways:
If you create the metric before or at 6:00 PM (UTC), \n you get the first aggregation result at 6 PM (UTC) on the day when you create the metric.
\nIf you create the metric after 6:00 PM (UTC), \n you get the first aggregation result at 6 PM (UTC) the next day.
\nThe ISO 8601 format.
\nFor example, if you specify PT18H
for offset
\n and 1d
for interval
, IoT SiteWise aggregates data in one of the following ways:
If you create the metric before or at 6:00 PM (UTC), \n you get the first aggregation result at 6 PM (UTC) on the day when you create the metric.
\nIf you create the metric after 6:00 PM (UTC), \n you get the first aggregation result at 6 PM (UTC) the next day.
\nThe 24-hour clock.
\nFor example, if you specify 00:03:00
for offset
\n and 5m
for interval
, and you create the metric at 2 PM (UTC), \n you get the first aggregation result at 2:03 PM (UTC). \n You get the second aggregation result at 2:08 PM (UTC).
The offset time zone.
\nFor example, if you specify 2021-07-23T18:00-08
for offset
\n and 1d
for interval
, IoT SiteWise aggregates data in one of the following ways:
If you create the metric before or at 6:00 PM (PST), \n you get the first aggregation result at 6 PM (PST) on the day when you create the metric.
\nIf you create the metric after 6:00 PM (PST), \n you get the first aggregation result at 6 PM (PST) the next day.
\nFor Lambda@Edge functions, the Region of the master function. For example, us-east-1
filters\n the list of functions to only include Lambda@Edge functions replicated from a master function in US East (N.\n Virginia). If specified, you must set FunctionVersion
to ALL
.
For Lambda@Edge functions, the Amazon Web Services Region of the master function. For example, us-east-1
filters\n the list of functions to only include Lambda@Edge functions replicated from a master function in US East (N.\n Virginia). If specified, you must set FunctionVersion
to ALL
.
The name of the Access Control List
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "Indicates ACL status. Can be \"creating\", \"active\", \"modifying\", \"deleting\".
" + } + }, + "UserNames": { + "target": "com.amazonaws.memorydb#UserNameList", + "traits": { + "smithy.api#documentation": "The list of user names that belong to the ACL.
" + } + }, + "MinimumEngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The minimum engine version supported for the ACL
" + } + }, + "PendingChanges": { + "target": "com.amazonaws.memorydb#ACLPendingChanges", + "traits": { + "smithy.api#documentation": "A list of updates being applied to the ACL.
" + } + }, + "Clusters": { + "target": "com.amazonaws.memorydb#ACLClusterNameList", + "traits": { + "smithy.api#documentation": "A list of clusters associated with the ACL.
" + } + }, + "ARN": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the ACL
" + } + } + }, + "traits": { + "smithy.api#documentation": "An Access Control List. You can authenticate users with Access Contol Lists.\n \n ACLs enable you to control cluster access by grouping users. These Access control lists are designed as a way to organize access to clusters.
" + } + }, + "com.amazonaws.memorydb#ACLAlreadyExistsFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ACLAlreadyExists", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#ACLClusterNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String" + } + }, + "com.amazonaws.memorydb#ACLList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#ACL" + } + }, + "com.amazonaws.memorydb#ACLName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + }, + "smithy.api#pattern": "^[a-zA-Z][a-zA-Z0-9\\-]*$" + } + }, + "com.amazonaws.memorydb#ACLNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#ACLName" + } + }, + "com.amazonaws.memorydb#ACLNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ACLNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#ACLPendingChanges": { + "type": "structure", + "members": { + "UserNamesToRemove": { + "target": "com.amazonaws.memorydb#UserNameList", + "traits": { + "smithy.api#documentation": "A list of user names being removed from the ACL
" + } + }, + "UserNamesToAdd": { + "target": "com.amazonaws.memorydb#UserNameList", + "traits": { + "smithy.api#documentation": "A list of users being added to the ACL
" + } + } + }, + "traits": { + "smithy.api#documentation": "Returns the updates being applied to the ACL.
" + } + }, + "com.amazonaws.memorydb#ACLQuotaExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ACLQuotaExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#ACLsUpdateStatus": { + "type": "structure", + "members": { + "ACLToApply": { + "target": "com.amazonaws.memorydb#ACLName", + "traits": { + "smithy.api#documentation": "A list of ACLs pending to be applied.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The status of the ACL update
" + } + }, + "com.amazonaws.memorydb#APICallRateForCustomerExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "APICallRateForCustomerExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#AZStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "singleaz", + "name": "SingleAZ" + }, + { + "value": "multiaz", + "name": "MultiAZ" + } + ] + } + }, + "com.amazonaws.memorydb#AccessString": { + "type": "string", + "traits": { + "smithy.api#pattern": "\\S" + } + }, + "com.amazonaws.memorydb#AmazonMemoryDB": { + "type": "service", + "version": "2021-01-01", + "operations": [ + { + "target": "com.amazonaws.memorydb#BatchUpdateCluster" + }, + { + "target": "com.amazonaws.memorydb#CopySnapshot" + }, + { + "target": "com.amazonaws.memorydb#CreateACL" + }, + { + "target": "com.amazonaws.memorydb#CreateCluster" + }, + { + "target": "com.amazonaws.memorydb#CreateParameterGroup" + }, + { + "target": "com.amazonaws.memorydb#CreateSnapshot" + }, + { + "target": "com.amazonaws.memorydb#CreateSubnetGroup" + }, + { + "target": "com.amazonaws.memorydb#CreateUser" + }, + { + "target": "com.amazonaws.memorydb#DeleteACL" + }, + { + "target": "com.amazonaws.memorydb#DeleteCluster" + }, + { + "target": "com.amazonaws.memorydb#DeleteParameterGroup" + }, + { + "target": "com.amazonaws.memorydb#DeleteSnapshot" + }, + { + "target": "com.amazonaws.memorydb#DeleteSubnetGroup" + }, + { + "target": "com.amazonaws.memorydb#DeleteUser" + }, + { + "target": "com.amazonaws.memorydb#DescribeACLs" + }, + { + "target": "com.amazonaws.memorydb#DescribeClusters" + }, + { + "target": "com.amazonaws.memorydb#DescribeEngineVersions" + }, + { + "target": "com.amazonaws.memorydb#DescribeEvents" + }, + { + "target": 
"com.amazonaws.memorydb#DescribeParameterGroups" + }, + { + "target": "com.amazonaws.memorydb#DescribeParameters" + }, + { + "target": "com.amazonaws.memorydb#DescribeServiceUpdates" + }, + { + "target": "com.amazonaws.memorydb#DescribeSnapshots" + }, + { + "target": "com.amazonaws.memorydb#DescribeSubnetGroups" + }, + { + "target": "com.amazonaws.memorydb#DescribeUsers" + }, + { + "target": "com.amazonaws.memorydb#FailoverShard" + }, + { + "target": "com.amazonaws.memorydb#ListAllowedNodeTypeUpdates" + }, + { + "target": "com.amazonaws.memorydb#ListTags" + }, + { + "target": "com.amazonaws.memorydb#ResetParameterGroup" + }, + { + "target": "com.amazonaws.memorydb#TagResource" + }, + { + "target": "com.amazonaws.memorydb#UntagResource" + }, + { + "target": "com.amazonaws.memorydb#UpdateACL" + }, + { + "target": "com.amazonaws.memorydb#UpdateCluster" + }, + { + "target": "com.amazonaws.memorydb#UpdateParameterGroup" + }, + { + "target": "com.amazonaws.memorydb#UpdateSubnetGroup" + }, + { + "target": "com.amazonaws.memorydb#UpdateUser" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "MemoryDB", + "arnNamespace": "memorydb", + "cloudFormationName": "MemoryDB", + "cloudTrailEventSource": "memorydb.amazonaws.com", + "endpointPrefix": "memory-db" + }, + "aws.auth#sigv4": { + "name": "memorydb" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "Amazon MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures.\n \n MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis’ flexible and friendly data structures, APIs, and commands.
", + "smithy.api#title": "Amazon MemoryDB", + "smithy.api#xmlNamespace": { + "uri": "http://memorydb.amazonaws.com/doc/2021-01-01/" + } + } + }, + "com.amazonaws.memorydb#Authentication": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.memorydb#AuthenticationType", + "traits": { + "smithy.api#documentation": "Indicates whether the user requires a password to authenticate.
" + } + }, + "PasswordCount": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of passwords belonging to the user. The maximum is two.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Denotes the user's authentication properties, such as whether it requires a password to authenticate. Used in output responses.
" + } + }, + "com.amazonaws.memorydb#AuthenticationMode": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.memorydb#InputAuthenticationType", + "traits": { + "smithy.api#documentation": "Indicates whether the user requires a password to authenticate. All newly-created users require a password.
" + } + }, + "Passwords": { + "target": "com.amazonaws.memorydb#PasswordListInput", + "traits": { + "smithy.api#documentation": "The password(s) used for authentication
" + } + } + }, + "traits": { + "smithy.api#documentation": "Denotes the user's authentication properties, such as whether it requires a password to authenticate. Used in output responses.
" + } + }, + "com.amazonaws.memorydb#AuthenticationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "password", + "name": "PASSWORD" + }, + { + "value": "no-password", + "name": "NO_PASSWORD" + } + ] + } + }, + "com.amazonaws.memorydb#AvailabilityZone": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the Availability Zone.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Indicates if the cluster has a Multi-AZ configuration (multiaz) or not (singleaz).
" + } + }, + "com.amazonaws.memorydb#AwsQueryErrorMessage": { + "type": "string" + }, + "com.amazonaws.memorydb#BatchUpdateCluster": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#BatchUpdateClusterRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#BatchUpdateClusterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ServiceUpdateNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Apply the service update to a list of clusters supplied. For more information on service updates and applying them, see Applying the service updates.
" + } + }, + "com.amazonaws.memorydb#BatchUpdateClusterRequest": { + "type": "structure", + "members": { + "ClusterNames": { + "target": "com.amazonaws.memorydb#ClusterNameList", + "traits": { + "smithy.api#documentation": "The cluster names to apply the updates.
", + "smithy.api#required": {} + } + }, + "ServiceUpdate": { + "target": "com.amazonaws.memorydb#ServiceUpdateRequest", + "traits": { + "smithy.api#documentation": "The unique ID of the service update
" + } + } + } + }, + "com.amazonaws.memorydb#BatchUpdateClusterResponse": { + "type": "structure", + "members": { + "ProcessedClusters": { + "target": "com.amazonaws.memorydb#ClusterList", + "traits": { + "smithy.api#documentation": "The list of clusters that have been updated.
" + } + }, + "UnprocessedClusters": { + "target": "com.amazonaws.memorydb#UnprocessedClusterList", + "traits": { + "smithy.api#documentation": "The list of clusters where updates have not been applied.
" + } + } + } + }, + "com.amazonaws.memorydb#Boolean": { + "type": "boolean" + }, + "com.amazonaws.memorydb#BooleanOptional": { + "type": "boolean", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.memorydb#Cluster": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The user-supplied name of the cluster. This identifier is a unique key that identifies a cluster.
" + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A description of the cluster
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The status of the cluster. For example, Available, Updating, Creating.
" + } + }, + "PendingUpdates": { + "target": "com.amazonaws.memorydb#ClusterPendingUpdates", + "traits": { + "smithy.api#documentation": "A group of settings that are currently being applied.
" + } + }, + "NumberOfShards": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of shards in the cluster
" + } + }, + "Shards": { + "target": "com.amazonaws.memorydb#ShardList", + "traits": { + "smithy.api#documentation": "A list of shards that are members of the cluster.
" + } + }, + "AvailabilityMode": { + "target": "com.amazonaws.memorydb#AZStatus", + "traits": { + "smithy.api#documentation": "Indicates if the cluster has a Multi-AZ configuration (multiaz) or not (singleaz).
" + } + }, + "ClusterEndpoint": { + "target": "com.amazonaws.memorydb#Endpoint", + "traits": { + "smithy.api#documentation": "The cluster's configuration endpoint
" + } + }, + "NodeType": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The cluster's node type
" + } + }, + "EngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Redis engine version used by the cluster
" + } + }, + "EnginePatchVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Redis engine patch version used by the cluster
" + } + }, + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group used by the cluster
" + } + }, + "ParameterGroupStatus": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The status of the parameter group used by the cluster, for example 'active' or 'applying'.
" + } + }, + "SecurityGroups": { + "target": "com.amazonaws.memorydb#SecurityGroupMembershipList", + "traits": { + "smithy.api#documentation": "A list of security groups used by the cluster
" + } + }, + "SubnetGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the subnet group used by the cluster
" + } + }, + "TLSEnabled": { + "target": "com.amazonaws.memorydb#BooleanOptional", + "traits": { + "smithy.api#documentation": "A flag to indicate if In-transit encryption is enabled
" + } + }, + "KmsKeyId": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The ID of the KMS key used to encrypt the cluster
" + } + }, + "ARN": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the cluster.
" + } + }, + "SnsTopicArn": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the SNS notification topic
" + } + }, + "SnsTopicStatus": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The SNS topic must be in Active status to receive notifications
" + } + }, + "SnapshotRetentionLimit": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of days for which MemoryDB retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
" + } + }, + "MaintenanceWindow": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
" + } + }, + "SnapshotWindow": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of your shard.\n \n Example: 05:00-09:00\n \n If you do not specify this parameter, MemoryDB automatically chooses an appropriate time range.
" + } + }, + "ACLName": { + "target": "com.amazonaws.memorydb#ACLName", + "traits": { + "smithy.api#documentation": "The name of the Access Control List associated with this cluster.
" + } + }, + "AutoMinorVersionUpgrade": { + "target": "com.amazonaws.memorydb#BooleanOptional", + "traits": { + "smithy.api#documentation": "When set to true, the cluster will automatically receive minor engine version upgrades after launch.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains all of the attributes of a specific cluster.
" + } + }, + "com.amazonaws.memorydb#ClusterAlreadyExistsFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ClusterAlreadyExists", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#ClusterConfiguration": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the cluster
" + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The description of the cluster configuration
" + } + }, + "NodeType": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The node type used for the cluster
" + } + }, + "EngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Redis engine version used by the cluster
" + } + }, + "MaintenanceWindow": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The specified maintenance window for the cluster
" + } + }, + "TopicArn": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the SNS notification topic for the cluster
" + } + }, + "Port": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The port used by the cluster
" + } + }, + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of parameter group used by the cluster
" + } + }, + "SubnetGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the subnet group used by the cluster
" + } + }, + "VpcId": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The ID of the VPC the cluster belongs to
" + } + }, + "SnapshotRetentionLimit": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The snapshot retention limit set by the cluster
" + } + }, + "SnapshotWindow": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The snapshot window set by the cluster
" + } + }, + "NumShards": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of shards in the cluster
" + } + }, + "Shards": { + "target": "com.amazonaws.memorydb#ShardDetails", + "traits": { + "smithy.api#documentation": "The list of shards in the cluster
" + } + } + }, + "traits": { + "smithy.api#documentation": "A list of cluster configuration options.
" + } + }, + "com.amazonaws.memorydb#ClusterList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Cluster", + "traits": { + "smithy.api#xmlName": "Cluster" + } + } + }, + "com.amazonaws.memorydb#ClusterNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, + "com.amazonaws.memorydb#ClusterNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ClusterNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#ClusterPendingUpdates": { + "type": "structure", + "members": { + "Resharding": { + "target": "com.amazonaws.memorydb#ReshardingStatus", + "traits": { + "smithy.api#documentation": "The status of an online resharding operation.
" + } + }, + "ACLs": { + "target": "com.amazonaws.memorydb#ACLsUpdateStatus", + "traits": { + "smithy.api#documentation": "A list of ACLs associated with the cluster that are being updated
" + } + }, + "ServiceUpdates": { + "target": "com.amazonaws.memorydb#PendingModifiedServiceUpdateList", + "traits": { + "smithy.api#documentation": "A list of service updates being applied to the cluster
" + } + } + }, + "traits": { + "smithy.api#documentation": "A list of updates being applied to the cluster
" + } + }, + "com.amazonaws.memorydb#ClusterQuotaForCustomerExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ClusterQuotaForCustomerExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#CopySnapshot": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#CopySnapshotRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#CopySnapshotResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#InvalidSnapshotStateFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotAlreadyExistsFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotQuotaExceededFault" + }, + { + "target": "com.amazonaws.memorydb#TagQuotaPerResourceExceeded" + } + ], + "traits": { + "smithy.api#documentation": "Makes a copy of an existing snapshot.
" + } + }, + "com.amazonaws.memorydb#CopySnapshotRequest": { + "type": "structure", + "members": { + "SourceSnapshotName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of an existing snapshot from which to make a copy.
", + "smithy.api#required": {} + } + }, + "TargetSnapshotName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A name for the snapshot copy. MemoryDB does not permit overwriting a snapshot, therefore this name must be unique within its context - MemoryDB or an Amazon S3 bucket if exporting.
", + "smithy.api#required": {} + } + }, + "TargetBucket": { + "target": "com.amazonaws.memorydb#TargetBucket", + "traits": { + "smithy.api#documentation": "The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.\n \n When using this parameter to export a snapshot, be sure MemoryDB has the needed permissions to this S3 bucket. For more information, see \n \n Step 2: Grant MemoryDB Access to Your Amazon S3 Bucket. \n \n
" + } + }, + "KmsKeyId": { + "target": "com.amazonaws.memorydb#KmsKeyId", + "traits": { + "smithy.api#documentation": "The ID of the KMS key used to encrypt the target snapshot.
" + } + }, + "Tags": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.
" + } + } + } + }, + "com.amazonaws.memorydb#CopySnapshotResponse": { + "type": "structure", + "members": { + "Snapshot": { + "target": "com.amazonaws.memorydb#Snapshot", + "traits": { + "smithy.api#documentation": "Represents a copy of an entire cluster as of the time when the snapshot was taken.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateACL": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#CreateACLRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#CreateACLResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLAlreadyExistsFault" + }, + { + "target": "com.amazonaws.memorydb#ACLQuotaExceededFault" + }, + { + "target": "com.amazonaws.memorydb#DefaultUserRequired" + }, + { + "target": "com.amazonaws.memorydb#DuplicateUserNameFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#TagQuotaPerResourceExceeded" + }, + { + "target": "com.amazonaws.memorydb#UserNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Creates an Access Control List. For more information, see Authenticating users with Access Contol Lists (ACLs).
" + } + }, + "com.amazonaws.memorydb#CreateACLRequest": { + "type": "structure", + "members": { + "ACLName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the Access Control List.
", + "smithy.api#required": {} + } + }, + "UserNames": { + "target": "com.amazonaws.memorydb#UserNameListInput", + "traits": { + "smithy.api#documentation": "The list of users that belong to the Access Control List.
" + } + }, + "Tags": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateACLResponse": { + "type": "structure", + "members": { + "ACL": { + "target": "com.amazonaws.memorydb#ACL", + "traits": { + "smithy.api#documentation": "The newly-created Access Control List.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateCluster": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#CreateClusterRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#CreateClusterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ClusterAlreadyExistsFault" + }, + { + "target": "com.amazonaws.memorydb#ClusterQuotaForCustomerExceededFault" + }, + { + "target": "com.amazonaws.memorydb#InsufficientClusterCapacityFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidACLStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidCredentialsException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#InvalidVPCNetworkStateFault" + }, + { + "target": "com.amazonaws.memorydb#NodeQuotaForClusterExceededFault" + }, + { + "target": "com.amazonaws.memorydb#NodeQuotaForCustomerExceededFault" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ShardsPerClusterQuotaExceededFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#TagQuotaPerResourceExceeded" + } + ], + "traits": { + "smithy.api#documentation": "Creates a cluster. All nodes in the cluster run the same protocol-compliant engine software.
" + } + }, + "com.amazonaws.memorydb#CreateClusterRequest": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the cluster. This value must be unique as it also serves as the cluster identifier.
", + "smithy.api#required": {} + } + }, + "NodeType": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The compute and memory capacity of the nodes in the cluster.
", + "smithy.api#required": {} + } + }, + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group associated with the cluster.
" + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional description of the cluster.
" + } + }, + "NumShards": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of shards the cluster will contain.
\nClusters can have up to 500 shards, with your data partitioned across the shards. For example, you can choose to configure a 500 node cluster that ranges between \n\t\t\t83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. \n\t\t\tCommon pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters.
" + } + }, + "NumReplicasPerShard": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of replicas to apply to each shard. The limit is 5.
" + } + }, + "SubnetGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the subnet group to be used for the cluster.
" + } + }, + "SecurityGroupIds": { + "target": "com.amazonaws.memorydb#SecurityGroupIdsList", + "traits": { + "smithy.api#documentation": "A list of security group names to associate with this cluster.
" + } + }, + "MaintenanceWindow": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
(24H Clock UTC). The minimum maintenance window is a 60 minute period.
The port number on which each of the nodes accepts connections.
" + } + }, + "SnsTopicArn": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
" + } + }, + "TLSEnabled": { + "target": "com.amazonaws.memorydb#BooleanOptional", + "traits": { + "smithy.api#documentation": "A flag to enable in-transit encryption on the cluster.
" + } + }, + "KmsKeyId": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The ID of the KMS key used to encrypt the cluster.
" + } + }, + "SnapshotArns": { + "target": "com.amazonaws.memorydb#SnapshotArnsList", + "traits": { + "smithy.api#documentation": "A list of Amazon Resource Names (ARN) that uniquely identify the RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new cluster. The Amazon S3 object name in the ARN cannot contain any commas.
" + } + }, + "SnapshotName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of a snapshot from which to restore data into the new cluster. The snapshot status changes to restoring while the new cluster is being created.
" + } + }, + "SnapshotRetentionLimit": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of days for which MemoryDB retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
" + } + }, + "Tags": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue.
" + } + }, + "SnapshotWindow": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of your shard.
\n \nExample: 05:00-09:00
\n \nIf you do not specify this parameter, MemoryDB automatically chooses an appropriate time range.
" + } + }, + "ACLName": { + "target": "com.amazonaws.memorydb#ACLName", + "traits": { + "smithy.api#documentation": "The name of the Access Control List to associate with the cluster.
", + "smithy.api#required": {} + } + }, + "EngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The version number of the Redis engine to be used for the cluster.
" + } + }, + "AutoMinorVersionUpgrade": { + "target": "com.amazonaws.memorydb#BooleanOptional", + "traits": { + "smithy.api#documentation": "When set to true, the cluster will automatically receive minor engine version upgrades after launch.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateClusterResponse": { + "type": "structure", + "members": { + "Cluster": { + "target": "com.amazonaws.memorydb#Cluster", + "traits": { + "smithy.api#documentation": "The newly-created cluster.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateParameterGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#CreateParameterGroupRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#CreateParameterGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterGroupStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupAlreadyExistsFault" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupQuotaExceededFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#TagQuotaPerResourceExceeded" + } + ], + "traits": { + "smithy.api#documentation": "Creates a new MemoryDB parameter group. A parameter group is a collection of parameters and their values that are applied to all of the nodes in any cluster. For \n more information, see Configuring engine parameters using parameter groups.\n \n
" + } + }, + "com.amazonaws.memorydb#CreateParameterGroupRequest": { + "type": "structure", + "members": { + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group.
", + "smithy.api#required": {} + } + }, + "Family": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group family that the parameter group can be used with.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional description of the parameter group.
" + } + }, + "Tags": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateParameterGroupResponse": { + "type": "structure", + "members": { + "ParameterGroup": { + "target": "com.amazonaws.memorydb#ParameterGroup", + "traits": { + "smithy.api#documentation": "The newly-created parameter group.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateSnapshot": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#CreateSnapshotRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#CreateSnapshotResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidClusterStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotAlreadyExistsFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotQuotaExceededFault" + }, + { + "target": "com.amazonaws.memorydb#TagQuotaPerResourceExceeded" + } + ], + "traits": { + "smithy.api#documentation": "Creates a copy of an entire cluster at a specific moment in time.
" + } + }, + "com.amazonaws.memorydb#CreateSnapshotRequest": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The snapshot is created from this cluster.
", + "smithy.api#required": {} + } + }, + "SnapshotName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A name for the snapshot being created.
", + "smithy.api#required": {} + } + }, + "KmsKeyId": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The ID of the KMS key used to encrypt the snapshot.
" + } + }, + "Tags": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateSnapshotResponse": { + "type": "structure", + "members": { + "Snapshot": { + "target": "com.amazonaws.memorydb#Snapshot", + "traits": { + "smithy.api#documentation": "The newly-created snapshot.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateSubnetGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#CreateSubnetGroupRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#CreateSubnetGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidSubnet" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupAlreadyExistsFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupQuotaExceededFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetNotAllowedFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetQuotaExceededFault" + }, + { + "target": "com.amazonaws.memorydb#TagQuotaPerResourceExceeded" + } + ], + "traits": { + "smithy.api#documentation": "Creates a subnet group. A subnet group is a collection of subnets (typically private) that you can designate for your clusters running in an Amazon Virtual Private Cloud (VPC) environment.\n \n When you create a cluster in an Amazon VPC, you must specify a subnet group. MemoryDB uses that subnet group to choose a subnet and IP addresses within that subnet to associate with your nodes. \n For more information, see Subnets and subnet groups.
" + } + }, + "com.amazonaws.memorydb#CreateSubnetGroupRequest": { + "type": "structure", + "members": { + "SubnetGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the subnet group.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A description for the subnet group.
" + } + }, + "SubnetIds": { + "target": "com.amazonaws.memorydb#SubnetIdentifierList", + "traits": { + "smithy.api#documentation": "A list of VPC subnet IDs for the subnet group.
", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateSubnetGroupResponse": { + "type": "structure", + "members": { + "SubnetGroup": { + "target": "com.amazonaws.memorydb#SubnetGroup", + "traits": { + "smithy.api#documentation": "The newly-created subnet group
" + } + } + } + }, + "com.amazonaws.memorydb#CreateUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#CreateUserRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#CreateUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#DuplicateUserNameFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#TagQuotaPerResourceExceeded" + }, + { + "target": "com.amazonaws.memorydb#UserAlreadyExistsFault" + }, + { + "target": "com.amazonaws.memorydb#UserQuotaExceededFault" + } + ], + "traits": { + "smithy.api#documentation": "Creates a MemoryDB user. For more information, see Authenticating users with Access Contol Lists (ACLs).
" + } + }, + "com.amazonaws.memorydb#CreateUserRequest": { + "type": "structure", + "members": { + "UserName": { + "target": "com.amazonaws.memorydb#UserName", + "traits": { + "smithy.api#documentation": "The name of the user. This value must be unique as it also serves as the user identifier.
", + "smithy.api#required": {} + } + }, + "AuthenticationMode": { + "target": "com.amazonaws.memorydb#AuthenticationMode", + "traits": { + "smithy.api#documentation": "Denotes the user's authentication properties, such as whether it requires a password to authenticate.
", + "smithy.api#required": {} + } + }, + "AccessString": { + "target": "com.amazonaws.memorydb#AccessString", + "traits": { + "smithy.api#documentation": "Access permissions string used for this user.
", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.
" + } + } + } + }, + "com.amazonaws.memorydb#CreateUserResponse": { + "type": "structure", + "members": { + "User": { + "target": "com.amazonaws.memorydb#User", + "traits": { + "smithy.api#documentation": "The newly-created user.
" + } + } + } + }, + "com.amazonaws.memorydb#DefaultUserRequired": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "DefaultUserRequired", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#DeleteACL": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DeleteACLRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DeleteACLResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidACLStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes an Access Control List. The ACL must first be disassociated from the cluster before it can be deleted. For more information, see Authenticating users with Access Contol Lists (ACLs).
" + } + }, + "com.amazonaws.memorydb#DeleteACLRequest": { + "type": "structure", + "members": { + "ACLName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the Access Control List to delete
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#DeleteACLResponse": { + "type": "structure", + "members": { + "ACL": { + "target": "com.amazonaws.memorydb#ACL", + "traits": { + "smithy.api#documentation": "The Access Control List object that has been deleted.
" + } + } + } + }, + "com.amazonaws.memorydb#DeleteCluster": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DeleteClusterRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DeleteClusterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidClusterStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotAlreadyExistsFault" + } + ], + "traits": { + "smithy.api#documentation": "Deletes a cluster. It also deletes all associated nodes and node endpoints
" + } + }, + "com.amazonaws.memorydb#DeleteClusterRequest": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the cluster to be deleted
", + "smithy.api#required": {} + } + }, + "FinalSnapshotName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The user-supplied name of a final cluster snapshot. This is the unique name that identifies the snapshot. MemoryDB creates the snapshot, and then deletes the cluster immediately afterward.
" + } + } + } + }, + "com.amazonaws.memorydb#DeleteClusterResponse": { + "type": "structure", + "members": { + "Cluster": { + "target": "com.amazonaws.memorydb#Cluster", + "traits": { + "smithy.api#documentation": "The cluster object that has been deleted
" + } + } + } + }, + "com.amazonaws.memorydb#DeleteParameterGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DeleteParameterGroupRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DeleteParameterGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterGroupStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Deletes the specified parameter group. You cannot delete a parameter group if it is associated with any clusters. \n You cannot delete the default parameter groups in your account.
" + } + }, + "com.amazonaws.memorydb#DeleteParameterGroupRequest": { + "type": "structure", + "members": { + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group to delete.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#DeleteParameterGroupResponse": { + "type": "structure", + "members": { + "ParameterGroup": { + "target": "com.amazonaws.memorydb#ParameterGroup", + "traits": { + "smithy.api#documentation": "The parameter group that has been deleted.
" + } + } + } + }, + "com.amazonaws.memorydb#DeleteSnapshot": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DeleteSnapshotRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DeleteSnapshotResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#InvalidSnapshotStateFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Deletes an existing snapshot. When you receive a successful response from this operation, MemoryDB immediately begins deleting the snapshot; you cannot cancel or revert this operation.
" + } + }, + "com.amazonaws.memorydb#DeleteSnapshotRequest": { + "type": "structure", + "members": { + "SnapshotName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the snapshot to delete
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#DeleteSnapshotResponse": { + "type": "structure", + "members": { + "Snapshot": { + "target": "com.amazonaws.memorydb#Snapshot", + "traits": { + "smithy.api#documentation": "The snapshot object that has been deleted.
" + } + } + } + }, + "com.amazonaws.memorydb#DeleteSubnetGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DeleteSubnetGroupRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DeleteSubnetGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupInUseFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Deletes a subnet group. You cannot delete a default subnet group or one that is associated with any clusters.
" + } + }, + "com.amazonaws.memorydb#DeleteSubnetGroupRequest": { + "type": "structure", + "members": { + "SubnetGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the subnet group to delete
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#DeleteSubnetGroupResponse": { + "type": "structure", + "members": { + "SubnetGroup": { + "target": "com.amazonaws.memorydb#SubnetGroup", + "traits": { + "smithy.api#documentation": "The subnet group object that has been deleted.
" + } + } + } + }, + "com.amazonaws.memorydb#DeleteUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DeleteUserRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DeleteUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#InvalidUserStateFault" + }, + { + "target": "com.amazonaws.memorydb#UserNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Deletes a user. The user will be removed from all ACLs and in turn removed from all clusters.
" + } + }, + "com.amazonaws.memorydb#DeleteUserRequest": { + "type": "structure", + "members": { + "UserName": { + "target": "com.amazonaws.memorydb#UserName", + "traits": { + "smithy.api#documentation": "The name of the user to delete
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#DeleteUserResponse": { + "type": "structure", + "members": { + "User": { + "target": "com.amazonaws.memorydb#User", + "traits": { + "smithy.api#documentation": "The user object that has been deleted.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeACLs": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeACLsRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeACLsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of ACLs
" + } + }, + "com.amazonaws.memorydb#DescribeACLsRequest": { + "type": "structure", + "members": { + "ACLName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the ACL
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeACLsResponse": { + "type": "structure", + "members": { + "ACLs": { + "target": "com.amazonaws.memorydb#ACLList", + "traits": { + "smithy.api#documentation": "The list of ACLs
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeClusters": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeClustersRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeClustersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Returns information about all provisioned clusters if no cluster identifier is specified, or about a specific cluster if a cluster name is supplied.
" + } + }, + "com.amazonaws.memorydb#DescribeClustersRequest": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the cluster
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "ShowShardDetails": { + "target": "com.amazonaws.memorydb#BooleanOptional", + "traits": { + "smithy.api#documentation": "An optional flag that can be included in the request to retrieve information about the individual shard(s).
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeClustersResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "Clusters": { + "target": "com.amazonaws.memorydb#ClusterList", + "traits": { + "smithy.api#documentation": "A list of clusters
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeEngineVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeEngineVersionsRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeEngineVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of the available Redis engine versions.
" + } + }, + "com.amazonaws.memorydb#DescribeEngineVersionsRequest": { + "type": "structure", + "members": { + "EngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Redis engine version
" + } + }, + "ParameterGroupFamily": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of a specific parameter group family to return details for.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "DefaultOnly": { + "target": "com.amazonaws.memorydb#Boolean", + "traits": { + "smithy.api#documentation": "If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeEngineVersionsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "EngineVersions": { + "target": "com.amazonaws.memorydb#EngineVersionInfoList", + "traits": { + "smithy.api#documentation": "A list of engine version details. Each element in the list contains detailed information about one engine version.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeEvents": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeEventsRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeEventsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Returns events related to clusters, security groups, and parameter groups. You can obtain events specific to a particular cluster, security group, or parameter group by providing the name as a parameter.\n \n By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days' worth of events if necessary.
" + } + }, + "com.amazonaws.memorydb#DescribeEventsRequest": { + "type": "structure", + "members": { + "SourceName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The identifier of the event source for which events are returned. If not specified, all sources are included in the response.
" + } + }, + "SourceType": { + "target": "com.amazonaws.memorydb#SourceType", + "traits": { + "smithy.api#documentation": "The event source to retrieve events for. If no value is specified, all events are returned.
" + } + }, + "StartTime": { + "target": "com.amazonaws.memorydb#TStamp", + "traits": { + "smithy.api#documentation": "The beginning of the time interval to retrieve events for, specified in ISO 8601 format.\n \n Example: 2017-03-30T07:03:49.555Z
" + } + }, + "EndTime": { + "target": "com.amazonaws.memorydb#TStamp", + "traits": { + "smithy.api#documentation": "The end of the time interval for which to retrieve events, specified in ISO 8601 format.\n \n Example: 2017-03-30T07:03:49.555Z
" + } + }, + "Duration": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of minutes worth of events to retrieve.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeEventsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "Events": { + "target": "com.amazonaws.memorydb#EventList", + "traits": { + "smithy.api#documentation": "A list of events. Each element in the list contains detailed information about one event.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeParameterGroups": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeParameterGroupsRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeParameterGroupsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of parameter group descriptions. If a parameter group name is specified, the list contains only the descriptions for that group.
" + } + }, + "com.amazonaws.memorydb#DescribeParameterGroupsRequest": { + "type": "structure", + "members": { + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of a specific parameter group to return details for.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeParameterGroupsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "ParameterGroups": { + "target": "com.amazonaws.memorydb#ParameterGroupList", + "traits": { + "smithy.api#documentation": "A list of parameter groups. Each element in the list contains detailed information about one parameter group.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeParameters": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeParametersRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeParametersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Returns the detailed parameter list for a particular parameter group.
" + } + }, + "com.amazonaws.memorydb#DescribeParametersRequest": { + "type": "structure", + "members": { + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "he name of a specific parameter group to return details for.
", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeParametersResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "Parameters": { + "target": "com.amazonaws.memorydb#ParametersList", + "traits": { + "smithy.api#documentation": "A list of parameters specific to a particular parameter group. Each element in the list contains detailed information about one parameter.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeServiceUpdates": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeServiceUpdatesRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeServiceUpdatesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + } + ], + "traits": { + "smithy.api#documentation": "Returns details of the service updates
" + } + }, + "com.amazonaws.memorydb#DescribeServiceUpdatesRequest": { + "type": "structure", + "members": { + "ServiceUpdateName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The unique ID of the service update to describe.
" + } + }, + "ClusterNames": { + "target": "com.amazonaws.memorydb#ClusterNameList", + "traits": { + "smithy.api#documentation": "The list of cluster names to identify service updates to apply
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#ServiceUpdateStatusList", + "traits": { + "smithy.api#documentation": "The status(es) of the service updates to filter on
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeServiceUpdatesResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "ServiceUpdates": { + "target": "com.amazonaws.memorydb#ServiceUpdateList", + "traits": { + "smithy.api#documentation": "A list of service updates
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeSnapshots": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeSnapshotsRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeSnapshotsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Returns information about cluster snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, \n or just the snapshots associated with a particular cluster.
" + } + }, + "com.amazonaws.memorydb#DescribeSnapshotsRequest": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A user-supplied cluster identifier. If this parameter is specified, only snapshots associated with that specific cluster are described.
" + } + }, + "SnapshotName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A user-supplied name of the snapshot. If this parameter is specified, only this named snapshot is described.
" + } + }, + "Source": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "If set to system, the output shows snapshots that were automatically created by MemoryDB. If set to user the output shows snapshots that were manually created. If omitted, the output shows both automatically and manually created snapshots.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "ShowDetail": { + "target": "com.amazonaws.memorydb#BooleanOptional", + "traits": { + "smithy.api#documentation": "A Boolean value which if true, the shard configuration is included in the snapshot description.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeSnapshotsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "Snapshots": { + "target": "com.amazonaws.memorydb#SnapshotList", + "traits": { + "smithy.api#documentation": "A list of snapshots. Each item in the list contains detailed information about one snapshot.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeSubnetGroups": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeSubnetGroupsRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeSubnetGroupsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of subnet group descriptions. If a subnet group name is specified, the list contains only the description of that group.
" + } + }, + "com.amazonaws.memorydb#DescribeSubnetGroupsRequest": { + "type": "structure", + "members": { + "SubnetGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the subnet group to return details for.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeSubnetGroupsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + }, + "SubnetGroups": { + "target": "com.amazonaws.memorydb#SubnetGroupList", + "traits": { + "smithy.api#documentation": "A list of subnet groups. Each element in the list contains detailed information about one group.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeUsers": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#DescribeUsersRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#DescribeUsersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#UserNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of users.
" + } + }, + "com.amazonaws.memorydb#DescribeUsersRequest": { + "type": "structure", + "members": { + "UserName": { + "target": "com.amazonaws.memorydb#UserName", + "traits": { + "smithy.api#documentation": "The name of the user
" + } + }, + "Filters": { + "target": "com.amazonaws.memorydb#FilterList", + "traits": { + "smithy.api#documentation": "Filter to determine the list of users to return.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#DescribeUsersResponse": { + "type": "structure", + "members": { + "Users": { + "target": "com.amazonaws.memorydb#UserList", + "traits": { + "smithy.api#documentation": "A list of users.
" + } + }, + "NextToken": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
" + } + } + } + }, + "com.amazonaws.memorydb#Double": { + "type": "double" + }, + "com.amazonaws.memorydb#DuplicateUserNameFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "DuplicateUserName", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#Endpoint": { + "type": "structure", + "members": { + "Address": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The DNS hostname of the node.
" + } + }, + "Port": { + "target": "com.amazonaws.memorydb#Integer", + "traits": { + "smithy.api#documentation": "The port number that the engine is listening on.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents the information required for client programs to connect to the cluster and its nodes.
" + } + }, + "com.amazonaws.memorydb#EngineVersionInfo": { + "type": "structure", + "members": { + "EngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The engine version
" + } + }, + "EnginePatchVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The patched engine version
" + } + }, + "ParameterGroupFamily": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "Specifies the name of the parameter group family to which the engine default parameters apply.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides details of the Redis engine version
" + } + }, + "com.amazonaws.memorydb#EngineVersionInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#EngineVersionInfo" + } + }, + "com.amazonaws.memorydb#Event": { + "type": "structure", + "members": { + "SourceName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name for the source of the event. For example, if the event occurred at the cluster level, the identifier would be the name of the cluster.
" + } + }, + "SourceType": { + "target": "com.amazonaws.memorydb#SourceType", + "traits": { + "smithy.api#documentation": "Specifies the origin of this event - a cluster, a parameter group, a security group, etc.
" + } + }, + "Message": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The text of the event.
" + } + }, + "Date": { + "target": "com.amazonaws.memorydb#TStamp", + "traits": { + "smithy.api#documentation": "The date and time when the event occurred.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents a single occurrence of something interesting within the system. Some examples of events are creating a cluster or adding or removing a \n node.
" + } + }, + "com.amazonaws.memorydb#EventList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Event", + "traits": { + "smithy.api#xmlName": "Event" + } + } + }, + "com.amazonaws.memorydb#ExceptionMessage": { + "type": "string" + }, + "com.amazonaws.memorydb#FailoverShard": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#FailoverShardRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#FailoverShardResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#APICallRateForCustomerExceededFault" + }, + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidClusterStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidKMSKeyFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ShardNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#TestFailoverNotAvailableFault" + } + ], + "traits": { + "smithy.api#documentation": "Used to fail over a shard
" + } + }, + "com.amazonaws.memorydb#FailoverShardRequest": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The cluster being failed over
", + "smithy.api#required": {} + } + }, + "ShardName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the shard
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#FailoverShardResponse": { + "type": "structure", + "members": { + "Cluster": { + "target": "com.amazonaws.memorydb#Cluster", + "traits": { + "smithy.api#documentation": "The cluster being failed over
" + } + } + } + }, + "com.amazonaws.memorydb#Filter": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#FilterName", + "traits": { + "smithy.api#documentation": "The property being filtered. For example, UserName.
", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.memorydb#FilterValueList", + "traits": { + "smithy.api#documentation": "The property values to filter on. For example, \"user-123\".
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Used to streamline results of a search based on the property being filtered.
" + } + }, + "com.amazonaws.memorydb#FilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Filter" + } + }, + "com.amazonaws.memorydb#FilterName": { + "type": "string", + "traits": { + "smithy.api#pattern": "\\S" + } + }, + "com.amazonaws.memorydb#FilterValue": { + "type": "string", + "traits": { + "smithy.api#pattern": "\\S" + } + }, + "com.amazonaws.memorydb#FilterValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#FilterValue" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.memorydb#InputAuthenticationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "password", + "name": "PASSWORD" + } + ] + } + }, + "com.amazonaws.memorydb#InsufficientClusterCapacityFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InsufficientClusterCapacity", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#Integer": { + "type": "integer" + }, + "com.amazonaws.memorydb#IntegerOptional": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.memorydb#InvalidACLStateFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidACLState", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidARNFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidARN", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", 
+ "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidClusterStateFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidClusterState", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidCredentialsException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidCredentialsException", + "httpResponseCode": 408 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 408 + } + }, + "com.amazonaws.memorydb#InvalidKMSKeyFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidKMSKeyFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidNodeStateFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidNodeState", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidParameterCombinationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#AwsQueryErrorMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidParameterCombination", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", 
+ "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidParameterGroupStateFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidParameterGroupState", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidParameterValueException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#AwsQueryErrorMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidParameterValue", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidSnapshotStateFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidSnapshotState", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidSubnet": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidSubnet", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#InvalidUserStateFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidUserState", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + 
"com.amazonaws.memorydb#InvalidVPCNetworkStateFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidVPCNetworkStateFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#KeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String" + } + }, + "com.amazonaws.memorydb#KmsKeyId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, + "com.amazonaws.memorydb#ListAllowedNodeTypeUpdates": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#ListAllowedNodeTypeUpdatesRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#ListAllowedNodeTypeUpdatesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Lists all available node types that you can scale to from your cluster's current node type.\n \n When you use the UpdateCluster operation to scale your cluster, the value of the NodeType parameter must be one of the node types returned by this operation.
" + } + }, + "com.amazonaws.memorydb#ListAllowedNodeTypeUpdatesRequest": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the cluster you want to scale. MemoryDB uses the cluster name to identify the current node type being used by this cluster, and from that to create a list of node types\n you can scale up to.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#ListAllowedNodeTypeUpdatesResponse": { + "type": "structure", + "members": { + "ScaleUpNodeTypes": { + "target": "com.amazonaws.memorydb#NodeTypeList", + "traits": { + "smithy.api#documentation": "A list of node types which you can use to scale up your cluster.
" + } + }, + "ScaleDownNodeTypes": { + "target": "com.amazonaws.memorydb#NodeTypeList", + "traits": { + "smithy.api#documentation": "A list of node types which you can use to scale down your cluster.
" + } + } + } + }, + "com.amazonaws.memorydb#ListTags": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#ListTagsRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#ListTagsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidARNFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidClusterStateFault" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#UserNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Lists all tags currently on a named resource.\n \n A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track your MemoryDB resources. \n For more information, see Tagging your MemoryDB resources\n
" + } + }, + "com.amazonaws.memorydb#ListTagsRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource for which you want the list of tags
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#ListTagsResponse": { + "type": "structure", + "members": { + "TagList": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags as key-value pairs.
" + } + } + } + }, + "com.amazonaws.memorydb#NoOperationFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "NoOperationFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#Node": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The node identifier. A node name is a numeric identifier (0001, 0002, etc.). The combination of cluster name, shard name and node name uniquely identifies every node used in a customer's Amazon account.
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The status of the service update on the node
" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Availability Zone in which the node resides
" + } + }, + "CreateTime": { + "target": "com.amazonaws.memorydb#TStamp", + "traits": { + "smithy.api#documentation": "The date and time when the node was created.
" + } + }, + "Endpoint": { + "target": "com.amazonaws.memorydb#Endpoint", + "traits": { + "smithy.api#documentation": "The hostname for connecting to this node.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents an individual node within a cluster. Each node runs its own instance of the cluster's protocol-compliant caching software.
" + } + }, + "com.amazonaws.memorydb#NodeList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Node", + "traits": { + "smithy.api#xmlName": "Node" + } + } + }, + "com.amazonaws.memorydb#NodeQuotaForClusterExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "NodeQuotaForClusterExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#NodeQuotaForCustomerExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "NodeQuotaForCustomerExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#NodeTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String" + } + }, + "com.amazonaws.memorydb#Parameter": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter
" + } + }, + "Value": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The value of the parameter
" + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A description of the parameter
" + } + }, + "DataType": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The parameter's data type
" + } + }, + "AllowedValues": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The valid range of values for the parameter.
" + } + }, + "MinimumEngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The earliest engine version to which the parameter can apply.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes an individual setting that controls some aspect of MemoryDB behavior.
" + } + }, + "com.amazonaws.memorydb#ParameterGroup": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group
" + } + }, + "Family": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group family that this parameter group is compatible with.
" + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A description of the parameter group
" + } + }, + "ARN": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the parameter group
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents the output of a CreateParameterGroup operation. A parameter group represents a combination of specific values for the parameters that are passed to the engine software during startup.
" + } + }, + "com.amazonaws.memorydb#ParameterGroupAlreadyExistsFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ParameterGroupAlreadyExists", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#ParameterGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#ParameterGroup", + "traits": { + "smithy.api#xmlName": "ParameterGroup" + } + } + }, + "com.amazonaws.memorydb#ParameterGroupNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ParameterGroupNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#ParameterGroupQuotaExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ParameterGroupQuotaExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#ParameterNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String" + } + }, + "com.amazonaws.memorydb#ParameterNameValue": { + "type": "structure", + "members": { + "ParameterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter
" + } + }, + "ParameterValue": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The value of the parameter
" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes a name-value pair that is used to update the value of a parameter.
" + } + }, + "com.amazonaws.memorydb#ParameterNameValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#ParameterNameValue", + "traits": { + "smithy.api#xmlName": "ParameterNameValue" + } + } + }, + "com.amazonaws.memorydb#ParametersList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Parameter", + "traits": { + "smithy.api#xmlName": "Parameter" + } + } + }, + "com.amazonaws.memorydb#PasswordListInput": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.memorydb#PendingModifiedServiceUpdate": { + "type": "structure", + "members": { + "ServiceUpdateName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The unique ID of the service update
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#ServiceUpdateStatus", + "traits": { + "smithy.api#documentation": "The status of the service update
" + } + } + }, + "traits": { + "smithy.api#documentation": "Update action that has yet to be processed for the corresponding apply/stop request
" + } + }, + "com.amazonaws.memorydb#PendingModifiedServiceUpdateList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#PendingModifiedServiceUpdate", + "traits": { + "smithy.api#xmlName": "PendingModifiedServiceUpdate" + } + } + }, + "com.amazonaws.memorydb#ReplicaConfigurationRequest": { + "type": "structure", + "members": { + "ReplicaCount": { + "target": "com.amazonaws.memorydb#Integer", + "traits": { + "smithy.api#documentation": "The number of replicas to scale up or down to
" + } + } + }, + "traits": { + "smithy.api#documentation": "A request to configure the number of replicas in a shard
" + } + }, + "com.amazonaws.memorydb#ResetParameterGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#ResetParameterGroupRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#ResetParameterGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterGroupStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Modifies the parameters of a parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire parameter group, specify the AllParameters and ParameterGroupName parameters.
" + } + }, + "com.amazonaws.memorydb#ResetParameterGroupRequest": { + "type": "structure", + "members": { + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group to reset.
", + "smithy.api#required": {} + } + }, + "AllParameters": { + "target": "com.amazonaws.memorydb#Boolean", + "traits": { + "smithy.api#documentation": "If true, all parameters in the parameter group are reset to their default values. If false, only the parameters listed by ParameterNames are reset to their default values.
" + } + }, + "ParameterNames": { + "target": "com.amazonaws.memorydb#ParameterNameList", + "traits": { + "smithy.api#documentation": "An array of parameter names to reset to their default values. If AllParameters is true, do not use ParameterNames. If AllParameters is false, you must specify the name of at least one parameter to reset.
" + } + } + } + }, + "com.amazonaws.memorydb#ResetParameterGroupResponse": { + "type": "structure", + "members": { + "ParameterGroup": { + "target": "com.amazonaws.memorydb#ParameterGroup", + "traits": { + "smithy.api#documentation": "The parameter group being reset.
" + } + } + } + }, + "com.amazonaws.memorydb#ReshardingStatus": { + "type": "structure", + "members": { + "SlotMigration": { + "target": "com.amazonaws.memorydb#SlotMigration", + "traits": { + "smithy.api#documentation": "The status of the online resharding slot migration
" + } + } + }, + "traits": { + "smithy.api#documentation": "The status of the online resharding
" + } + }, + "com.amazonaws.memorydb#SecurityGroupIdsList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#xmlName": "SecurityGroupId" + } + } + }, + "com.amazonaws.memorydb#SecurityGroupMembership": { + "type": "structure", + "members": { + "SecurityGroupId": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The identifier of the security group.
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The status of the security group membership. The status changes whenever a security group is modified, or when the security groups assigned to a cluster are modified.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents a single security group and its status.
" + } + }, + "com.amazonaws.memorydb#SecurityGroupMembershipList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#SecurityGroupMembership" + } + }, + "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ServiceLinkedRoleNotFoundFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#ServiceUpdate": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the cluster to which the service update applies
" + } + }, + "ServiceUpdateName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The unique ID of the service update
" + } + }, + "ReleaseDate": { + "target": "com.amazonaws.memorydb#TStamp", + "traits": { + "smithy.api#documentation": "The date when the service update is initially available
" + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "Provides details of the service update
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#ServiceUpdateStatus", + "traits": { + "smithy.api#documentation": "The status of the service update
" + } + }, + "Type": { + "target": "com.amazonaws.memorydb#ServiceUpdateType", + "traits": { + "smithy.api#documentation": "Reflects the nature of the service update
" + } + }, + "NodesUpdated": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A list of nodes updated by the service update
" + } + }, + "AutoUpdateStartDate": { + "target": "com.amazonaws.memorydb#TStamp", + "traits": { + "smithy.api#documentation": "The date at which the service update will be automatically applied
" + } + } + }, + "traits": { + "smithy.api#documentation": "An update that you can apply to your MemoryDB clusters.
" + } + }, + "com.amazonaws.memorydb#ServiceUpdateList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#ServiceUpdate", + "traits": { + "smithy.api#xmlName": "ServiceUpdate" + } + } + }, + "com.amazonaws.memorydb#ServiceUpdateNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ServiceUpdateNotFoundFault", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#ServiceUpdateRequest": { + "type": "structure", + "members": { + "ServiceUpdateNameToApply": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The unique ID of the service update
" + } + } + }, + "traits": { + "smithy.api#documentation": "A request to apply a service update
" + } + }, + "com.amazonaws.memorydb#ServiceUpdateStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "available", + "name": "NOT_APPLIED" + }, + { + "value": "in-progress", + "name": "IN_PROGRESS" + }, + { + "value": "complete", + "name": "COMPLETE" + }, + { + "value": "scheduled", + "name": "SCHEDULED" + } + ] + } + }, + "com.amazonaws.memorydb#ServiceUpdateStatusList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#ServiceUpdateStatus" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 4 + } + } + }, + "com.amazonaws.memorydb#ServiceUpdateType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "security-update", + "name": "SECURITY_UPDATE" + } + ] + } + }, + "com.amazonaws.memorydb#Shard": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the shard
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The current state of this replication group - creating, available, modifying, deleting.
" + } + }, + "Slots": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The keyspace for this shard.
" + } + }, + "Nodes": { + "target": "com.amazonaws.memorydb#NodeList", + "traits": { + "smithy.api#documentation": "A list containing information about individual nodes within the shard
" + } + }, + "NumberOfNodes": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of nodes in the shard
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents a collection of nodes in a cluster. One node in the node group is the read/write primary node. All the other nodes are read-only Replica nodes.
" + } + }, + "com.amazonaws.memorydb#ShardConfiguration": { + "type": "structure", + "members": { + "Slots": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A string that specifies the keyspace for a particular node group. Keyspaces range from 0 to 16,383. The string is in the format startkey-endkey.
" + } + }, + "ReplicaCount": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of read replica nodes in this shard.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Shard configuration options. Each shard configuration has the following: Slots and ReplicaCount.
" + } + }, + "com.amazonaws.memorydb#ShardConfigurationRequest": { + "type": "structure", + "members": { + "ShardCount": { + "target": "com.amazonaws.memorydb#Integer", + "traits": { + "smithy.api#documentation": "The number of shards in the cluster
" + } + } + }, + "traits": { + "smithy.api#documentation": "A request to configure the sharding properties of a cluster
" + } + }, + "com.amazonaws.memorydb#ShardDetail": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the shard
" + } + }, + "Configuration": { + "target": "com.amazonaws.memorydb#ShardConfiguration", + "traits": { + "smithy.api#documentation": "The configuration details of the shard
" + } + }, + "Size": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The size of the shard's snapshot
" + } + }, + "SnapshotCreationTime": { + "target": "com.amazonaws.memorydb#TStamp", + "traits": { + "smithy.api#documentation": "The date and time that the shard's snapshot was created
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides details of a shard in a snapshot
" + } + }, + "com.amazonaws.memorydb#ShardDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#ShardDetail" + } + }, + "com.amazonaws.memorydb#ShardList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Shard", + "traits": { + "smithy.api#xmlName": "Shard" + } + } + }, + "com.amazonaws.memorydb#ShardNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ShardNotFoundFault", + "httpResponseCode": 404 + }, + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#ShardsPerClusterQuotaExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ShardsPerClusterQuotaExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#SlotMigration": { + "type": "structure", + "members": { + "ProgressPercentage": { + "target": "com.amazonaws.memorydb#Double", + "traits": { + "smithy.api#documentation": "The percentage of the slot migration that is complete.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents the progress of an online resharding operation.
" + } + }, + "com.amazonaws.memorydb#Snapshot": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the snapshot
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The status of the snapshot. Valid values: creating | available | restoring | copying | deleting.
" + } + }, + "Source": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "Indicates whether the snapshot is from an automatic backup (automated) or was created manually (manual).
" + } + }, + "KmsKeyId": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The ID of the KMS key used to encrypt the snapshot.
" + } + }, + "ARN": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The ARN (Amazon Resource Name) of the snapshot.
" + } + }, + "ClusterConfiguration": { + "target": "com.amazonaws.memorydb#ClusterConfiguration", + "traits": { + "smithy.api#documentation": "The configuration of the cluster from which the snapshot was taken
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents a copy of an entire cluster as of the time when the snapshot was taken.
" + } + }, + "com.amazonaws.memorydb#SnapshotAlreadyExistsFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SnapshotAlreadyExistsFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#SnapshotArnsList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#xmlName": "SnapshotArn" + } + } + }, + "com.amazonaws.memorydb#SnapshotList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Snapshot" + } + }, + "com.amazonaws.memorydb#SnapshotNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SnapshotNotFoundFault", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#SnapshotQuotaExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SnapshotQuotaExceededFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#SourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "node", + "name": "node" + }, + { + "value": "parameter-group", + "name": "parameter_group" + }, + { + "value": "subnet-group", + "name": "subnet_group" + }, + { + "value": "cluster", + "name": "cluster" + }, + { + "value": "user", + "name": "user" + }, + { + "value": "acl", + "name": "acl" + } + ] + } + }, + "com.amazonaws.memorydb#String": { + "type": "string" + }, + 
"com.amazonaws.memorydb#Subnet": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The unique identifier for the subnet.
" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.memorydb#AvailabilityZone", + "traits": { + "smithy.api#documentation": "The Availability Zone where the subnet resides
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents the subnet associated with a cluster. This parameter refers to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with MemoryDB.
" + } + }, + "com.amazonaws.memorydb#SubnetGroup": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the subnet group
" + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A description of the subnet group
" + } + }, + "VpcId": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Virtual Private Cloud identifier (VPC ID) of the subnet group.
" + } + }, + "Subnets": { + "target": "com.amazonaws.memorydb#SubnetList", + "traits": { + "smithy.api#documentation": "A list of subnets associated with the subnet group.
" + } + }, + "ARN": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The ARN (Amazon Resource Name) of the subnet group.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Represents the output of one of the following operations:
\nCreateSubnetGroup
\nUpdateSubnetGroup
\nA subnet group is a collection of subnets (typically private) that you can designate for your clusters running in an Amazon Virtual Private Cloud (VPC) environment.
" + } + }, + "com.amazonaws.memorydb#SubnetGroupAlreadyExistsFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SubnetGroupAlreadyExists", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#SubnetGroupInUseFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SubnetGroupInUse", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#SubnetGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#SubnetGroup" + } + }, + "com.amazonaws.memorydb#SubnetGroupNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SubnetGroupNotFoundFault", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#SubnetGroupQuotaExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SubnetGroupQuotaExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#SubnetIdentifierList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#xmlName": "SubnetIdentifier" + } + } + }, + "com.amazonaws.memorydb#SubnetInUse": { + "type": "structure", + "members": { 
+ "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SubnetInUse", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#SubnetList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Subnet", + "traits": { + "smithy.api#xmlName": "Subnet" + } + } + }, + "com.amazonaws.memorydb#SubnetNotAllowedFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SubnetNotAllowedFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#SubnetQuotaExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "SubnetQuotaExceededFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#TStamp": { + "type": "timestamp" + }, + "com.amazonaws.memorydb#Tag": { + "type": "structure", + "members": { + "Key": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The key for the tag. May not be null.
" + } + }, + "Value": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The tag's value. May be null.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A tag that can be added to an MemoryDB resource. Tags are composed of a Key/Value pair. You can use tags to categorize and track all your MemoryDB resources. \n When you add or remove tags on clusters, those actions will be replicated to all nodes in the cluster. A tag with a null Value is permitted. For more information, see \n Tagging your MemoryDB resources\n
" + } + }, + "com.amazonaws.memorydb#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#Tag", + "traits": { + "smithy.api#xmlName": "Tag" + } + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.memorydb#TagNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "TagNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#TagQuotaPerResourceExceeded": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "TagQuotaPerResourceExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidARNFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidClusterStateFault" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#TagQuotaPerResourceExceeded" + }, + { + "target": "com.amazonaws.memorydb#UserNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "A 
tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track all your MemoryDB resources. \n\n When you add or remove tags on clusters, those actions will be replicated to all nodes in the cluster. For more information, see \n\n Resource-level permissions.
\n \nFor example, you can use cost-allocation tags to your MemoryDB resources, Amazon generates a cost allocation report as a comma-separated value \n (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories \n (such as cost centers, application names, or owners) to organize your costs across multiple services.\n \n For more information, see Using Cost Allocation Tags.
" + } + }, + "com.amazonaws.memorydb#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource to which the tags are to be added
", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#TagResourceResponse": { + "type": "structure", + "members": { + "TagList": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "A list of tags as key-value pairs.
" + } + } + } + }, + "com.amazonaws.memorydb#TargetBucket": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 255 + }, + "smithy.api#pattern": "^[A-Za-z0-9._-]+$" + } + }, + "com.amazonaws.memorydb#TestFailoverNotAvailableFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "TestFailoverNotAvailableFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#UnprocessedCluster": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the cluster
" + } + }, + "ErrorType": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The error type associated with the update failure
" + } + }, + "ErrorMessage": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The error message associated with the update failure
" + } + } + }, + "traits": { + "smithy.api#documentation": "A cluster whose updates have failed
" + } + }, + "com.amazonaws.memorydb#UnprocessedClusterList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#UnprocessedCluster", + "traits": { + "smithy.api#xmlName": "UnprocessedCluster" + } + } + }, + "com.amazonaws.memorydb#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidARNFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidClusterStateFault" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SnapshotNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#TagNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#UserNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Use this operation to remove tags on a resource
" + } + }, + "com.amazonaws.memorydb#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource to which the tags are to be removed
", + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.memorydb#KeyList", + "traits": { + "smithy.api#documentation": "The list of keys of the tags that are to be removed
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#UntagResourceResponse": { + "type": "structure", + "members": { + "TagList": { + "target": "com.amazonaws.memorydb#TagList", + "traits": { + "smithy.api#documentation": "The list of tags removed
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateACL": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#UpdateACLRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#UpdateACLResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#DefaultUserRequired" + }, + { + "target": "com.amazonaws.memorydb#DuplicateUserNameFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidACLStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#UserNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Changes the list of users that belong to the Access Control List.
" + } + }, + "com.amazonaws.memorydb#UpdateACLRequest": { + "type": "structure", + "members": { + "ACLName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the Access Control List
", + "smithy.api#required": {} + } + }, + "UserNamesToAdd": { + "target": "com.amazonaws.memorydb#UserNameListInput", + "traits": { + "smithy.api#documentation": "The list of users to add to the Access Control List
" + } + }, + "UserNamesToRemove": { + "target": "com.amazonaws.memorydb#UserNameListInput", + "traits": { + "smithy.api#documentation": "The list of users to remove from the Access Control List
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateACLResponse": { + "type": "structure", + "members": { + "ACL": { + "target": "com.amazonaws.memorydb#ACL", + "traits": { + "smithy.api#documentation": "The updated Access Control List
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateCluster": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#UpdateClusterRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#UpdateClusterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#ACLNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ClusterNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ClusterQuotaForCustomerExceededFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidACLStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidClusterStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidKMSKeyFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidNodeStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#InvalidVPCNetworkStateFault" + }, + { + "target": "com.amazonaws.memorydb#NodeQuotaForClusterExceededFault" + }, + { + "target": "com.amazonaws.memorydb#NodeQuotaForCustomerExceededFault" + }, + { + "target": "com.amazonaws.memorydb#NoOperationFault" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ShardsPerClusterQuotaExceededFault" + } + ], + "traits": { + "smithy.api#documentation": "Modifies the settings for a cluster. You can use this operation to change one or more cluster configuration settings by specifying the settings and the new values.
" + } + }, + "com.amazonaws.memorydb#UpdateClusterRequest": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the cluster to update
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The description of the cluster to update
" + } + }, + "SecurityGroupIds": { + "target": "com.amazonaws.memorydb#SecurityGroupIdsList", + "traits": { + "smithy.api#documentation": "The SecurityGroupIds to update
" + } + }, + "MaintenanceWindow": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The maintenance window to update
" + } + }, + "SnsTopicArn": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The SNS topic ARN to update
" + } + }, + "SnsTopicStatus": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The status of the Amazon SNS notification topic. Notifications are sent only if the status is active.
" + } + }, + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group to update
" + } + }, + "SnapshotWindow": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of your cluster.
" + } + }, + "SnapshotRetentionLimit": { + "target": "com.amazonaws.memorydb#IntegerOptional", + "traits": { + "smithy.api#documentation": "The number of days for which MemoryDB retains automatic cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
" + } + }, + "NodeType": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A valid node type that you want to scale this cluster up or down to.
" + } + }, + "EngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The upgraded version of the engine to be run on the nodes. You can upgrade to a newer engine version, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version.
" + } + }, + "ReplicaConfiguration": { + "target": "com.amazonaws.memorydb#ReplicaConfigurationRequest", + "traits": { + "smithy.api#documentation": "The number of replicas that will reside in each shard
" + } + }, + "ShardConfiguration": { + "target": "com.amazonaws.memorydb#ShardConfigurationRequest", + "traits": { + "smithy.api#documentation": "The number of shards in the cluster
" + } + }, + "ACLName": { + "target": "com.amazonaws.memorydb#ACLName", + "traits": { + "smithy.api#documentation": "The Access Control List that is associated with the cluster
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateClusterResponse": { + "type": "structure", + "members": { + "Cluster": { + "target": "com.amazonaws.memorydb#Cluster", + "traits": { + "smithy.api#documentation": "The updated cluster
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateParameterGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#UpdateParameterGroupRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#UpdateParameterGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterGroupStateFault" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#ParameterGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Updates the parameters of a parameter group. You can modify up to 20 parameters in a single request by submitting a list parameter name and value pairs.
" + } + }, + "com.amazonaws.memorydb#UpdateParameterGroupRequest": { + "type": "structure", + "members": { + "ParameterGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the parameter group to update.
", + "smithy.api#required": {} + } + }, + "ParameterNameValues": { + "target": "com.amazonaws.memorydb#ParameterNameValueList", + "traits": { + "smithy.api#documentation": "An array of parameter names and values for the parameter update. You must supply at least one parameter name and value; subsequent arguments are optional. A maximum of 20 parameters may be updated per request.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.memorydb#UpdateParameterGroupResponse": { + "type": "structure", + "members": { + "ParameterGroup": { + "target": "com.amazonaws.memorydb#ParameterGroup", + "traits": { + "smithy.api#documentation": "The updated parameter group
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateSubnetGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#UpdateSubnetGroupRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#UpdateSubnetGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidSubnet" + }, + { + "target": "com.amazonaws.memorydb#ServiceLinkedRoleNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetGroupNotFoundFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetInUse" + }, + { + "target": "com.amazonaws.memorydb#SubnetNotAllowedFault" + }, + { + "target": "com.amazonaws.memorydb#SubnetQuotaExceededFault" + } + ], + "traits": { + "smithy.api#documentation": "Updates a subnet group. For more information, see Updating a subnet group\n
" + } + }, + "com.amazonaws.memorydb#UpdateSubnetGroupRequest": { + "type": "structure", + "members": { + "SubnetGroupName": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the subnet group
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "A description of the subnet group
" + } + }, + "SubnetIds": { + "target": "com.amazonaws.memorydb#SubnetIdentifierList", + "traits": { + "smithy.api#documentation": "The EC2 subnet IDs for the subnet group.
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateSubnetGroupResponse": { + "type": "structure", + "members": { + "SubnetGroup": { + "target": "com.amazonaws.memorydb#SubnetGroup", + "traits": { + "smithy.api#documentation": "The updated subnet group
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.memorydb#UpdateUserRequest" + }, + "output": { + "target": "com.amazonaws.memorydb#UpdateUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.memorydb#InvalidParameterCombinationException" + }, + { + "target": "com.amazonaws.memorydb#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.memorydb#InvalidUserStateFault" + }, + { + "target": "com.amazonaws.memorydb#UserNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "Changes user password(s) and/or access string.
" + } + }, + "com.amazonaws.memorydb#UpdateUserRequest": { + "type": "structure", + "members": { + "UserName": { + "target": "com.amazonaws.memorydb#UserName", + "traits": { + "smithy.api#documentation": "The name of the user
", + "smithy.api#required": {} + } + }, + "AuthenticationMode": { + "target": "com.amazonaws.memorydb#AuthenticationMode", + "traits": { + "smithy.api#documentation": "Denotes the user's authentication properties, such as whether it requires a password to authenticate.
" + } + }, + "AccessString": { + "target": "com.amazonaws.memorydb#AccessString", + "traits": { + "smithy.api#documentation": "Access permissions string used for this user.
" + } + } + } + }, + "com.amazonaws.memorydb#UpdateUserResponse": { + "type": "structure", + "members": { + "User": { + "target": "com.amazonaws.memorydb#User", + "traits": { + "smithy.api#documentation": "The updated user
" + } + } + } + }, + "com.amazonaws.memorydb#User": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The name of the user
" + } + }, + "Status": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "Indicates the user status. Can be \"active\", \"modifying\" or \"deleting\".
" + } + }, + "AccessString": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "Access permissions string used for this user.
" + } + }, + "ACLNames": { + "target": "com.amazonaws.memorydb#ACLNameList", + "traits": { + "smithy.api#documentation": "The names of the Access Control Lists to which the user belongs
" + } + }, + "MinimumEngineVersion": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The minimum engine version supported for the user
" + } + }, + "Authentication": { + "target": "com.amazonaws.memorydb#Authentication", + "traits": { + "smithy.api#documentation": "Denotes whether the user requires a password to authenticate.
" + } + }, + "ARN": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the user.\n \n
" + } + } + }, + "traits": { + "smithy.api#documentation": "You create users and assign them specific permissions by using an access string. You assign the users to Access Control Lists aligned with a specific role (administrators, human resources) that are then deployed to one or more MemoryDB clusters.
" + } + }, + "com.amazonaws.memorydb#UserAlreadyExistsFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "UserAlreadyExists", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.memorydb#UserList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#User" + } + }, + "com.amazonaws.memorydb#UserName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + }, + "smithy.api#pattern": "^[a-zA-Z][a-zA-Z0-9\\-]*$" + } + }, + "com.amazonaws.memorydb#UserNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#UserName" + } + }, + "com.amazonaws.memorydb#UserNameListInput": { + "type": "list", + "member": { + "target": "com.amazonaws.memorydb#UserName" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.memorydb#UserNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "UserNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.memorydb#UserQuotaExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.memorydb#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "UserQuotaExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + } + } +} diff --git a/codegen/sdk-codegen/aws-models/quicksight.2018-04-01.json b/codegen/sdk-codegen/aws-models/quicksight.2018-04-01.json index 8a066c641b5..9f4ec6d2e92 100644 --- 
a/codegen/sdk-codegen/aws-models/quicksight.2018-04-01.json +++ b/codegen/sdk-codegen/aws-models/quicksight.2018-04-01.json @@ -59,7 +59,7 @@ } }, "traits": { - "smithy.api#documentation": "The Amazon QuickSight customizations associated with your Amazon Web Services account; or a QuickSight namespace in a specific Region;.
" + "smithy.api#documentation": "The Amazon QuickSight customizations associated with your Amazon Web Services account; or a QuickSight namespace in a specific Amazon Web Services Region;.
" } }, "com.amazonaws.quicksight#AccountSettings": { @@ -1206,7 +1206,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates Amazon QuickSight customizations the current Region;. Currently, you can\n add a custom default theme by using the CreateAccountCustomization
or\n UpdateAccountCustomization
API operation. To further customize\n QuickSight by removing QuickSight sample assets and videos for all new users, see Customizing QuickSight in the Amazon QuickSight User\n Guide.\n
You can create customizations for your Amazon Web Services account; or, if you specify a namespace, for\n a QuickSight namespace instead. Customizations that apply to a namespace always override\n customizations that apply to an Amazon Web Services account;. To find out which customizations apply, use\n the DescribeAccountCustomization
API operation.
Before you use the CreateAccountCustomization
API operation to add a theme \n as the namespace default, make sure that you first share the theme with the namespace. \n If you don't share it with the namespace, the theme isn't visible to your users \n even if you make it the default theme. \n To check if the theme is shared, view the current permissions by using the \n \n DescribeThemePermissions\n
API operation.\n To share the theme, grant permissions by using the \n \n UpdateThemePermissions\n
API operation.
Creates Amazon QuickSight customizations in the current Amazon Web Services Region;. Currently, you can\n add a custom default theme by using the CreateAccountCustomization
or\n UpdateAccountCustomization
API operation. To further customize\n QuickSight by removing QuickSight sample assets and videos for all new users, see Customizing QuickSight in the Amazon QuickSight User\n Guide.\n
You can create customizations for your Amazon Web Services account; or, if you specify a namespace, for\n a QuickSight namespace instead. Customizations that apply to a namespace always override\n customizations that apply to an Amazon Web Services account;. To find out which customizations apply, use\n the DescribeAccountCustomization
API operation.
Before you use the CreateAccountCustomization
API operation to add a theme \n as the namespace default, make sure that you first share the theme with the namespace. \n If you don't share it with the namespace, the theme isn't visible to your users \n even if you make it the default theme. \n To check if the theme is shared, view the current permissions by using the \n \n DescribeThemePermissions\n
API operation.\n To share the theme, grant permissions by using the \n \n UpdateThemePermissions\n
API operation.
The QuickSight customizations you're adding in the current Region;. You can add\n these to an Amazon Web Services account; and a QuickSight namespace.
\nFor example, you can add a default theme by setting AccountCustomization
\n to the midnight theme: \"AccountCustomization\": { \"DefaultTheme\":\n \"arn:aws:quicksight::aws:theme/MIDNIGHT\" }
. Or, you can add a custom theme by\n specifying \"AccountCustomization\": { \"DefaultTheme\":\n \"arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639\"\n }
.
The QuickSight customizations you're adding in the current Amazon Web Services Region;. You can add\n these to an Amazon Web Services account; and a QuickSight namespace.
\nFor example, you can add a default theme by setting AccountCustomization
\n to the midnight theme: \"AccountCustomization\": { \"DefaultTheme\":\n \"arn:aws:quicksight::aws:theme/MIDNIGHT\" }
. Or, you can add a custom theme by\n specifying \"AccountCustomization\": { \"DefaultTheme\":\n \"arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639\"\n }
.
The QuickSight customizations you're adding in the current Region;.
" + "smithy.api#documentation": "The QuickSight customizations you're adding in the current Amazon Web Services Region;.
" } }, "RequestId": { @@ -1520,7 +1520,7 @@ "SourceEntity": { "target": "com.amazonaws.quicksight#DashboardSourceEntity", "traits": { - "smithy.api#documentation": "The entity that you are using as a source when you create the dashboard. In\n SourceEntity
, you specify the type of object you're using as source. You\n can only create a dashboard from a template, so you use a SourceTemplate
\n entity. If you need to create a dashboard from an analysis, first convert the analysis\n to a template by using the CreateTemplate API operation. For\n SourceTemplate
, specify the Amazon Resource Name (ARN) of the source\n template. The SourceTemplate
ARN can contain any Amazon Web Services account; and any\n QuickSight-supported Region;.
Use the DataSetReferences
entity within SourceTemplate
to\n list the replacement datasets for the placeholders listed in the original. The schema in\n each dataset must match its placeholder.
The entity that you are using as a source when you create the dashboard. In\n SourceEntity
, you specify the type of object you're using as source. You\n can only create a dashboard from a template, so you use a SourceTemplate
\n entity. If you need to create a dashboard from an analysis, first convert the analysis\n to a template by using the CreateTemplate API operation. For\n SourceTemplate
, specify the Amazon Resource Name (ARN) of the source\n template. The SourceTemplate
ARN can contain any Amazon Web Services account; and any\n QuickSight-supported Amazon Web Services Region;.
Use the DataSetReferences
entity within SourceTemplate
to\n list the replacement datasets for the placeholders listed in the original. The schema in\n each dataset must match its placeholder.
An ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "An ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#required": {} } }, @@ -1739,7 +1739,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "IngestionArn": { @@ -1826,7 +1826,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "An ID for the data source. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "An ID for the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#required": {} } }, @@ -1894,7 +1894,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "CreationStatus": { @@ -2699,7 +2699,7 @@ "CapacityRegion": { "target": "com.amazonaws.quicksight#String", "traits": { - "smithy.api#documentation": "The Region; that you want to use for the free SPICE capacity for the new namespace.\n This is set to the region that you run CreateNamespace in.
" + "smithy.api#documentation": "The Amazon Web Services Region; that you want to use for the free SPICE capacity for the new namespace.\n This is set to the region that you run CreateNamespace in.
" } }, "CreationStatus": { @@ -2889,7 +2889,7 @@ "TemplateId": { "target": "com.amazonaws.quicksight#RestrictiveResourceId", "traits": { - "smithy.api#documentation": "An ID for the template that you want to create. This template is unique per Region; in\n\t\t\teach Amazon Web Services account;.
", + "smithy.api#documentation": "An ID for the template that you want to create. This template is unique per Amazon Web Services Region; in\n\t\t\teach Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2909,7 +2909,7 @@ "SourceEntity": { "target": "com.amazonaws.quicksight#TemplateSourceEntity", "traits": { - "smithy.api#documentation": "The entity that you are using as a source when you create the template. In\n\t\t\tSourceEntity
, you specify the type of object you're using as source:\n\t\t\tSourceTemplate
for a template or SourceAnalysis
for an\n\t\t\tanalysis. Both of these require an Amazon Resource Name (ARN). For\n\t\t\tSourceTemplate
, specify the ARN of the source template. For\n\t\t\tSourceAnalysis
, specify the ARN of the source analysis. The SourceTemplate
\n\t\t\tARN can contain any Amazon Web Services account; and any QuickSight-supported Region;.
Use the DataSetReferences
entity within SourceTemplate
or\n\t\t\tSourceAnalysis
to list the replacement datasets for the placeholders listed\n\t\t\tin the original. The schema in each dataset must match its placeholder.
The entity that you are using as a source when you create the template. In\n\t\t\tSourceEntity
, you specify the type of object you're using as source:\n\t\t\tSourceTemplate
for a template or SourceAnalysis
for an\n\t\t\tanalysis. Both of these require an Amazon Resource Name (ARN). For\n\t\t\tSourceTemplate
, specify the ARN of the source template. For\n\t\t\tSourceAnalysis
, specify the ARN of the source analysis. The SourceTemplate
\n\t\t\tARN can contain any Amazon Web Services account; and any QuickSight-supported Amazon Web Services Region;.
Use the DataSetReferences
entity within SourceTemplate
or\n\t\t\tSourceAnalysis
to list the replacement datasets for the placeholders listed\n\t\t\tin the original. The schema in each dataset must match its placeholder.
An ID for the theme that you want to create. The theme ID is unique per Region; in\n\t\t\teach Amazon Web Services account;.
", + "smithy.api#documentation": "An ID for the theme that you want to create. The theme ID is unique per Amazon Web Services Region; in\n\t\t\teach Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4079,7 +4079,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each \n Amazon Web Services account;.
" + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each \n Amazon Web Services account;.
" } }, "Name": { @@ -4578,7 +4578,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes all Amazon QuickSight customizations in this Region; for the specified \n Amazon Web Services account; and QuickSight namespace.
", + "smithy.api#documentation": "Deletes all Amazon QuickSight customizations in this Amazon Web Services Region; for the specified \n Amazon Web Services account; and QuickSight namespace.
", "smithy.api#http": { "method": "DELETE", "uri": "/accounts/{AwsAccountId}/customizations", @@ -4592,7 +4592,7 @@ "AwsAccountId": { "target": "com.amazonaws.quicksight#AwsAccountId", "traits": { - "smithy.api#documentation": "The ID for the Amazon Web Services account; that you want to delete QuickSight customizations from in\n this Region;.
", + "smithy.api#documentation": "The ID for the Amazon Web Services account; that you want to delete QuickSight customizations from in\n this Amazon Web Services Region;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4875,7 +4875,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4894,7 +4894,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "RequestId": { @@ -4960,7 +4960,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4979,7 +4979,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "RequestId": { @@ -6148,7 +6148,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the customizations associated with the provided Amazon Web Services account; and Amazon\n QuickSight namespace in an Region;. The QuickSight console evaluates which\n customizations to apply by running this API operation with the Resolved
flag\n included.
To determine what customizations display when you run this command, it can help to\n visualize the relationship of the entities involved.
\n\n Amazon Web Services account;
- The Amazon Web Services account; exists at the top of the hierarchy.\n It has the potential to use all of the Regions; and AWS Services. When you\n subscribe to QuickSight, you choose one Region; to use as your home Region.\n That's where your free SPICE capacity is located. You can use QuickSight in any\n supported Region;.
\n Region;
- In each Region; where you sign in to QuickSight\n at least once, QuickSight acts as a separate instance of the same service. If\n you have a user directory, it resides in us-east-1, which is the US East (N.\n Virginia). Generally speaking, these users have access to QuickSight in any \n Region;, unless they are constrained to a namespace.
To run the command in a different Region;, you change your Region settings.\n If you're using the AWS CLI, you can use one of the following options:
\nUse command line options.
\nUse named profiles.
\nRun aws configure
to change your default Region;. Use\n Enter to key the same settings for your keys. For more information, see\n Configuring the AWS CLI.
\n Namespace
- A QuickSight namespace is a partition that contains\n users and assets (data sources, datasets, dashboards, and so on). To access\n assets that are in a specific namespace, users and groups must also be part of\n the same namespace. People who share a namespace are completely isolated from\n users and assets in other namespaces, even if they are in the same Amazon Web Services account;\n and Region;.
\n Applied customizations
- Within an Region;, a set of\n QuickSight customizations can apply to an Amazon Web Services account; or to a namespace.\n Settings that you apply to a namespace override settings that you apply to an\n Amazon Web Services account;. All settings are isolated to a single Region;. To apply them in\n other Regions;, run the CreateAccountCustomization
command in\n each Region; where you want to apply the same customizations.
Describes the customizations associated with the provided Amazon Web Services account; and Amazon\n QuickSight namespace in an Amazon Web Services Region;. The QuickSight console evaluates which\n customizations to apply by running this API operation with the Resolved
flag\n included.
To determine what customizations display when you run this command, it can help to\n visualize the relationship of the entities involved.
\n\n Amazon Web Services account;
- The Amazon Web Services account; exists at the top of the hierarchy.\n It has the potential to use all of the Amazon Web Services Regions; and AWS Services. When you\n subscribe to QuickSight, you choose one Amazon Web Services Region; to use as your home Region.\n That's where your free SPICE capacity is located. You can use QuickSight in any\n supported Amazon Web Services Region;.
\n Amazon Web Services Region;
- In each Amazon Web Services Region; where you sign in to QuickSight\n at least once, QuickSight acts as a separate instance of the same service. If\n you have a user directory, it resides in us-east-1, which is the US East (N.\n Virginia). Generally speaking, these users have access to QuickSight in any \n Amazon Web Services Region;, unless they are constrained to a namespace.
To run the command in a different Amazon Web Services Region;, you change your Region settings.\n If you're using the AWS CLI, you can use one of the following options:
\nUse command line options.
\nUse named profiles.
\nRun aws configure
to change your default Amazon Web Services Region;. Use\n Enter to key the same settings for your keys. For more information, see\n Configuring the AWS CLI.
\n Namespace
- A QuickSight namespace is a partition that contains\n users and assets (data sources, datasets, dashboards, and so on). To access\n assets that are in a specific namespace, users and groups must also be part of\n the same namespace. People who share a namespace are completely isolated from\n users and assets in other namespaces, even if they are in the same Amazon Web Services account;\n and Amazon Web Services Region;.
\n Applied customizations
- Within an Amazon Web Services Region;, a set of\n QuickSight customizations can apply to an Amazon Web Services account; or to a namespace.\n Settings that you apply to a namespace override settings that you apply to an\n Amazon Web Services account;. All settings are isolated to a single Amazon Web Services Region;. To apply them in\n other Amazon Web Services Regions;, run the CreateAccountCustomization
command in\n each Amazon Web Services Region; where you want to apply the same customizations.
The QuickSight customizations that exist in the current Region;.
" + "smithy.api#documentation": "The QuickSight customizations that exist in the current Amazon Web Services Region;.
" } }, "RequestId": { @@ -6281,7 +6281,7 @@ "AccountSettings": { "target": "com.amazonaws.quicksight#AccountSettings", "traits": { - "smithy.api#documentation": "The QuickSight settings for this Amazon Web Services account;. This information includes the edition of Amazon\n QuickSight that you subscribed to (Standard or Enterprise) and the notification email for the\n QuickSight subscription. In the QuickSight console, the QuickSight subscription is sometimes\n referred to as a QuickSight \"account\" even though it's technically not an account\n by itself. Instead, it's a subscription to the QuickSight service for your Amazon Web Services account;. The\n edition that you subscribe to applies to QuickSight in every Region; where you use it.
" + "smithy.api#documentation": "The QuickSight settings for this Amazon Web Services account;. This information includes the edition of Amazon\n QuickSight that you subscribed to (Standard or Enterprise) and the notification email for the\n QuickSight subscription. In the QuickSight console, the QuickSight subscription is sometimes\n referred to as a QuickSight \"account\" even though it's technically not an account\n by itself. Instead, it's a subscription to the QuickSight service for your Amazon Web Services account;. The\n edition that you subscribe to applies to QuickSight in every Amazon Web Services Region; where you use it.
" } }, "RequestId": { @@ -6741,7 +6741,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -6760,7 +6760,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "Permissions": { @@ -6798,7 +6798,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -6911,7 +6911,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -6930,7 +6930,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "Permissions": { @@ -6968,7 +6968,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7606,7 +7606,7 @@ "Namespace": { "target": "com.amazonaws.quicksight#NamespaceInfoV2", "traits": { - "smithy.api#documentation": "The information about the namespace that you're describing. The response includes \n the namespace ARN, name, Region;, creation status, and identity store. DescribeNamespace
also\n works for namespaces that are in the process of being created. For incomplete namespaces,\n this API operation lists the namespace error types and messages associated with the creation process.
The information about the namespace that you're describing. The response includes \n the namespace ARN, name, Amazon Web Services Region;, creation status, and identity store. DescribeNamespace
also\n works for namespaces that are in the process of being created. For incomplete namespaces,\n this API operation lists the namespace error types and messages associated with the creation process.
Generates a session URL and authorization code that you can use to embed an Amazon\n QuickSight read-only dashboard in your web server code. Before you use this command,\n make sure that you have configured the dashboards and permissions.
\nCurrently, you can use GetDashboardEmbedURL
only from the server, not\n from the user's browser. The following rules apply to the combination of URL and\n authorization code:
They must be used together.
\nThey can be used one time only.
\nThey are valid for 5 minutes after you run this command.
\nThe resulting user session is valid for 10 hours.
\nFor more information, see Embedded Analytics in the Amazon QuickSight User\n Guide.
\nFor more information about the high-level steps for embedding and for an interactive demo of the ways you can customize embedding, visit the Amazon QuickSight Developer Portal.
", + "smithy.api#documentation": "Generates a session URL and authorization code that you can use to embed an Amazon\n QuickSight read-only dashboard in your web server code. Before you use this command,\n make sure that you have configured the dashboards and permissions.
\nCurrently, you can use GetDashboardEmbedURL
only from the server, not\n from the user's browser. The following rules apply to the combination of URL and\n authorization code:
They must be used together.
\nThey can be used one time only.
\nThey are valid for 5 minutes after you run this command.
\nThe resulting user session is valid for 10 hours.
\nFor more information, see Embedding Analytics Using GetDashboardEmbedUrl in the Amazon QuickSight User\n Guide.
\nFor more information about the high-level steps for embedding and for an interactive demo of the ways you can customize embedding, visit the Amazon QuickSight Developer Portal.
", "smithy.api#http": { "method": "GET", "uri": "/accounts/{AwsAccountId}/dashboards/{DashboardId}/embed-url", @@ -9279,7 +9279,7 @@ } ], "traits": { - "smithy.api#documentation": "Generates a session URL and authorization code that you can use to embed the Amazon\n QuickSight console in your web server code. Use GetSessionEmbedUrl
where\n you want to provide an authoring portal that allows users to create data sources,\n datasets, analyses, and dashboards. The users who access an embedded QuickSight console\n need belong to the author or admin security cohort. If you want to restrict permissions\n to some of these features, add a custom permissions profile to the user with the\n \n UpdateUser\n
API operation. Use \n RegisterUser\n
API operation to add a new user with a custom permission profile attached. For more\n information, see the following sections in the Amazon QuickSight User\n Guide:
Generates a session URL and authorization code that you can use to embed the Amazon\n QuickSight console in your web server code. Use GetSessionEmbedUrl
where\n you want to provide an authoring portal that allows users to create data sources,\n datasets, analyses, and dashboards. The users who access an embedded QuickSight console\n need to belong to the author or admin security cohort. If you want to restrict permissions\n to some of these features, add a custom permissions profile to the user with the\n \n UpdateUser\n
API operation. Use \n RegisterUser\n
API operation to add a new user with a custom permission profile attached. For more\n information, see the following sections in the Amazon QuickSight User\n Guide:
\n Embedding the Amazon QuickSight Console Using GetSessionEmbedUrl\n
\nLists all of the datasets belonging to the current Amazon Web Services account; in an Region;.
\n\t\tThe permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*
.
Lists all of the datasets belonging to the current Amazon Web Services account; in an Amazon Web Services Region;.
\n\t\tThe permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*
.
Lists data sources in current Region; that belong to this Amazon Web Services account;.
", + "smithy.api#documentation": "Lists data sources in current Amazon Web Services Region; that belong to this Amazon Web Services account;.
", "smithy.api#http": { "method": "GET", "uri": "/accounts/{AwsAccountId}/data-sources", @@ -11580,7 +11580,7 @@ "Namespaces": { "target": "com.amazonaws.quicksight#Namespaces", "traits": { - "smithy.api#documentation": "The information about the namespaces in this Amazon Web Services account;. The response includes \n the namespace ARN, name, Region;, notification email address, creation status, and \n identity store.
" + "smithy.api#documentation": "The information about the namespaces in this Amazon Web Services account;. The response includes \n the namespace ARN, name, Amazon Web Services Region;, notification email address, creation status, and \n identity store.
" } }, "NextToken": { @@ -12835,7 +12835,7 @@ "CapacityRegion": { "target": "com.amazonaws.quicksight#String", "traits": { - "smithy.api#documentation": "The namespace Region;.
" + "smithy.api#documentation": "The namespace Amazon Web Services Region;.
" } }, "CreationStatus": { @@ -13631,7 +13631,7 @@ "name": "quicksight" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon QuickSight is a fully managed, serverless business intelligence service for the\n Cloud that makes it easy to extend data and insights to every user in your\n organization. This API reference contains documentation for a programming interface that\n you can use to manage Amazon QuickSight.
", + "smithy.api#documentation": "Amazon QuickSight is a fully managed, serverless business intelligence service for the\n Amazon Web Services Cloud that makes it easy to extend data and insights to every user in your\n organization. This API reference contains documentation for a programming interface that\n you can use to manage Amazon QuickSight.
", "smithy.api#title": "Amazon QuickSight" } }, @@ -15336,7 +15336,7 @@ "TemplateId": { "target": "com.amazonaws.quicksight#RestrictiveResourceId", "traits": { - "smithy.api#documentation": "The ID for the template. This is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID for the template. This is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "LastUpdatedTime": { @@ -15526,7 +15526,7 @@ "TemplateId": { "target": "com.amazonaws.quicksight#RestrictiveResourceId", "traits": { - "smithy.api#documentation": "The ID of the template. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID of the template. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "Name": { @@ -15906,7 +15906,7 @@ "ThemeId": { "target": "com.amazonaws.quicksight#RestrictiveResourceId", "traits": { - "smithy.api#documentation": "The ID of the theme. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID of the theme. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "LatestVersionNumber": { @@ -16462,7 +16462,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates Amazon QuickSight customizations the current Region;. Currently, the only\n customization you can use is a theme.
\nYou can use customizations for your Amazon Web Services account; or, if you specify a namespace, for a\n QuickSight namespace instead. Customizations that apply to a namespace override\n customizations that apply to an Amazon Web Services account;. To find out which customizations apply, use\n the DescribeAccountCustomization
API operation.
Updates Amazon QuickSight customizations the current Amazon Web Services Region;. Currently, the only\n customization you can use is a theme.
\nYou can use customizations for your Amazon Web Services account; or, if you specify a namespace, for a\n QuickSight namespace instead. Customizations that apply to a namespace override\n customizations that apply to an Amazon Web Services account;. To find out which customizations apply, use\n the DescribeAccountCustomization
API operation.
The QuickSight customizations you're updating in the current Region;.
", + "smithy.api#documentation": "The QuickSight customizations you're updating in the current Amazon Web Services Region;.
", "smithy.api#required": {} } } @@ -16521,7 +16521,7 @@ "AccountCustomization": { "target": "com.amazonaws.quicksight#AccountCustomization", "traits": { - "smithy.api#documentation": "The QuickSight customizations you're updating in the current Region;.
" + "smithy.api#documentation": "The QuickSight customizations you're updating in the current Amazon Web Services Region;.
" } }, "RequestId": { @@ -17120,7 +17120,7 @@ "SourceEntity": { "target": "com.amazonaws.quicksight#DashboardSourceEntity", "traits": { - "smithy.api#documentation": "The entity that you are using as a source when you update the dashboard. In\n SourceEntity
, you specify the type of object you're using as source. You\n can only update a dashboard from a template, so you use a SourceTemplate
\n entity. If you need to update a dashboard from an analysis, first convert the analysis\n to a template by using the CreateTemplate API operation. For\n SourceTemplate
, specify the Amazon Resource Name (ARN) of the source\n template. The SourceTemplate
ARN can contain any Amazon Web Services account; and any\n QuickSight-supported Region;.
Use the DataSetReferences
entity within SourceTemplate
to\n list the replacement datasets for the placeholders listed in the original. The schema in\n each dataset must match its placeholder.
The entity that you are using as a source when you update the dashboard. In\n SourceEntity
, you specify the type of object you're using as source. You\n can only update a dashboard from a template, so you use a SourceTemplate
\n entity. If you need to update a dashboard from an analysis, first convert the analysis\n to a template by using the CreateTemplate API operation. For\n SourceTemplate
, specify the Amazon Resource Name (ARN) of the source\n template. The SourceTemplate
ARN can contain any Amazon Web Services account; and any\n QuickSight-supported Amazon Web Services Region;.
Use the DataSetReferences
entity within SourceTemplate
to\n list the replacement datasets for the placeholders listed in the original. The schema in\n each dataset must match its placeholder.
The ID for the dataset whose permissions you want to update. This ID is unique per \n\t\t\tRegion; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID for the dataset whose permissions you want to update. This ID is unique per \n\t\t\tAmazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -17316,7 +17316,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset whose permissions you want to update. This ID is unique per \n\t\t\tRegion; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID for the dataset whose permissions you want to update. This ID is unique per \n\t\t\tAmazon Web Services Region; for each Amazon Web Services account;.
" } }, "RequestId": { @@ -17348,7 +17348,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset that you want to update. This ID is unique per Region; for each\n\t\t\tAmazon Web Services account;.
", + "smithy.api#documentation": "The ID for the dataset that you want to update. This ID is unique per Amazon Web Services Region; for each\n\t\t\tAmazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -17424,7 +17424,7 @@ "DataSetId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "IngestionArn": { @@ -17542,7 +17542,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -17573,7 +17573,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "RequestId": { @@ -17605,7 +17605,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
", + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -17655,7 +17655,7 @@ "DataSourceId": { "target": "com.amazonaws.quicksight#ResourceId", "traits": { - "smithy.api#documentation": "The ID of the data source. This ID is unique per Region; for each Amazon Web Services account;.
" + "smithy.api#documentation": "The ID of the data source. This ID is unique per Amazon Web Services Region; for each Amazon Web Services account;.
" } }, "UpdateStatus": { @@ -18399,7 +18399,7 @@ "SourceEntity": { "target": "com.amazonaws.quicksight#TemplateSourceEntity", "traits": { - "smithy.api#documentation": "The entity that you are using as a source when you update the template. In\n\t\t\tSourceEntity
, you specify the type of object you're using as source:\n\t\t\tSourceTemplate
for a template or SourceAnalysis
for an\n\t\t\tanalysis. Both of these require an Amazon Resource Name (ARN). For\n\t\t\tSourceTemplate
, specify the ARN of the source template. For\n\t\t\tSourceAnalysis
, specify the ARN of the source analysis. The SourceTemplate
\n\t\t\tARN can contain any Amazon Web Services account; and any QuickSight-supported Region;.
Use the DataSetReferences
entity within SourceTemplate
or\n\t\t\tSourceAnalysis
to list the replacement datasets for the placeholders listed\n\t\t\tin the original. The schema in each dataset must match its placeholder.
The entity that you are using as a source when you update the template. In\n\t\t\tSourceEntity
, you specify the type of object you're using as source:\n\t\t\tSourceTemplate
for a template or SourceAnalysis
for an\n\t\t\tanalysis. Both of these require an Amazon Resource Name (ARN). For\n\t\t\tSourceTemplate
, specify the ARN of the source template. For\n\t\t\tSourceAnalysis
, specify the ARN of the source analysis. The SourceTemplate
\n\t\t\tARN can contain any Amazon Web Services account; and any QuickSight-supported Amazon Web Services Region;.
Use the DataSetReferences
entity within SourceTemplate
or\n\t\t\tSourceAnalysis
to list the replacement datasets for the placeholders listed\n\t\t\tin the original. The schema in each dataset must match its placeholder.
Amazon Route 53 behavior depends on whether you specify a value for IPAddress
.
\n If you specify a value for \n IPAddress
:
Amazon Route 53 sends health check requests to the specified IPv4 or IPv6 address and passes the value of FullyQualifiedDomainName
\n\t\t\tin the Host
header for all health checks except TCP health checks. This is typically the fully qualified DNS name of the endpoint \n\t\t\ton which you want Route 53 to perform health checks.
When Route 53 checks the health of an endpoint, here is how it constructs the Host
header:
If you specify a value of 80
for Port
and HTTP
or HTTP_STR_MATCH
for \n\t\t\t\t\tType
, Route 53 passes the value of FullyQualifiedDomainName
to the endpoint in the Host header.
If you specify a value of 443
for Port
and HTTPS
or HTTPS_STR_MATCH
for \n\t\t\t\t\tType
, Route 53 passes the value of FullyQualifiedDomainName
to the endpoint in the Host
header.
If you specify another value for Port
and any value except TCP
for Type
, Route 53 passes \n\t\t\t\t\tFullyQualifiedDomainName:Port
to the endpoint in the Host
header.
If you don't specify a value for FullyQualifiedDomainName
, Route 53 substitutes the value of IPAddress
in the \n\t\t\tHost
header in each of the preceding cases.
\n If you don't specify a value for IPAddress
\n :
Route 53 sends a DNS request to the domain that you specify for FullyQualifiedDomainName
at the interval that you specify for \n\t\t\tRequestInterval
. Using an IPv4 address that DNS returns, Route 53 then checks the health of the endpoint.
If you don't specify a value for IPAddress
, Route 53 uses only IPv4 to send health checks to the endpoint. If there's \n\t\t\tno resource record set with a type of A for the name that you specify for FullyQualifiedDomainName
, the health check fails with a \n\t\t\t\"DNS resolution failed\" error.
If you want to check the health of weighted, latency, or failover resource record sets and you choose to specify the endpoint only by \n\t\t\tFullyQualifiedDomainName
, we recommend that you create a separate health check for each endpoint. For example, create a \n\t\t\thealth check for each HTTP server that is serving content for www.example.com. For the value of FullyQualifiedDomainName
, \n\t\t\tspecify the domain name of the server (such as us-east-2-www.example.com), not the name of the resource record sets (www.example.com).
In this configuration, if you create a health check for which the value of FullyQualifiedDomainName
matches the name of the \n\t\t\t\tresource record sets and you then associate the health check with those resource record sets, health check results will be unpredictable.
In addition, if the value that you specify for Type
is HTTP
, HTTPS
, HTTP_STR_MATCH
, or \n\t\t\tHTTPS_STR_MATCH
, Route 53 passes the value of FullyQualifiedDomainName
in the Host
header, as it does when you \n\t\t\tspecify a value for IPAddress
. If the value of Type
is TCP
, Route 53 doesn't pass a Host
header.
Amazon Route 53 behavior depends on whether you specify a value for IPAddress
.
\n If you specify a value for \n IPAddress
:
Amazon Route 53 sends health check requests to the specified IPv4 or IPv6 address and passes the value of FullyQualifiedDomainName
\n\t\t\tin the Host
header for all health checks except TCP health checks. This is typically the fully qualified DNS name of the endpoint \n\t\t\ton which you want Route 53 to perform health checks.
When Route 53 checks the health of an endpoint, here is how it constructs the Host
header:
If you specify a value of 80
for Port
and HTTP
or HTTP_STR_MATCH
for \n\t\t\t\t\tType
, Route 53 passes the value of FullyQualifiedDomainName
to the endpoint in the Host header.
If you specify a value of 443
for Port
and HTTPS
or HTTPS_STR_MATCH
for \n\t\t\t\t\tType
, Route 53 passes the value of FullyQualifiedDomainName
to the endpoint in the Host
header.
If you specify another value for Port
and any value except TCP
for Type
, Route 53 passes \n\t\t\t\t\tFullyQualifiedDomainName:Port
to the endpoint in the Host
header.
If you don't specify a value for FullyQualifiedDomainName
, Route 53 substitutes the value of IPAddress
in the \n\t\t\tHost
header in each of the preceding cases.
\n If you don't specify a value for \n IPAddress
:
Route 53 sends a DNS request to the domain that you specify for FullyQualifiedDomainName
at the interval that you specify for \n\t\t\tRequestInterval
. Using an IPv4 address that DNS returns, Route 53 then checks the health of the endpoint.
If you don't specify a value for IPAddress
, Route 53 uses only IPv4 to send health checks to the endpoint. If there's \n\t\t\tno resource record set with a type of A for the name that you specify for FullyQualifiedDomainName
, the health check fails with a \n\t\t\t\"DNS resolution failed\" error.
If you want to check the health of weighted, latency, or failover resource record sets and you choose to specify the endpoint only by \n\t\t\tFullyQualifiedDomainName
, we recommend that you create a separate health check for each endpoint. For example, create a \n\t\t\thealth check for each HTTP server that is serving content for www.example.com. For the value of FullyQualifiedDomainName
, \n\t\t\tspecify the domain name of the server (such as us-east-2-www.example.com), not the name of the resource record sets (www.example.com).
In this configuration, if you create a health check for which the value of FullyQualifiedDomainName
matches the name of the \n\t\t\t\tresource record sets and you then associate the health check with those resource record sets, health check results will be unpredictable.
In addition, if the value that you specify for Type
is HTTP
, HTTPS
, HTTP_STR_MATCH
, or \n\t\t\tHTTPS_STR_MATCH
, Route 53 passes the value of FullyQualifiedDomainName
in the Host
header, as it does when you \n\t\t\tspecify a value for IPAddress
. If the value of Type
is TCP
, Route 53 doesn't pass a Host
header.
Creates a Resolver query logging configuration, which defines where you want Resolver to save DNS query logs that originate in your VPCs. \n\t\t\tResolver can log queries only for VPCs that are in the same Region as the query logging configuration.
\n\t\t\n\t\tTo specify which VPCs you want to log queries for, you use AssociateResolverQueryLogConfig
. For more information, see \n\t\t\tAssociateResolverQueryLogConfig.
You can optionally use AWS Resource Access Manager (AWS RAM) to share a query logging configuration with other AWS accounts. The other accounts \n\t\t\tcan then associate VPCs with the configuration. The query logs that Resolver creates for a configuration include all DNS queries that originate in all \n\t\t\tVPCs that are associated with the configuration.
" + "smithy.api#documentation": "Creates a Resolver query logging configuration, which defines where you want Resolver to save DNS query logs that originate in your VPCs. \n\t\t\tResolver can log queries only for VPCs that are in the same Region as the query logging configuration.
\n\t\t\n\t\tTo specify which VPCs you want to log queries for, you use AssociateResolverQueryLogConfig
. For more information, see \n\t\t\tAssociateResolverQueryLogConfig.
You can optionally use Resource Access Manager (RAM) to share a query logging configuration with other Amazon Web Services accounts. The other accounts \n\t\t\tcan then associate VPCs with the configuration. The query logs that Resolver creates for a configuration include all DNS queries that originate in all \n\t\t\tVPCs that are associated with the configuration.
" } }, "com.amazonaws.route53resolver#CreateResolverQueryLogConfigRequest": { @@ -1224,7 +1224,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a query logging configuration. When you delete a configuration, Resolver stops logging DNS queries for all of the Amazon VPCs that are \n\t\t\tassociated with the configuration. This also applies if the query logging configuration is shared with other AWS accounts, and \n\t\t\tthe other accounts have associated VPCs with the shared configuration.
\n\t\t\n\t\tBefore you can delete a query logging configuration, you must first disassociate all VPCs from the configuration. See \n\t\t\tDisassociateResolverQueryLogConfig.
\n\t\t\t\n\t\tIf you used Resource Access Manager (RAM) to share a query logging configuration with other accounts, you must stop sharing \n\t\t\tthe configuration before you can delete a configuration. The accounts that you shared the configuration with can first disassociate VPCs \n\t\t\tthat they associated with the configuration, but that's not necessary. If you stop sharing the configuration, those VPCs are automatically \n\t\t\tdisassociated from the configuration.
" + "smithy.api#documentation": "Deletes a query logging configuration. When you delete a configuration, Resolver stops logging DNS queries for all of the Amazon VPCs that are \n\t\t\tassociated with the configuration. This also applies if the query logging configuration is shared with other Amazon Web Services accounts, and \n\t\t\tthe other accounts have associated VPCs with the shared configuration.
\n\t\t\n\t\tBefore you can delete a query logging configuration, you must first disassociate all VPCs from the configuration. See \n\t\t\tDisassociateResolverQueryLogConfig.
\n\t\t\t\n\t\tIf you used Resource Access Manager (RAM) to share a query logging configuration with other accounts, you must stop sharing \n\t\t\tthe configuration before you can delete a configuration. The accounts that you shared the configuration with can first disassociate VPCs \n\t\t\tthat they associated with the configuration, but that's not necessary. If you stop sharing the configuration, those VPCs are automatically \n\t\t\tdisassociated from the configuration.
" } }, "com.amazonaws.route53resolver#DeleteResolverQueryLogConfigRequest": { @@ -1457,7 +1457,7 @@ } ], "traits": { - "smithy.api#documentation": "Disassociates a VPC from a query logging configuration.
\n\t\t\n\t\tBefore you can delete a query logging configuration, you must first disassociate all VPCs\n\t\t\t\tfrom the configuration. If you used AWS Resource Access Manager (AWS RAM) to share a\n\t\t\t\tquery logging configuration with other accounts, VPCs can be disassociated from the\n\t\t\t\tconfiguration in the following ways:
\n\t\t\tThe accounts that you shared the configuration with can disassociate VPCs from the configuration.
\nYou can stop sharing the configuration.
\nDisassociates a VPC from a query logging configuration.
\n\t\t\n\t\tBefore you can delete a query logging configuration, you must first disassociate all VPCs\n\t\t\t\tfrom the configuration. If you used Resource Access Manager (RAM) to share a\n\t\t\t\tquery logging configuration with other accounts, VPCs can be disassociated from the\n\t\t\t\tconfiguration in the following ways:
\n\t\t\tThe accounts that you shared the configuration with can disassociate VPCs from the configuration.
\nYou can stop sharing the configuration.
\nThe name of the parameter that you want to use to filter objects.
\n\t\tThe valid values for Name
depend on the action that you're including the filter in, \n\t\t\tListResolverEndpoints, \n\t\t\tListResolverRules, \n\t\t\tListResolverRuleAssociations, \n\t\t\tListResolverQueryLogConfigs,\n\t\t\tor \n\t\t\tListResolverQueryLogConfigAssociations.
In early versions of Resolver, values for Name
were listed as uppercase, with underscore (_) delimiters. For example, \n\t\t\t\tCreatorRequestId
was originally listed as CREATOR_REQUEST_ID
. Uppercase values for Name
are still supported.
\n ListResolverEndpoints\n
\n\t\tValid values for Name
include the following:
\n CreatorRequestId
: The value that you specified when you created the Resolver endpoint.
\n Direction
: Whether you want to return inbound or outbound Resolver endpoints. If you specify DIRECTION
\n\t\t\t\tfor Name
, specify INBOUND
or OUTBOUND
for Values
.
\n HostVpcId
: The ID of the VPC that inbound DNS queries pass through on the way from your network to your VPCs in a region, or \n\t\t\t\tthe VPC that outbound queries pass through on the way from your VPCs to your network. In a \n\t\t\t\tCreateResolverEndpoint\n\t\t\t\trequest, SubnetId
indirectly identifies the VPC. In a \n\t\t\t\tGetResolverEndpoint\n\t\t\t\trequest, the VPC ID for a Resolver endpoint \n\t\t\t\tis returned in the HostVPCId
element.
\n IpAddressCount
: The number of IP addresses that you have associated with the Resolver endpoint.
\n Name
: The name of the Resolver endpoint.
\n SecurityGroupIds
: The IDs of the VPC security groups that you specified when you created the \n\t\t\t\tResolver endpoint.
\n Status
: The status of the Resolver endpoint. If you specify Status
for Name
, \n\t\t\t\tspecify one of the following status codes for Values
: CREATING
, OPERATIONAL
, UPDATING
,\n\t\t\t\tAUTO_RECOVERING
, ACTION_NEEDED
, or DELETING
. For more information, see Status
in\n\t\t\t\tResolverEndpoint.
\n ListResolverRules\n
\n\t\tValid values for Name
include the following:
\n CreatorRequestId
: The value that you specified when you created the Resolver rule.
\n DomainName
: The domain name for which Resolver is forwarding DNS queries to your network. In the value that \n\t\t\t\tyou specify for Values
, include a trailing dot (.) after the domain name. For example, if the domain name is example.com, \n\t\t\t\tspecify the following value. Note the \".\" after com
:
\n example.com.
\n
\n Name
: The name of the Resolver rule.
\n ResolverEndpointId
: The ID of the Resolver endpoint that the Resolver rule is associated with.
You can filter on the Resolver endpoint only for rules that have a value of FORWARD
for \n\t\t\t\t\tRuleType
.
\n Status
: The status of the Resolver rule. If you specify Status
for Name
, \n\t\t\t\tspecify one of the following status codes for Values
: COMPLETE
, DELETING
, UPDATING
, \n\t\t\t\tor FAILED
.
\n Type
: The type of the Resolver rule. If you specify TYPE
\n\t\t\t\tfor Name
, specify FORWARD
or SYSTEM
for Values
.
\n ListResolverRuleAssociations\n
\n\t\tValid values for Name
include the following:
\n Name
: The name of the Resolver rule association.
\n ResolverRuleId
: The ID of the Resolver rule that is associated with one or more VPCs.
\n Status
: The status of the Resolver rule association. If you specify Status
for Name
, \n\t\t\t\tspecify one of the following status codes for Values
: CREATING
, COMPLETE
, DELETING
, or \n\t\t\t\tFAILED
.
\n VPCId
: The ID of the VPC that the Resolver rule is associated with.
\n ListResolverQueryLogConfigs\n
\n\t\tValid values for Name
include the following:
\n Arn
: The ARN for the query logging configuration.
\n AssociationCount
: The number of VPCs that are associated with the query logging configuration.
\n CreationTime
: The date and time that the query logging configuration was created, in Unix time format and \n\t\t\t\tCoordinated Universal Time (UTC).
\n CreatorRequestId
: A unique string that identifies the request that created the query logging configuration.
\n Destination
: The AWS service that you want to forward query logs to. Valid values include the following:
\n S3
\n
\n CloudWatchLogs
\n
\n KinesisFirehose
\n
\n DestinationArn
: The ARN of the location that Resolver is sending query logs to. This value can be the ARN for an \n\t\t\t\tS3 bucket, a CloudWatch Logs log group, or a Kinesis Data Firehose delivery stream.
\n Id
: The ID of the query logging configuration
\n Name
: The name of the query logging configuration
\n OwnerId
: The AWS account ID for the account that created the query logging configuration.
\n ShareStatus
: An indication of whether the query logging configuration is shared with other AWS accounts, \n\t\t\t\tor was shared with the current account by another AWS account. Valid values include: NOT_SHARED
, SHARED_WITH_ME
, \n\t\t\t\tor SHARED_BY_ME
.
\n Status
: The status of the query logging configuration. If you specify Status
for Name
, \n\t\t\t\tspecify the applicable status code for Values
: CREATING
, CREATED
, \n\t\t\t\tDELETING
, or FAILED
. For more information, see \n\t\t\t\tStatus.\n\t\t\t\t
\n ListResolverQueryLogConfigAssociations\n
\n\t\tValid values for Name
include the following:
\n CreationTime
: The date and time that the VPC was associated with the query logging configuration, in Unix time format and \n\t\t\t\tCoordinated Universal Time (UTC).
\n Error
: If the value of Status
is FAILED
, specify the cause: \n\t\t\t\tDESTINATION_NOT_FOUND
or ACCESS_DENIED
.
\n Id
: The ID of the query logging association.
\n ResolverQueryLogConfigId
: The ID of the query logging configuration that a VPC is associated with.
\n ResourceId
: The ID of the Amazon VPC that is associated with the query logging configuration.
\n Status
: The status of the query logging association. If you specify Status
for Name
, \n\t\t\t\tspecify the applicable status code for Values
: CREATING
, CREATED
, \n\t\t\t\tDELETING
, or FAILED
. For more information, see \n\t\t\t Status.\n\t\t\t\t
The name of the parameter that you want to use to filter objects.
\n\t\tThe valid values for Name
depend on the action that you're including the filter in, \n\t\t\tListResolverEndpoints, \n\t\t\tListResolverRules, \n\t\t\tListResolverRuleAssociations, \n\t\t\tListResolverQueryLogConfigs,\n\t\t\tor \n\t\t\tListResolverQueryLogConfigAssociations.
In early versions of Resolver, values for Name
were listed as uppercase, with underscore (_) delimiters. For example, \n\t\t\t\tCreatorRequestId
was originally listed as CREATOR_REQUEST_ID
. Uppercase values for Name
are still supported.
\n ListResolverEndpoints\n
\n\t\tValid values for Name
include the following:
\n CreatorRequestId
: The value that you specified when you created the Resolver endpoint.
\n Direction
: Whether you want to return inbound or outbound Resolver endpoints. If you specify DIRECTION
\n\t\t\t\tfor Name
, specify INBOUND
or OUTBOUND
for Values
.
\n HostVPCId
: The ID of the VPC that inbound DNS queries pass through on the way from your network to your VPCs in a region, or \n\t\t\t\tthe VPC that outbound queries pass through on the way from your VPCs to your network. In a \n\t\t\t\tCreateResolverEndpoint\n\t\t\t\trequest, SubnetId
indirectly identifies the VPC. In a \n\t\t\t\tGetResolverEndpoint\n\t\t\t\trequest, the VPC ID for a Resolver endpoint \n\t\t\t\tis returned in the HostVPCId
element.
\n IpAddressCount
: The number of IP addresses that you have associated with the Resolver endpoint.
\n Name
: The name of the Resolver endpoint.
\n SecurityGroupIds
: The IDs of the VPC security groups that you specified when you created the \n\t\t\t\tResolver endpoint.
\n Status
: The status of the Resolver endpoint. If you specify Status
for Name
, \n\t\t\t\tspecify one of the following status codes for Values
: CREATING
, OPERATIONAL
, UPDATING
,\n\t\t\t\tAUTO_RECOVERING
, ACTION_NEEDED
, or DELETING
. For more information, see Status
in\n\t\t\t\tResolverEndpoint.
\n ListResolverRules\n
\n\t\tValid values for Name
include the following:
\n CreatorRequestId
: The value that you specified when you created the Resolver rule.
\n DomainName
: The domain name for which Resolver is forwarding DNS queries to your network. In the value that \n\t\t\t\tyou specify for Values
, include a trailing dot (.) after the domain name. For example, if the domain name is example.com, \n\t\t\t\tspecify the following value. Note the \".\" after com
:
\n example.com.
\n
\n Name
: The name of the Resolver rule.
\n ResolverEndpointId
: The ID of the Resolver endpoint that the Resolver rule is associated with.
You can filter on the Resolver endpoint only for rules that have a value of FORWARD
for \n\t\t\t\t\tRuleType
.
\n Status
: The status of the Resolver rule. If you specify Status
for Name
, \n\t\t\t\tspecify one of the following status codes for Values
: COMPLETE
, DELETING
, UPDATING
, \n\t\t\t\tor FAILED
.
\n Type
: The type of the Resolver rule. If you specify TYPE
\n\t\t\t\tfor Name
, specify FORWARD
or SYSTEM
for Values
.
\n ListResolverRuleAssociations\n
\n\t\tValid values for Name
include the following:
\n Name
: The name of the Resolver rule association.
\n ResolverRuleId
: The ID of the Resolver rule that is associated with one or more VPCs.
\n Status
: The status of the Resolver rule association. If you specify Status
for Name
, \n\t\t\t\tspecify one of the following status codes for Values
: CREATING
, COMPLETE
, DELETING
, or \n\t\t\t\tFAILED
.
\n VPCId
: The ID of the VPC that the Resolver rule is associated with.
\n ListResolverQueryLogConfigs\n
\n\t\tValid values for Name
include the following:
\n Arn
: The ARN for the query logging configuration.
\n AssociationCount
: The number of VPCs that are associated with the query logging configuration.
\n CreationTime
: The date and time that the query logging configuration was created, in Unix time format and \n\t\t\t\tCoordinated Universal Time (UTC).
\n CreatorRequestId
: A unique string that identifies the request that created the query logging configuration.
\n Destination
: The Amazon Web Services service that you want to forward query logs to. Valid values include the following:
\n S3
\n
\n CloudWatchLogs
\n
\n KinesisFirehose
\n
\n DestinationArn
: The ARN of the location that Resolver is sending query logs to. This value can be the ARN for an \n\t\t\t\tS3 bucket, a CloudWatch Logs log group, or a Kinesis Data Firehose delivery stream.
\n Id
: The ID of the query logging configuration
\n Name
: The name of the query logging configuration
\n OwnerId
: The Amazon Web Services account ID for the account that created the query logging configuration.
\n ShareStatus
: An indication of whether the query logging configuration is shared with other Amazon Web Services accounts, \n\t\t\t\tor was shared with the current account by another Amazon Web Services account. Valid values include: NOT_SHARED
, SHARED_WITH_ME
, \n\t\t\t\tor SHARED_BY_ME
.
\n Status
: The status of the query logging configuration. If you specify Status
for Name
, \n\t\t\t\tspecify the applicable status code for Values
: CREATING
, CREATED
, \n\t\t\t\tDELETING
, or FAILED
. For more information, see \n\t\t\t\tStatus.\n\t\t\t\t
\n ListResolverQueryLogConfigAssociations\n
\n\t\tValid values for Name
include the following:
\n CreationTime
: The date and time that the VPC was associated with the query logging configuration, in Unix time format and \n\t\t\t\tCoordinated Universal Time (UTC).
\n Error
: If the value of Status
is FAILED
, specify the cause: \n\t\t\t\tDESTINATION_NOT_FOUND
or ACCESS_DENIED
.
\n Id
: The ID of the query logging association.
\n ResolverQueryLogConfigId
: The ID of the query logging configuration that a VPC is associated with.
\n ResourceId
: The ID of the Amazon VPC that is associated with the query logging configuration.
\n Status
: The status of the query logging association. If you specify Status
for Name
, \n\t\t\t\tspecify the applicable status code for Values
: CREATING
, CREATED
, \n\t\t\t\tDELETING
, or FAILED
. For more information, see \n\t\t\t Status.\n\t\t\t\t
The AWS account ID of the owner of the VPC that this firewall configuration applies to.
" + "smithy.api#documentation": "The Amazon Web Services account ID of the owner of the VPC that this firewall configuration applies to.
" } }, "FirewallFailOpen": { @@ -1976,7 +1976,7 @@ "OwnerId": { "target": "com.amazonaws.route53resolver#AccountId", "traits": { - "smithy.api#documentation": "The AWS account ID for the account that created the rule group. When a rule group is shared with your account,\n this is the account that has shared the rule group with you.
" + "smithy.api#documentation": "The Amazon Web Services account ID for the account that created the rule group. When a rule group is shared with your account,\n this is the account that has shared the rule group with you.
" } }, "CreatorRequestId": { @@ -1988,7 +1988,7 @@ "ShareStatus": { "target": "com.amazonaws.route53resolver#ShareStatus", "traits": { - "smithy.api#documentation": "Whether the rule group is shared with other AWS accounts, or was shared with the current account by another \n AWS account. Sharing is configured through AWS Resource Access Manager (AWS RAM).
" + "smithy.api#documentation": "Whether the rule group is shared with other Amazon Web Services accounts, or was shared with the current account by another \n Amazon Web Services account. Sharing is configured through Resource Access Manager (RAM).
" } }, "CreationTime": { @@ -2056,7 +2056,7 @@ "ManagedOwnerName": { "target": "com.amazonaws.route53resolver#ServicePrinciple", "traits": { - "smithy.api#documentation": "The owner of the association, used only for associations that are not managed by you. If you use AWS Firewall Manager to \n manage your DNS Firewalls, then this reports Firewall Manager as the managed owner.
" + "smithy.api#documentation": "The owner of the association, used only for associations that are not managed by you. If you use Firewall Manager to \n manage your DNS Firewalls, then this reports Firewall Manager as the managed owner.
" } }, "Status": { @@ -2143,7 +2143,7 @@ "OwnerId": { "target": "com.amazonaws.route53resolver#AccountId", "traits": { - "smithy.api#documentation": "The AWS account ID for the account that created the rule group. When a rule group is shared with your account,\n this is the account that has shared the rule group with you.
" + "smithy.api#documentation": "The Amazon Web Services account ID for the account that created the rule group. When a rule group is shared with your account,\n this is the account that has shared the rule group with you.
" } }, "CreatorRequestId": { @@ -2155,7 +2155,7 @@ "ShareStatus": { "target": "com.amazonaws.route53resolver#ShareStatus", "traits": { - "smithy.api#documentation": "Whether the rule group is shared with other AWS accounts, or was shared with the current account by another \n AWS account. Sharing is configured through AWS Resource Access Manager (AWS RAM).
" + "smithy.api#documentation": "Whether the rule group is shared with other Amazon Web Services accounts, or was shared with the current account by another \n Amazon Web Services account. Sharing is configured through Resource Access Manager (RAM).
" } } }, @@ -2405,7 +2405,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the AWS Identity and Access Management (AWS IAM) policy for sharing the\n\t\t\tspecified rule group. You can use the policy to share the rule group using AWS Resource\n\t\t\tAccess Manager (AWS RAM).
" + "smithy.api#documentation": "Returns the Identity and Access Management (Amazon Web Services IAM) policy for sharing the\n \tspecified rule group. You can use the policy to share the rule group using Resource Access Manager (RAM).
" } }, "com.amazonaws.route53resolver#GetFirewallRuleGroupPolicyRequest": { @@ -2426,7 +2426,7 @@ "FirewallRuleGroupPolicy": { "target": "com.amazonaws.route53resolver#FirewallRuleGroupPolicy", "traits": { - "smithy.api#documentation": "The AWS Identity and Access Management (AWS IAM) policy for sharing the specified rule\n\t\t\tgroup. You can use the policy to share the rule group using AWS Resource Access Manager\n\t\t\t(AWS RAM).
" + "smithy.api#documentation": "The Identity and Access Management (Amazon Web Services IAM) policy for sharing the specified rule\n\t\t\tgroup. You can use the policy to share the rule group using Resource Access Manager\n\t\t\t(RAM).
" } } } @@ -2671,7 +2671,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets information about a query logging policy. A query logging policy specifies the Resolver query logging \n\t\t\toperations and resources that you want to allow another AWS account to be able to use.
" + "smithy.api#documentation": "Gets information about a query logging policy. A query logging policy specifies the Resolver query logging \n\t\t\toperations and resources that you want to allow another Amazon Web Services account to be able to use.
" } }, "com.amazonaws.route53resolver#GetResolverQueryLogConfigPolicyRequest": { @@ -3733,7 +3733,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the configurations for DNSSEC validation that are associated with the current AWS account.
", + "smithy.api#documentation": "Lists the configurations for DNSSEC validation that are associated with the current Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3754,7 +3754,7 @@ "NextToken": { "target": "com.amazonaws.route53resolver#NextToken", "traits": { - "smithy.api#documentation": "(Optional) If the current AWS account has more than MaxResults
DNSSEC configurations, use NextToken
\n\t\t\tto get the second and subsequent pages of results.
For the first ListResolverDnssecConfigs
request, omit this value.
For the second and subsequent requests, get the value of NextToken
from the previous response and specify that value \n\t\t\tfor NextToken
in the request.
(Optional) If the current Amazon Web Services account has more than MaxResults
DNSSEC configurations, use NextToken
\n\t\t\tto get the second and subsequent pages of results.
For the first ListResolverDnssecConfigs
request, omit this value.
For the second and subsequent requests, get the value of NextToken
from the previous response and specify that value \n\t\t\tfor NextToken
in the request.
If a response includes the last of the DNSSEC configurations that are associated with the current AWS account, \n\t\t\tNextToken
doesn't appear in the response.
If a response doesn't include the last of the configurations, you can get more configurations by submitting another \n\t\t\tListResolverDnssecConfigs \n\t\t\trequest. Get the value of NextToken
that Amazon Route 53 returned in the previous response and include it in \n\t\t\tNextToken
in the next request.
If a response includes the last of the DNSSEC configurations that are associated with the current Amazon Web Services account, \n\t\t\tNextToken
doesn't appear in the response.
If a response doesn't include the last of the configurations, you can get more configurations by submitting another \n\t\t\tListResolverDnssecConfigs \n\t\t\trequest. Get the value of NextToken
that Amazon Route 53 returned in the previous response and include it in \n\t\t\tNextToken
in the next request.
An array that contains one \n\t\t\tResolverDnssecConfig element \n\t\t\tfor each configuration for DNSSEC validation that is associated with the current AWS account.
" + "smithy.api#documentation": "An array that contains one \n\t\t\tResolverDnssecConfig element \n\t\t\tfor each configuration for DNSSEC validation that is associated with the current Amazon Web Services account.
" } } } @@ -3890,7 +3890,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all the Resolver endpoints that were created using the current AWS account.
", + "smithy.api#documentation": "Lists all the Resolver endpoints that were created using the current Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3940,7 +3940,7 @@ "ResolverEndpoints": { "target": "com.amazonaws.route53resolver#ResolverEndpoints", "traits": { - "smithy.api#documentation": "The Resolver endpoints that were created by using the current AWS account, and that match the specified filters, if any.
" + "smithy.api#documentation": "The Resolver endpoints that were created by using the current Amazon Web Services account, and that match the specified filters, if any.
" } } } @@ -4108,7 +4108,7 @@ "SortBy": { "target": "com.amazonaws.route53resolver#SortByKey", "traits": { - "smithy.api#documentation": "The element that you want Resolver to sort query logging configurations by.
\n\t\tIf you submit a second or subsequent ListResolverQueryLogConfigs
request and specify the NextToken
parameter, \n\t\t\t\tyou must use the same value for SortBy
, if any, as in the previous request.
Valid values include the following elements:
\n\t\t\n Arn
: The ARN of the query logging configuration
\n AssociationCount
: The number of VPCs that are associated with the specified configuration
\n CreationTime
: The date and time that Resolver returned when the configuration was created
\n CreatorRequestId
: The value that was specified for CreatorRequestId
when the configuration was created
\n DestinationArn
: The location that logs are sent to
\n Id
: The ID of the configuration
\n Name
: The name of the configuration
\n OwnerId
: The AWS account number of the account that created the configuration
\n ShareStatus
: Whether the configuration is shared with other AWS accounts or shared with the current account by \n\t\t\t\tanother AWS account. Sharing is configured through AWS Resource Access Manager (AWS RAM).
\n Status
: The current status of the configuration. Valid values include the following:
\n CREATING
: Resolver is creating the query logging configuration.
\n CREATED
: The query logging configuration was successfully created. \n\t\t\t\t\t\tResolver is logging queries that originate in the specified VPC.
\n DELETING
: Resolver is deleting this query logging configuration.
\n FAILED
: Resolver either couldn't create or couldn't delete the query logging configuration.\n\t\t\t\t\t\tHere are two common causes:
The specified destination (for example, an Amazon S3 bucket) was deleted.
\nPermissions don't allow sending logs to the destination.
\nThe element that you want Resolver to sort query logging configurations by.
\n\t\tIf you submit a second or subsequent ListResolverQueryLogConfigs
request and specify the NextToken
parameter, \n\t\t\t\tyou must use the same value for SortBy
, if any, as in the previous request.
Valid values include the following elements:
\n\t\t\n Arn
: The ARN of the query logging configuration
\n AssociationCount
: The number of VPCs that are associated with the specified configuration
\n CreationTime
: The date and time that Resolver returned when the configuration was created
\n CreatorRequestId
: The value that was specified for CreatorRequestId
when the configuration was created
\n DestinationArn
: The location that logs are sent to
\n Id
: The ID of the configuration
\n Name
: The name of the configuration
\n OwnerId
: The Amazon Web Services account number of the account that created the configuration
\n ShareStatus
: Whether the configuration is shared with other Amazon Web Services accounts or shared with the current account by \n\t\t\t\tanother Amazon Web Services account. Sharing is configured through Resource Access Manager (RAM).
\n Status
: The current status of the configuration. Valid values include the following:
\n CREATING
: Resolver is creating the query logging configuration.
\n CREATED
: The query logging configuration was successfully created. \n\t\t\t\t\t\tResolver is logging queries that originate in the specified VPC.
\n DELETING
: Resolver is deleting this query logging configuration.
\n FAILED
: Resolver either couldn't create or couldn't delete the query logging configuration.\n\t\t\t\t\t\tHere are two common causes:
The specified destination (for example, an Amazon S3 bucket) was deleted.
\nPermissions don't allow sending logs to the destination.
\nLists the associations that were created between Resolver rules and VPCs using the current AWS account.
", + "smithy.api#documentation": "Lists the associations that were created between Resolver rules and VPCs using the current Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -4224,7 +4224,7 @@ "ResolverRuleAssociations": { "target": "com.amazonaws.route53resolver#ResolverRuleAssociations", "traits": { - "smithy.api#documentation": "The associations that were created between Resolver rules and VPCs using the current AWS account, and that match the \n\t\t\tspecified filters, if any.
" + "smithy.api#documentation": "The associations that were created between Resolver rules and VPCs using the current Amazon Web Services account, and that match the \n\t\t\tspecified filters, if any.
" } } } @@ -4255,7 +4255,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the Resolver rules that were created using the current AWS account.
", + "smithy.api#documentation": "Lists the Resolver rules that were created using the current Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -4305,7 +4305,7 @@ "ResolverRules": { "target": "com.amazonaws.route53resolver#ResolverRules", "traits": { - "smithy.api#documentation": "The Resolver rules that were created using the current AWS account and that match the specified filters, if any.
" + "smithy.api#documentation": "The Resolver rules that were created using the current Amazon Web Services account and that match the specified filters, if any.
" } } } @@ -4469,7 +4469,7 @@ } ], "traits": { - "smithy.api#documentation": "Attaches an AWS Identity and Access Management (AWS IAM) policy for sharing the rule\n\t\t\tgroup. You can use the policy to share the rule group using AWS Resource Access Manager\n\t\t\t(AWS RAM).
" + "smithy.api#documentation": "Attaches an Identity and Access Management (Amazon Web Services IAM) policy for sharing the rule\n\t\t\tgroup. You can use the policy to share the rule group using Resource Access Manager\n\t\t\t(RAM).
" } }, "com.amazonaws.route53resolver#PutFirewallRuleGroupPolicyRequest": { @@ -4485,7 +4485,7 @@ "FirewallRuleGroupPolicy": { "target": "com.amazonaws.route53resolver#FirewallRuleGroupPolicy", "traits": { - "smithy.api#documentation": "The AWS Identity and Access Management (AWS IAM) policy to attach to the rule group.
", + "smithy.api#documentation": "The Identity and Access Management (Amazon Web Services IAM) policy to attach to the rule group.
", "smithy.api#required": {} } } @@ -4531,7 +4531,7 @@ } ], "traits": { - "smithy.api#documentation": "Specifies an AWS account that you want to share a query logging configuration with, the query logging configuration that you want to share, \n\t\t\tand the operations that you want the account to be able to perform on the configuration.
" + "smithy.api#documentation": "Specifies an Amazon Web Services account that you want to share a query logging configuration with, the query logging configuration that you want to share, \n\t\t\tand the operations that you want the account to be able to perform on the configuration.
" } }, "com.amazonaws.route53resolver#PutResolverQueryLogConfigPolicyRequest": { @@ -4547,7 +4547,7 @@ "ResolverQueryLogConfigPolicy": { "target": "com.amazonaws.route53resolver#ResolverQueryLogConfigPolicy", "traits": { - "smithy.api#documentation": "An AWS Identity and Access Management policy statement that lists the query logging configurations that you want to share with another AWS account \n\t\t\tand the operations that you want the account to be able to perform. You can specify the following operations in the Actions
section \n\t\t\tof the statement:
\n route53resolver:AssociateResolverQueryLogConfig
\n
\n route53resolver:DisassociateResolverQueryLogConfig
\n
\n route53resolver:ListResolverQueryLogConfigAssociations
\n
\n route53resolver:ListResolverQueryLogConfigs
\n
In the Resource
section of the statement, you specify the ARNs for the query logging configurations that you want to share \n\t\t\twith the account that you specified in Arn
.
An Identity and Access Management policy statement that lists the query logging configurations that you want to share with another Amazon Web Services account \n\t\t\tand the operations that you want the account to be able to perform. You can specify the following operations in the Actions
section \n\t\t\tof the statement:
\n route53resolver:AssociateResolverQueryLogConfig
\n
\n route53resolver:DisassociateResolverQueryLogConfig
\n
\n route53resolver:ListResolverQueryLogConfigAssociations
\n
\n route53resolver:ListResolverQueryLogConfigs
\n
In the Resource
section of the statement, you specify the ARNs for the query logging configurations that you want to share \n\t\t\twith the account that you specified in Arn
.
Specifies an AWS rule that you want to share with another account, the account that you want to share the rule with, \n\t\t\tand the operations that you want the account to be able to perform on the rule.
" + "smithy.api#documentation": "Specifies an Amazon Web Services rule that you want to share with another account, the account that you want to share the rule with, \n\t\t\tand the operations that you want the account to be able to perform on the rule.
" } }, "com.amazonaws.route53resolver#PutResolverRulePolicyRequest": { @@ -4606,7 +4606,7 @@ "ResolverRulePolicy": { "target": "com.amazonaws.route53resolver#ResolverRulePolicy", "traits": { - "smithy.api#documentation": "An AWS Identity and Access Management policy statement that lists the rules that you want to share with another AWS account and the operations that you want the account \n\t\t\tto be able to perform. You can specify the following operations in the Action
section of the statement:
\n route53resolver:GetResolverRule
\n
\n route53resolver:AssociateResolverRule
\n
\n route53resolver:DisassociateResolverRule
\n
\n route53resolver:ListResolverRules
\n
\n route53resolver:ListResolverRuleAssociations
\n
In the Resource
section of the statement, specify the ARN for the rule that you want to share with another account. Specify the same ARN \n\t\t\tthat you specified in Arn
.
An Identity and Access Management policy statement that lists the rules that you want to share with another Amazon Web Services account and the operations that you want the account \n\t\t\tto be able to perform. You can specify the following operations in the Action
section of the statement:
\n route53resolver:GetResolverRule
\n
\n route53resolver:AssociateResolverRule
\n
\n route53resolver:DisassociateResolverRule
\n
\n route53resolver:ListResolverRules
\n
\n route53resolver:ListResolverRuleAssociations
\n
In the Resource
section of the statement, specify the ARN for the rule that you want to share with another account. Specify the same ARN \n\t\t\tthat you specified in Arn
.
The AWS account ID for the account that created the query logging configuration.
" + "smithy.api#documentation": "The Amazon Web Services account ID for the account that created the query logging configuration.
" } }, "Status": { @@ -4843,7 +4843,7 @@ "ShareStatus": { "target": "com.amazonaws.route53resolver#ShareStatus", "traits": { - "smithy.api#documentation": "An indication of whether the query logging configuration is shared with other AWS accounts, or was shared with the current account by another \n\t\t\tAWS account. Sharing is configured through AWS Resource Access Manager (AWS RAM).
" + "smithy.api#documentation": "An indication of whether the query logging configuration is shared with other Amazon Web Services accounts, or was shared with the current account by another \n\t\t\tAmazon Web Services account. Sharing is configured through Resource Access Manager (RAM).
" } }, "AssociationCount": { @@ -5110,7 +5110,7 @@ "OwnerId": { "target": "com.amazonaws.route53resolver#AccountId", "traits": { - "smithy.api#documentation": "When a rule is shared with another AWS account, the account ID of the account that the rule is shared with.
" + "smithy.api#documentation": "When a rule is shared with another Amazon Web Services account, the account ID of the account that the rule is shared with.
" } }, "ShareStatus": { @@ -5564,7 +5564,7 @@ "name": "route53resolver" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "When you create a VPC using Amazon VPC, you automatically get DNS resolution within the VPC\n\t\t\tfrom Route 53 Resolver. By default, Resolver answers DNS queries for VPC domain names\n\t\t\tsuch as domain names for EC2 instances or Elastic Load Balancing load balancers.\n\t\t\tResolver performs recursive lookups against public name servers for all other domain\n\t\t\tnames.
\n\n\t\tYou can also configure DNS resolution between your VPC and your network over a Direct Connect or VPN connection:
\n\n\t\t\n Forward DNS queries from resolvers on your network to Route 53 Resolver\n
\n\n\t\tDNS resolvers on your network can forward DNS queries to Resolver in a specified VPC. This allows your DNS resolvers \n\t\t\tto easily resolve domain names for AWS resources such as EC2 instances or records in a Route 53 private hosted zone. \n\t\t\tFor more information, see \n\t\t\tHow DNS Resolvers \n\t\t\ton Your Network Forward DNS Queries to Route 53 Resolver in the Amazon Route 53 Developer Guide.
\n\t\t\n\t\t\n Conditionally forward queries from a VPC to resolvers on your network\n
\n\n\t\tYou can configure Resolver to forward queries that it receives from EC2 instances in your VPCs to DNS resolvers on your network. \n\t\t\tTo forward selected queries, you create Resolver rules that specify the domain names for the DNS queries that you want to forward \n\t\t\t(such as example.com), and the IP addresses of the DNS resolvers on your network that you want to forward the queries to. \n\t\t\tIf a query matches multiple rules (example.com, acme.example.com), Resolver chooses the rule with the most specific match \n\t\t\t(acme.example.com) and forwards the query to the IP addresses that you specified in that rule. For more information, see \n\t\t\tHow Route 53 Resolver \n\t\t\tForwards DNS Queries from Your VPCs to Your Network in the Amazon Route 53 Developer Guide.
\n\n\t\tLike Amazon VPC, Resolver is Regional. In each Region where you have VPCs, you can choose\n\t\t\twhether to forward queries from your VPCs to your network (outbound queries), from your\n\t\t\tnetwork to your VPCs (inbound queries), or both.
", + "smithy.api#documentation": "When you create a VPC using Amazon VPC, you automatically get DNS resolution within the VPC\n\t\t\tfrom Route 53 Resolver. By default, Resolver answers DNS queries for VPC domain names\n\t\t\tsuch as domain names for EC2 instances or Elastic Load Balancing load balancers.\n\t\t\tResolver performs recursive lookups against public name servers for all other domain\n\t\t\tnames.
\n\n\t\tYou can also configure DNS resolution between your VPC and your network over a Direct Connect or VPN connection:
\n\n\t\t\n Forward DNS queries from resolvers on your network to Route 53 Resolver\n
\n\n\t\tDNS resolvers on your network can forward DNS queries to Resolver in a specified VPC. This allows your DNS resolvers \n\t\t\tto easily resolve domain names for Amazon Web Services resources such as EC2 instances or records in a Route 53 private hosted zone. \n\t\t\tFor more information, see \n\t\t\tHow DNS Resolvers \n\t\t\ton Your Network Forward DNS Queries to Route 53 Resolver in the Amazon Route 53 Developer Guide.
\n\t\t\n\t\t\n Conditionally forward queries from a VPC to resolvers on your network\n
\n\n\t\tYou can configure Resolver to forward queries that it receives from EC2 instances in your VPCs to DNS resolvers on your network. \n\t\t\tTo forward selected queries, you create Resolver rules that specify the domain names for the DNS queries that you want to forward \n\t\t\t(such as example.com), and the IP addresses of the DNS resolvers on your network that you want to forward the queries to. \n\t\t\tIf a query matches multiple rules (example.com, acme.example.com), Resolver chooses the rule with the most specific match \n\t\t\t(acme.example.com) and forwards the query to the IP addresses that you specified in that rule. For more information, see \n\t\t\tHow Route 53 Resolver \n\t\t\tForwards DNS Queries from Your VPCs to Your Network in the Amazon Route 53 Developer Guide.
\n\n\t\tLike Amazon VPC, Resolver is Regional. In each Region where you have VPCs, you can choose\n\t\t\twhether to forward queries from your VPCs to your network (outbound queries), from your\n\t\t\tnetwork to your VPCs (inbound queries), or both.
", "smithy.api#title": "Amazon Route 53 Resolver" } }, diff --git a/codegen/sdk-codegen/aws-models/s3.2006-03-01.json b/codegen/sdk-codegen/aws-models/s3.2006-03-01.json index b5293082c67..12c31ab0744 100644 --- a/codegen/sdk-codegen/aws-models/s3.2006-03-01.json +++ b/codegen/sdk-codegen/aws-models/s3.2006-03-01.json @@ -85,7 +85,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name to which the upload was taking place.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name to which the upload was taking place.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -692,7 +692,7 @@ } }, "traits": { - "smithy.api#documentation": "In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally\n unique, and the namespace is shared by all AWS accounts.
" + "smithy.api#documentation": "In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally\n unique, and the namespace is shared by all Amazon Web Services accounts.
" } }, "com.amazonaws.s3#BucketAccelerateStatus": { @@ -722,7 +722,7 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "The bucket you tried to create already exists, and you own it. Amazon S3 returns this error\n in all AWS Regions except in the North Virginia Region. For legacy compatibility, if you\n re-create an existing bucket that you already own in the North Virginia Region, Amazon S3\n returns 200 OK and resets the bucket access control lists (ACLs).
", + "smithy.api#documentation": "The bucket you tried to create already exists, and you own it. Amazon S3 returns this error\n in all Amazon Web Services Regions except in the North Virginia Region. For legacy compatibility, if you\n re-create an existing bucket that you already own in the North Virginia Region, Amazon S3\n returns 200 OK and resets the bucket access control lists (ACLs).
", "smithy.api#error": "client" } }, @@ -1168,7 +1168,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket that contains the newly created object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
" + "smithy.api#documentation": "The name of the bucket that contains the newly created object. Does not return the access point ARN or access point alias if used.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
" } }, "Key": { @@ -1193,7 +1193,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "If you specified server-side encryption either with an Amazon S3-managed encryption key or an\n AWS KMS customer master key (CMK) in your initiate multipart upload request, the response\n includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the\n object.
", + "smithy.api#documentation": "If you specified server-side encryption either with an Amazon S3-managed encryption key or an\n Amazon Web Services KMS customer master key (CMK) in your initiate multipart upload request, the response\n includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the\n object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -1207,14 +1207,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -1235,7 +1235,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "Name of the bucket to which the multipart upload was initiated.
", + "smithy.api#documentation": "Name of the bucket to which the multipart upload was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1405,7 +1405,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy\n an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API.\n For more information, see Copy Object Using the REST Multipart Upload API.
\nAll copy requests must be authenticated. Additionally, you must have\n read access to the source object and write\n access to the destination bucket. For more information, see REST Authentication. Both the Region\n that you want to copy the object from and the Region that you want to copy the object to\n must be enabled for your account.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error. If the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. This means that a 200 OK
\n response can contain either a success or an error. Design your application to parse the\n contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied\n object.
\nIf the request is an HTTP 1.1 request, the response is chunk encoded. If it were not,\n it would not contain the content-length, and you would need to read the entire\n body.
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. For pricing information, see Amazon S3 pricing.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer Acceleration.
\n Metadata\n
\nWhen copying an object, you can preserve all metadata (default) or specify new metadata.\n However, the ACL is not preserved and is set to private for the user making the request. To\n override the default ACL setting, specify a new ACL when generating a copy request. For\n more information, see Using ACLs.
\nTo specify whether you want the object metadata copied from the source object or\n replaced with metadata provided in the request, you can optionally add the\n x-amz-metadata-directive
header. When you grant permissions, you can use\n the s3:x-amz-metadata-directive
condition key to enforce certain metadata\n behavior when objects are uploaded. For more information, see Specifying Conditions in a\n Policy in the Amazon S3 User Guide. For a complete list of\n Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for\n Amazon S3.
\n \n x-amz-copy-source-if
Headers\n
To only copy an object under certain conditions, such as whether the Etag
\n matches or whether the object was modified before or after a specified date, use the\n following request parameters:
\n x-amz-copy-source-if-match
\n
\n x-amz-copy-source-if-none-match
\n
\n x-amz-copy-source-if-unmodified-since
\n
\n x-amz-copy-source-if-modified-since
\n
If both the x-amz-copy-source-if-match
and\n x-amz-copy-source-if-unmodified-since
headers are present in the request\n and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
\n x-amz-copy-source-if-match
condition evaluates to true
\n x-amz-copy-source-if-unmodified-since
condition evaluates to\n false
If both the x-amz-copy-source-if-none-match
and\n x-amz-copy-source-if-modified-since
headers are present in the request and\n evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response\n code:
\n x-amz-copy-source-if-none-match
condition evaluates to false
\n x-amz-copy-source-if-modified-since
condition evaluates to\n true
All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed.
\n Server-side encryption\n
\nWhen you perform a CopyObject operation, you can optionally use the appropriate encryption-related \n headers to encrypt the object using server-side encryption with AWS managed encryption keys \n (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 \n encrypts your data as it writes it to disks in its data centers and decrypts the data when \n you access it. For more information about server-side encryption, see Using\n Server-Side Encryption.
\nIf a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more\n information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
\n\n Access Control List (ACL)-Specific Request\n Headers\n
\nWhen copying an object, you can optionally use headers to grant ACL-based permissions.\n By default, all objects are private. Only the owner has full access control. When adding a\n new object, you can grant permissions to individual AWS accounts or to predefined groups\n defined by Amazon S3. These permissions are then added to the ACL on the object. For more\n information, see Access Control List (ACL) Overview and Managing ACLs Using the REST\n API.
\n\n\n Storage Class Options\n
\nYou can use the CopyObject
action to change the storage class of an\n object that is already stored in Amazon S3 using the StorageClass
parameter. For\n more information, see Storage\n Classes in the Amazon S3 User Guide.
\n Versioning\n
\nBy default, x-amz-copy-source
identifies the current version of an object\n to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was\n deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for\n the object being copied. This version ID is different from the version ID of the source\n object. Amazon S3 returns the version ID of the copied object in the\n x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that\n Amazon S3 generates is always null.
\nIf the source object's storage class is GLACIER, you must restore a copy of this object\n before you can use it as a source object for the copy operation. For more information, see\n RestoreObject.
\nThe following operations are related to CopyObject
:
For more information, see Copying\n Objects.
", + "smithy.api#documentation": "Creates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy\n an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API.\n For more information, see Copy Object Using the REST Multipart Upload API.
\nAll copy requests must be authenticated. Additionally, you must have\n read access to the source object and write\n access to the destination bucket. For more information, see REST Authentication. Both the Region\n that you want to copy the object from and the Region that you want to copy the object to\n must be enabled for your account.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error. If the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. This means that a 200 OK
\n response can contain either a success or an error. Design your application to parse the\n contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied\n object.
\nIf the request is an HTTP 1.1 request, the response is chunk encoded. If it were not,\n it would not contain the content-length, and you would need to read the entire\n body.
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. For pricing information, see Amazon S3 pricing.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer Acceleration.
\n Metadata\n
\nWhen copying an object, you can preserve all metadata (default) or specify new metadata.\n However, the ACL is not preserved and is set to private for the user making the request. To\n override the default ACL setting, specify a new ACL when generating a copy request. For\n more information, see Using ACLs.
\nTo specify whether you want the object metadata copied from the source object or\n replaced with metadata provided in the request, you can optionally add the\n x-amz-metadata-directive
header. When you grant permissions, you can use\n the s3:x-amz-metadata-directive
condition key to enforce certain metadata\n behavior when objects are uploaded. For more information, see Specifying Conditions in a\n Policy in the Amazon S3 User Guide. For a complete list of\n Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for\n Amazon S3.
\n \n x-amz-copy-source-if
Headers\n
To only copy an object under certain conditions, such as whether the Etag
\n matches or whether the object was modified before or after a specified date, use the\n following request parameters:
\n x-amz-copy-source-if-match
\n
\n x-amz-copy-source-if-none-match
\n
\n x-amz-copy-source-if-unmodified-since
\n
\n x-amz-copy-source-if-modified-since
\n
If both the x-amz-copy-source-if-match
and\n x-amz-copy-source-if-unmodified-since
headers are present in the request\n and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
\n x-amz-copy-source-if-match
condition evaluates to true
\n x-amz-copy-source-if-unmodified-since
condition evaluates to\n false
If both the x-amz-copy-source-if-none-match
and\n x-amz-copy-source-if-modified-since
headers are present in the request and\n evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response\n code:
\n x-amz-copy-source-if-none-match
condition evaluates to false
\n x-amz-copy-source-if-modified-since
condition evaluates to\n true
All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed.
\n Server-side encryption\n
\nWhen you perform a CopyObject operation, you can optionally use the appropriate encryption-related \n headers to encrypt the object using server-side encryption with Amazon Web Services managed encryption keys \n (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 \n encrypts your data as it writes it to disks in its data centers and decrypts the data when \n you access it. For more information about server-side encryption, see Using\n Server-Side Encryption.
\nIf a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more\n information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
\n\n Access Control List (ACL)-Specific Request\n Headers\n
\nWhen copying an object, you can optionally use headers to grant ACL-based permissions.\n By default, all objects are private. Only the owner has full access control. When adding a\n new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups\n defined by Amazon S3. These permissions are then added to the ACL on the object. For more\n information, see Access Control List (ACL) Overview and Managing ACLs Using the REST\n API.
\n\n\n Storage Class Options\n
\nYou can use the CopyObject
action to change the storage class of an\n object that is already stored in Amazon S3 using the StorageClass
parameter. For\n more information, see Storage\n Classes in the Amazon S3 User Guide.
\n Versioning\n
\nBy default, x-amz-copy-source
identifies the current version of an object\n to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was\n deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for\n the object being copied. This version ID is different from the version ID of the source\n object. Amazon S3 returns the version ID of the copied object in the\n x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that\n Amazon S3 generates is always null.
\nIf the source object's storage class is GLACIER, you must restore a copy of this object\n before you can use it as a source object for the copy operation. For more information, see\n RestoreObject.
\nThe following operations are related to CopyObject
:
For more information, see Copying\n Objects.
", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}/{Key+}?x-id=CopyObject", @@ -1468,21 +1468,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "If present, specifies the AWS KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
", + "smithy.api#documentation": "If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -1507,7 +1507,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the destination bucket.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The name of the destination bucket.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1550,7 +1550,7 @@ "CopySource": { "target": "com.amazonaws.s3#CopySource", "traits": { - "smithy.api#documentation": "Specifies the source object for the copy operation. You specify the value in one of two\n formats, depending on whether you want to access the source object through an access point:
\nFor objects not accessed through an access point, specify the name of the source\n bucket and the key of the source object, separated by a slash (/). For example, to\n copy the object reports/january.pdf
from the bucket\n awsexamplebucket
, use\n awsexamplebucket/reports/january.pdf
. The value must be URL\n encoded.
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same AWS Region.
\nAlternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL encoded.
To copy a specific version of an object, append ?versionId=
\n to the value (for example,\n awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
).\n If you don't specify a version ID, Amazon S3 copies the latest version of the source\n object.
Specifies the source object for the copy operation. You specify the value in one of two\n formats, depending on whether you want to access the source object through an access point:
\nFor objects not accessed through an access point, specify the name of the source\n bucket and the key of the source object, separated by a slash (/). For example, to\n copy the object reports/january.pdf
from the bucket\n awsexamplebucket
, use\n awsexamplebucket/reports/january.pdf
. The value must be URL\n encoded.
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.
\nAlternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL encoded.
To copy a specific version of an object, append ?versionId=
\n to the value (for example,\n awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
).\n If you don't specify a version ID, Amazon S3 copies the latest version of the source\n object.
Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for\n an object protected by AWS KMS will fail if not made via SSL or using SigV4. For\n information about configuring using any of the officially supported AWS SDKs and AWS CLI,\n see Specifying the\n Signature Version in Request Authentication in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies the Amazon Web Services KMS key ID to use for object encryption. All GET and PUT requests for\n an object protected by Amazon Web Services KMS will fail if not made via SSL or using SigV4. For\n information about configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI,\n see Specifying the\n Signature Version in Request Authentication in the Amazon S3 User Guide.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "Specifies the AWS KMS Encryption Context to use for object encryption. The value of this\n header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value\n pairs.
", + "smithy.api#documentation": "Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this\n header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value\n pairs.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -1787,7 +1787,7 @@ "ETag": { "target": "com.amazonaws.s3#ETag", "traits": { - "smithy.api#documentation": "Returns the ETag of the new object. The ETag reflects only changes to the contents of an\n object, not its metadata. The source and destination ETag is identical for a successfully\n copied non-multipart object.
" + "smithy.api#documentation": "Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata.
" } }, "LastModified": { @@ -1874,7 +1874,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a\n valid AWS Access Key ID to authenticate requests. Anonymous requests are never allowed to\n create buckets. By creating the bucket, you become the bucket owner.
\nNot every string is an acceptable bucket name. For information about bucket naming\n restrictions, see Bucket naming rules.
\nIf you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
\nBy default, the bucket is created in the US East (N. Virginia) Region. You can\n optionally specify a Region in the request body. You might choose a Region to optimize\n latency, minimize costs, or address regulatory requirements. For example, if you reside in\n Europe, you will probably find it advantageous to create buckets in the Europe (Ireland)\n Region. For more information, see Accessing a\n bucket.
\nIf you send your create bucket request to the s3.amazonaws.com
endpoint,\n the request goes to the us-east-1 Region. Accordingly, the signature calculations in\n Signature Version 4 must use us-east-1 as the Region, even if the location constraint in\n the request specifies another Region where the bucket is to be created. If you create a\n bucket in a Region other than US East (N. Virginia), your application must be able to\n handle 307 redirect. For more information, see Virtual hosting of buckets.
When creating a bucket using this operation, you can optionally specify the accounts or\n groups that should be granted specific permissions on the bucket. There are two ways to\n grant the appropriate permissions using the request headers.
\nSpecify a canned ACL using the x-amz-acl
request header. Amazon S3\n supports a set of predefined ACLs, known as canned ACLs. Each\n canned ACL has a predefined set of grantees and permissions. For more information,\n see Canned ACL.
Specify access permissions explicitly using the x-amz-grant-read
,\n x-amz-grant-write
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and x-amz-grant-full-control
\n headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For\n more information, see Access control list\n (ACL) overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an AWS\n account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
\nFor example, the following x-amz-grant-read
header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot\n do both.
\nThe following operations are related to CreateBucket
:
\n PutObject\n
\n\n DeleteBucket\n
\nCreates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a\n valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to\n create buckets. By creating the bucket, you become the bucket owner.
\nNot every string is an acceptable bucket name. For information about bucket naming\n restrictions, see Bucket naming rules.
\nIf you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
\nBy default, the bucket is created in the US East (N. Virginia) Region. You can\n optionally specify a Region in the request body. You might choose a Region to optimize\n latency, minimize costs, or address regulatory requirements. For example, if you reside in\n Europe, you will probably find it advantageous to create buckets in the Europe (Ireland)\n Region. For more information, see Accessing a\n bucket.
\nIf you send your create bucket request to the s3.amazonaws.com
endpoint,\n the request goes to the us-east-1 Region. Accordingly, the signature calculations in\n Signature Version 4 must use us-east-1 as the Region, even if the location constraint in\n the request specifies another Region where the bucket is to be created. If you create a\n bucket in a Region other than US East (N. Virginia), your application must be able to\n handle 307 redirect. For more information, see Virtual hosting of buckets.
When creating a bucket using this operation, you can optionally specify the accounts or\n groups that should be granted specific permissions on the bucket. There are two ways to\n grant the appropriate permissions using the request headers.
\nSpecify a canned ACL using the x-amz-acl
request header. Amazon S3\n supports a set of predefined ACLs, known as canned ACLs. Each\n canned ACL has a predefined set of grantees and permissions. For more information,\n see Canned ACL.
Specify access permissions explicitly using the x-amz-grant-read
,\n x-amz-grant-write
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and x-amz-grant-full-control
\n headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For\n more information, see Access control list\n (ACL) overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot\n do both.
\n\n Permissions\n
\nIf your CreateBucket
request specifies ACL permissions and the ACL is public-read, public-read-write, \n authenticated-read, or if you specify access permissions explicitly through any other ACL, both \n s3:CreateBucket
and s3:PutBucketAcl
permissions are needed. If the ACL the \n CreateBucket
request is private, only s3:CreateBucket
permission is needed.
If ObjectLockEnabledForBucket
is set to true in your CreateBucket
request, \n s3:PutBucketObjectLockConfiguration
and s3:PutBucketVersioning
permissions are required.
The following operations are related to CreateBucket
:
\n PutObject\n
\n\n DeleteBucket\n
\nThis action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.
\n\nFor more information about multipart uploads, see Multipart Upload Overview.
\n\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting\n Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
\n\nFor information about the permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions.
\n\nFor request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating\n Requests (AWS Signature Version 4).
\n\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.
\nYou can optionally request server-side encryption. For server-side encryption, Amazon S3\n encrypts your data as it writes it to disks in its data centers and decrypts it when you\n access it. You can provide your own encryption key, or use AWS Key Management Service (AWS\n KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide\n your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an AWS KMS CMK, the requester must\n have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions in the Amazon S3 User Guide.
If your AWS Identity and Access Management (IAM) user or role is in the same AWS account\n as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM\n user or role belongs to a different account than the key, then you must have the\n permissions on both the key policy and your IAM user or role.
\n\n\nFor more information, see Protecting\n Data Using Server-Side Encryption.
\n\nWhen copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:
\nSpecify a canned ACL with the x-amz-acl
request header. For\n more information, see Canned ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. These parameters map to\n the set of permissions that Amazon S3 supports in an ACL. For more information,\n see Access Control List (ACL)\n Overview.
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nYou can optionally tell Amazon S3 to encrypt data at rest using server-side\n encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts\n your data as it writes it to disks in its data centers and decrypts it when you\n access it. The option you use depends on whether you want to use AWS managed\n encryption keys or provide your own encryption key.
\nUse encryption keys managed by Amazon S3 or customer master keys (CMKs) stored\n in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys\n used to encrypt data, specify the following headers in the request.
\nx-amz-server-side-encryption
\nx-amz-server-side-encryption-aws-kms-key-id
\nx-amz-server-side-encryption-context
\nIf you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if\n you don't make them with SSL or by using SigV4.
\nFor more information about server-side encryption with CMKs stored in AWS\n KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS\n KMS.
\nUse customer-provided encryption keys – If you want to manage your own\n encryption keys, provide all the following headers in the request.
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see Protecting Data Using Server-Side Encryption with Customer-Provided\n Encryption Keys.
\nYou also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual AWS\n accounts or to predefined groups defined by Amazon S3. These permissions are then added\n to the access control list (ACL) on the object. For more information, see Using ACLs. With this\n operation, you can grant access permissions using one of the following two\n methods:
\nSpecify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL.
Specify access permissions explicitly — To explicitly grant access\n permissions to specific AWS accounts or groups, use the following headers.\n Each header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access\n Control List (ACL) Overview. In the header, you specify a list of\n grantees who get the specific permission. To grant permissions explicitly,\n use:
\nx-amz-grant-read
\nx-amz-grant-write
\nx-amz-grant-read-acp
\nx-amz-grant-write-acp
\nx-amz-grant-full-control
\nYou specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an AWS account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
\nFor example, the following x-amz-grant-read
header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"111122223333\", id=\"444455556666\"
\n
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThis action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.
\n\nFor more information about multipart uploads, see Multipart Upload Overview.
\n\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting\n Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
\n\nFor information about the permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions.
\n\nFor request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating\n Requests (Amazon Web Services Signature Version 4).
\n\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.
\nYou can optionally request server-side encryption. For server-side encryption, Amazon S3\n encrypts your data as it writes it to disks in its data centers and decrypts it when you\n access it. You can provide your own encryption key, or use Amazon Web Services Key Management Service (Amazon Web Services\n KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide\n your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an Amazon Web Services KMS CMK, the requester must\n have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account\n as the Amazon Web Services KMS CMK, then you must have these permissions on the key policy. If your IAM\n user or role belongs to a different account than the key, then you must have the\n permissions on both the key policy and your IAM user or role.
\n\n\nFor more information, see Protecting\n Data Using Server-Side Encryption.
\n\nWhen copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:
\nSpecify a canned ACL with the x-amz-acl
request header. For\n more information, see Canned ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. These parameters map to\n the set of permissions that Amazon S3 supports in an ACL. For more information,\n see Access Control List (ACL)\n Overview.
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nYou can optionally tell Amazon S3 to encrypt data at rest using server-side\n encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts\n your data as it writes it to disks in its data centers and decrypts it when you\n access it. The option you use depends on whether you want to use Amazon Web Services managed\n encryption keys or provide your own encryption key.
\nUse encryption keys managed by Amazon S3 or customer master keys (CMKs) stored\n in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you want Amazon Web Services to manage the keys\n used to encrypt data, specify the following headers in the request.
\nx-amz-server-side-encryption
\nx-amz-server-side-encryption-aws-kms-key-id
\nx-amz-server-side-encryption-context
\nIf you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the Amazon Web Services managed CMK in Amazon Web Services KMS to protect the data.
All GET and PUT requests for an object protected by Amazon Web Services KMS fail if\n you don't make them with SSL or by using SigV4.
\nFor more information about server-side encryption with CMKs stored in Amazon Web Services\n KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web Services\n KMS.
\nUse customer-provided encryption keys – If you want to manage your own\n encryption keys, provide all the following headers in the request.
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see Protecting Data Using Server-Side Encryption with Customer-Provided\n Encryption Keys.
\nYou also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added\n to the access control list (ACL) on the object. For more information, see Using ACLs. With this\n operation, you can grant access permissions using one of the following two\n methods:
\nSpecify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL.
Specify access permissions explicitly — To explicitly grant access\n permissions to specific Amazon Web Services accounts or groups, use the following headers.\n Each header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access\n Control List (ACL) Overview. In the header, you specify a list of\n grantees who get the specific permission. To grant permissions explicitly,\n use:
\nx-amz-grant-read
\nx-amz-grant-write
\nx-amz-grant-read-acp
\nx-amz-grant-write-acp
\nx-amz-grant-full-control
\nYou specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"111122223333\", id=\"444455556666\"
\n
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThe name of the bucket to which the multipart upload was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#xmlName": "Bucket" } }, @@ -2055,21 +2055,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "If present, specifies the AWS KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
", + "smithy.api#documentation": "If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -2097,7 +2097,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket to which to initiate the upload
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The name of the bucket to which to initiate the upload
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2232,14 +2232,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object\n encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not\n made via SSL or using SigV4. For information about configuring using any of the officially\n supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies the ID of the symmetric customer managed Amazon Web Services KMS CMK to use for object\n encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not\n made via SSL or using SigV4. For information about configuring using any of the officially\n supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "Specifies the AWS KMS Encryption Context to use for object encryption. The value of this\n header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value\n pairs.
", + "smithy.api#documentation": "Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this\n header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value\n pairs.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -2674,7 +2674,7 @@ "target": "com.amazonaws.s3#DeleteBucketPolicyRequest" }, "traits": { - "smithy.api#documentation": "This implementation of the DELETE action uses the policy subresource to delete the\n policy of a specified bucket. If you are using an identity other than the root user of the\n AWS account that owns the bucket, the calling identity must have the\n DeleteBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can\n always use this operation, even if the policy explicitly denies the root user the\n ability to perform this action.
\nFor more information about bucket policies, see Using Bucket Policies and\n UserPolicies.
\nThe following operations are related to DeleteBucketPolicy
\n
\n CreateBucket\n
\n\n DeleteObject\n
\nThis implementation of the DELETE action uses the policy subresource to delete the\n policy of a specified bucket. If you are using an identity other than the root user of the\n Amazon Web Services account that owns the bucket, the calling identity must have the\n DeleteBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
As a security precaution, the root user of the Amazon Web Services account that owns a bucket can\n always use this operation, even if the policy explicitly denies the root user the\n ability to perform this action.
\nFor more information about bucket policies, see Using Bucket Policies and\n UserPolicies.
\nThe following operations are related to DeleteBucketPolicy
\n
\n CreateBucket\n
\n\n DeleteObject\n
\nThe bucket name of the bucket containing the object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name of the bucket containing the object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3035,7 +3035,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the objects from which to remove the tags.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the objects from which to remove the tags.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3117,7 +3117,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the objects to delete.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the objects to delete.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3251,7 +3251,7 @@ "Account": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to\n change replica ownership to the AWS account that owns the destination bucket by specifying\n the AccessControlTranslation
property, this is the account ID of the\n destination bucket owner. For more information, see Replication Additional\n Configuration: Changing the Replica Owner in the Amazon S3 User Guide.
Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to\n change replica ownership to the Amazon Web Services account that owns the destination bucket by specifying\n the AccessControlTranslation
property, this is the account ID of the\n destination bucket owner. For more information, see Replication Additional\n Configuration: Changing the Replica Owner in the Amazon S3 User Guide.
Specify this only in a cross-account scenario (where source and destination bucket\n owners are not the same), and you want to change replica ownership to the AWS account that\n owns the destination bucket. If this is not specified in the replication configuration, the\n replicas are owned by same AWS account that owns the source object.
" + "smithy.api#documentation": "Specify this only in a cross-account scenario (where source and destination bucket\n owners are not the same), and you want to change replica ownership to the Amazon Web Services account that\n owns the destination bucket. If this is not specified in the replication configuration, the\n replicas are owned by same Amazon Web Services account that owns the source object.
" } }, "EncryptionConfiguration": { @@ -3326,7 +3326,7 @@ "KMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If the encryption type is aws:kms
, this optional value specifies the ID of\n the symmetric customer managed AWS KMS CMK to use for encryption of job results. Amazon S3 only\n supports symmetric CMKs. For more information, see Using symmetric and\n asymmetric keys in the AWS Key Management Service Developer Guide.
If the encryption type is aws:kms
, this optional value specifies the ID of\n the symmetric customer managed Amazon Web Services KMS CMK to use for encryption of job results. Amazon S3 only\n supports symmetric CMKs. For more information, see Using symmetric and\n asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.
Specifies the ID (Key ARN or Alias ARN) of the customer managed AWS KMS key\n stored in AWS Key Management Service (KMS) for the destination bucket. Amazon S3 uses\n this key to encrypt replica objects. Amazon S3 only supports symmetric, customer managed KMS keys.\n For more information, see Using symmetric and\n asymmetric keys in the AWS Key Management Service Developer Guide.
" + "smithy.api#documentation": "Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web Services KMS key\n stored in Amazon Web Services Key Management Service (KMS) for the destination bucket. Amazon S3 uses\n this key to encrypt replica objects. Amazon S3 only supports symmetric, customer managed KMS keys.\n For more information, see Using symmetric and\n asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.
" } } }, @@ -3382,7 +3382,7 @@ "Code": { "target": "com.amazonaws.s3#Code", "traits": { - "smithy.api#documentation": "The error code is a string that uniquely identifies an error condition. It is meant to\n be read and understood by programs that detect and handle errors by type.
\n\n Amazon S3 error codes\n
\n\n Code: AccessDenied
\n\n Description: Access Denied
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: AccountProblem
\n\n Description: There is a problem with your AWS account\n that prevents the action from completing successfully. Contact AWS Support\n for further assistance.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: AllAccessDisabled
\n\n Description: All access to this Amazon S3 resource has been\n disabled. Contact AWS Support for further assistance.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: AmbiguousGrantByEmailAddress
\n\n Description: The email address you provided is\n associated with more than one account.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: AuthorizationHeaderMalformed
\n\n Description: The authorization header you provided is\n invalid.
\n\n HTTP Status Code: 400 Bad Request
\n\n HTTP Status Code: N/A
\n\n Code: BadDigest
\n\n Description: The Content-MD5 you specified did not\n match what we received.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: BucketAlreadyExists
\n\n Description: The requested bucket name is not\n available. The bucket namespace is shared by all users of the system. Please\n select a different name and try again.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: BucketAlreadyOwnedByYou
\n\n Description: The bucket you tried to create already\n exists, and you own it. Amazon S3 returns this error in all AWS Regions except in\n the North Virginia Region. For legacy compatibility, if you re-create an\n existing bucket that you already own in the North Virginia Region, Amazon S3 returns\n 200 OK and resets the bucket access control lists (ACLs).
\n\n Code: 409 Conflict (in all Regions except the North\n Virginia Region)
\n\n SOAP Fault Code Prefix: Client
\n\n Code: BucketNotEmpty
\n\n Description: The bucket you tried to delete is not\n empty.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: CredentialsNotSupported
\n\n Description: This request does not support\n credentials.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: CrossLocationLoggingProhibited
\n\n Description: Cross-location logging not allowed.\n Buckets in one geographic location cannot log information to a bucket in\n another location.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: EntityTooSmall
\n\n Description: Your proposed upload is smaller than the\n minimum allowed object size.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: EntityTooLarge
\n\n Description: Your proposed upload exceeds the maximum\n allowed object size.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: ExpiredToken
\n\n Description: The provided token has expired.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: IllegalVersioningConfigurationException
\n\n Description: Indicates that the versioning\n configuration specified in the request is invalid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: IncompleteBody
\n\n Description: You did not provide the number of bytes\n specified by the Content-Length HTTP header
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: IncorrectNumberOfFilesInPostRequest
\n\n Description: POST requires exactly one file upload per\n request.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InlineDataTooLarge
\n\n Description: Inline data exceeds the maximum allowed\n size.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InternalError
\n\n Description: We encountered an internal error. Please\n try again.
\n\n HTTP Status Code: 500 Internal Server Error
\n\n SOAP Fault Code Prefix: Server
\n\n Code: InvalidAccessKeyId
\n\n Description: The AWS access key ID you provided does\n not exist in our records.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidAddressingHeader
\n\n Description: You must specify the Anonymous\n role.
\n\n HTTP Status Code: N/A
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidArgument
\n\n Description: Invalid Argument
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidBucketName
\n\n Description: The specified bucket is not valid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidBucketState
\n\n Description: The request is not valid with the current\n state of the bucket.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidDigest
\n\n Description: The Content-MD5 you specified is not\n valid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidEncryptionAlgorithmError
\n\n Description: The encryption request you specified is\n not valid. The valid value is AES256.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidLocationConstraint
\n\n Description: The specified location constraint is not\n valid. For more information about Regions, see How to Select a\n Region for Your Buckets.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidObjectState
\n\n Description: The action is not valid for the current\n state of the object.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidPart
\n\n Description: One or more of the specified parts could\n not be found. The part might not have been uploaded, or the specified entity\n tag might not have matched the part's entity tag.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidPartOrder
\n\n Description: The list of parts was not in ascending\n order. Parts list must be specified in order by part number.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidPayer
\n\n Description: All access to this object has been\n disabled. Please contact AWS Support for further assistance.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidPolicyDocument
\n\n Description: The content of the form does not meet the\n conditions specified in the policy document.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidRange
\n\n Description: The requested range cannot be\n satisfied.
\n\n HTTP Status Code: 416 Requested Range Not\n Satisfiable
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidRequest
\n\n Description: Please use AWS4-HMAC-SHA256.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: SOAP requests must be made over an HTTPS\n connection.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Acceleration is not\n supported for buckets with non-DNS compliant names.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Acceleration is not\n supported for buckets with periods (.) in their names.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Accelerate endpoint only\n supports virtual style requests.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Accelerate is not configured\n on this bucket.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Accelerate is disabled on\n this bucket.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Acceleration is not\n supported on this bucket. Contact AWS Support for more information.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Acceleration cannot be\n enabled on this bucket. Contact AWS Support for more information.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidSecurity
\n\n Description: The provided security credentials are not\n valid.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidSOAPRequest
\n\n Description: The SOAP request body is invalid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidStorageClass
\n\n Description: The storage class you specified is not\n valid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidTargetBucketForLogging
\n\n Description: The target bucket for logging does not\n exist, is not owned by you, or does not have the appropriate grants for the\n log-delivery group.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidToken
\n\n Description: The provided token is malformed or\n otherwise invalid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidURI
\n\n Description: Couldn't parse the specified URI.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: KeyTooLongError
\n\n Description: Your key is too long.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MalformedACLError
\n\n Description: The XML you provided was not well-formed\n or did not validate against our published schema.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MalformedPOSTRequest
\n\n Description: The body of your POST request is not\n well-formed multipart/form-data.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MalformedXML
\n\n Description: This happens when the user sends malformed\n XML (XML that doesn't conform to the published XSD) for the configuration. The\n error message is, \"The XML you provided was not well-formed or did not validate\n against our published schema.\"
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MaxMessageLengthExceeded
\n\n Description: Your request was too big.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MaxPostPreDataLengthExceededError
\n\n Description: Your POST request fields preceding the\n upload file were too large.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MetadataTooLarge
\n\n Description: Your metadata headers exceed the maximum\n allowed metadata size.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MethodNotAllowed
\n\n Description: The specified method is not allowed\n against this resource.
\n\n HTTP Status Code: 405 Method Not Allowed
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingAttachment
\n\n Description: A SOAP attachment was expected, but none\n were found.
\n\n HTTP Status Code: N/A
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingContentLength
\n\n Description: You must provide the Content-Length HTTP\n header.
\n\n HTTP Status Code: 411 Length Required
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingRequestBodyError
\n\n Description: This happens when the user sends an empty\n XML document as a request. The error message is, \"Request body is empty.\"\n
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingSecurityElement
\n\n Description: The SOAP 1.1 request is missing a security\n element.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingSecurityHeader
\n\n Description: Your request is missing a required\n header.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoLoggingStatusForKey
\n\n Description: There is no such thing as a logging status\n subresource for a key.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchBucket
\n\n Description: The specified bucket does not\n exist.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchBucketPolicy
\n\n Description: The specified bucket does not have a\n bucket policy.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchKey
\n\n Description: The specified key does not exist.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchLifecycleConfiguration
\n\n Description: The lifecycle configuration does not\n exist.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchUpload
\n\n Description: The specified multipart upload does not\n exist. The upload ID might be invalid, or the multipart upload might have been\n aborted or completed.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchVersion
\n\n Description: Indicates that the version ID specified in\n the request does not match an existing version.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NotImplemented
\n\n Description: A header you provided implies\n functionality that is not implemented.
\n\n HTTP Status Code: 501 Not Implemented
\n\n SOAP Fault Code Prefix: Server
\n\n Code: NotSignedUp
\n\n Description: Your account is not signed up for the Amazon S3\n service. You must sign up before you can use Amazon S3. You can sign up at the\n following URL: https://aws.amazon.com/s3
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: OperationAborted
\n\n Description: A conflicting conditional action is\n currently in progress against this resource. Try again.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: PermanentRedirect
\n\n Description: The bucket you are attempting to access\n must be addressed using the specified endpoint. Send all future requests to\n this endpoint.
\n\n HTTP Status Code: 301 Moved Permanently
\n\n SOAP Fault Code Prefix: Client
\n\n Code: PreconditionFailed
\n\n Description: At least one of the preconditions you\n specified did not hold.
\n\n HTTP Status Code: 412 Precondition Failed
\n\n SOAP Fault Code Prefix: Client
\n\n Code: Redirect
\n\n Description: Temporary redirect.
\n\n HTTP Status Code: 307 Moved Temporarily
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RestoreAlreadyInProgress
\n\n Description: Object restore is already in\n progress.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RequestIsNotMultiPartContent
\n\n Description: Bucket POST must be of the enclosure-type\n multipart/form-data.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RequestTimeout
\n\n Description: Your socket connection to the server was\n not read from or written to within the timeout period.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RequestTimeTooSkewed
\n\n Description: The difference between the request time\n and the server's time is too large.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RequestTorrentOfBucketError
\n\n Description: Requesting the torrent file of a bucket is\n not permitted.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: SignatureDoesNotMatch
\n\n Description: The request signature we calculated does\n not match the signature you provided. Check your AWS secret access key and\n signing method. For more information, see REST Authentication and\n SOAP Authentication\n for details.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: ServiceUnavailable
\n\n Description: Reduce your request rate.
\n\n HTTP Status Code: 503 Service Unavailable
\n\n SOAP Fault Code Prefix: Server
\n\n Code: SlowDown
\n\n Description: Reduce your request rate.
\n\n HTTP Status Code: 503 Slow Down
\n\n SOAP Fault Code Prefix: Server
\n\n Code: TemporaryRedirect
\n\n Description: You are being redirected to the bucket\n while DNS updates.
\n\n HTTP Status Code: 307 Moved Temporarily
\n\n SOAP Fault Code Prefix: Client
\n\n Code: TokenRefreshRequired
\n\n Description: The provided token must be\n refreshed.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: TooManyBuckets
\n\n Description: You have attempted to create more buckets\n than allowed.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: UnexpectedContent
\n\n Description: This request does not support\n content.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: UnresolvableGrantByEmailAddress
\n\n Description: The email address you provided does not\n match any account on record.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: UserKeyMustBeSpecified
\n\n Description: The bucket POST must contain the specified\n field name. If it is specified, check the order of the fields.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\nThe error code is a string that uniquely identifies an error condition. It is meant to\n be read and understood by programs that detect and handle errors by type.
\n\n Amazon S3 error codes\n
\n\n Code: AccessDenied
\n\n Description: Access Denied
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: AccountProblem
\n\n Description: There is a problem with your Amazon Web Services account\n that prevents the action from completing successfully. Contact Amazon Web Services Support\n for further assistance.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: AllAccessDisabled
\n\n Description: All access to this Amazon S3 resource has been\n disabled. Contact Amazon Web Services Support for further assistance.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: AmbiguousGrantByEmailAddress
\n\n Description: The email address you provided is\n associated with more than one account.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: AuthorizationHeaderMalformed
\n\n Description: The authorization header you provided is\n invalid.
\n\n HTTP Status Code: 400 Bad Request
\n\n HTTP Status Code: N/A
\n\n Code: BadDigest
\n\n Description: The Content-MD5 you specified did not\n match what we received.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: BucketAlreadyExists
\n\n Description: The requested bucket name is not\n available. The bucket namespace is shared by all users of the system. Please\n select a different name and try again.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: BucketAlreadyOwnedByYou
\n\n Description: The bucket you tried to create already\n exists, and you own it. Amazon S3 returns this error in all Amazon Web Services Regions except in\n the North Virginia Region. For legacy compatibility, if you re-create an\n existing bucket that you already own in the North Virginia Region, Amazon S3 returns\n 200 OK and resets the bucket access control lists (ACLs).
\n\n Code: 409 Conflict (in all Regions except the North\n Virginia Region)
\n\n SOAP Fault Code Prefix: Client
\n\n Code: BucketNotEmpty
\n\n Description: The bucket you tried to delete is not\n empty.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: CredentialsNotSupported
\n\n Description: This request does not support\n credentials.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: CrossLocationLoggingProhibited
\n\n Description: Cross-location logging not allowed.\n Buckets in one geographic location cannot log information to a bucket in\n another location.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: EntityTooSmall
\n\n Description: Your proposed upload is smaller than the\n minimum allowed object size.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: EntityTooLarge
\n\n Description: Your proposed upload exceeds the maximum\n allowed object size.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: ExpiredToken
\n\n Description: The provided token has expired.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: IllegalVersioningConfigurationException
\n\n Description: Indicates that the versioning\n configuration specified in the request is invalid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: IncompleteBody
\n\n Description: You did not provide the number of bytes\n specified by the Content-Length HTTP header
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: IncorrectNumberOfFilesInPostRequest
\n\n Description: POST requires exactly one file upload per\n request.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InlineDataTooLarge
\n\n Description: Inline data exceeds the maximum allowed\n size.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InternalError
\n\n Description: We encountered an internal error. Please\n try again.
\n\n HTTP Status Code: 500 Internal Server Error
\n\n SOAP Fault Code Prefix: Server
\n\n Code: InvalidAccessKeyId
\n\n Description: The Amazon Web Services access key ID you provided does\n not exist in our records.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidAddressingHeader
\n\n Description: You must specify the Anonymous\n role.
\n\n HTTP Status Code: N/A
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidArgument
\n\n Description: Invalid Argument
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidBucketName
\n\n Description: The specified bucket is not valid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidBucketState
\n\n Description: The request is not valid with the current\n state of the bucket.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidDigest
\n\n Description: The Content-MD5 you specified is not\n valid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidEncryptionAlgorithmError
\n\n Description: The encryption request you specified is\n not valid. The valid value is AES256.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidLocationConstraint
\n\n Description: The specified location constraint is not\n valid. For more information about Regions, see How to Select a\n Region for Your Buckets.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidObjectState
\n\n Description: The action is not valid for the current\n state of the object.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidPart
\n\n Description: One or more of the specified parts could\n not be found. The part might not have been uploaded, or the specified entity\n tag might not have matched the part's entity tag.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidPartOrder
\n\n Description: The list of parts was not in ascending\n order. Parts list must be specified in order by part number.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidPayer
\n\n Description: All access to this object has been\n disabled. Please contact Amazon Web Services Support for further assistance.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidPolicyDocument
\n\n Description: The content of the form does not meet the\n conditions specified in the policy document.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidRange
\n\n Description: The requested range cannot be\n satisfied.
\n\n HTTP Status Code: 416 Requested Range Not\n Satisfiable
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidRequest
\n\n Description: Please use AWS4-HMAC-SHA256
.
\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: SOAP requests must be made over an HTTPS\n connection.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Acceleration is not\n supported for buckets with non-DNS compliant names.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Acceleration is not\n supported for buckets with periods (.) in their names.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Accelerate endpoint only\n supports virtual style requests.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Accelerate is not configured\n on this bucket.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Accelerate is disabled on\n this bucket.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Acceleration is not\n supported on this bucket. Contact Amazon Web Services Support for more information.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidRequest
\n\n Description: Amazon S3 Transfer Acceleration cannot be\n enabled on this bucket. Contact Amazon Web Services Support for more information.
\n\n HTTP Status Code: 400 Bad Request
\n\n Code: N/A
\n\n Code: InvalidSecurity
\n\n Description: The provided security credentials are not\n valid.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidSOAPRequest
\n\n Description: The SOAP request body is invalid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidStorageClass
\n\n Description: The storage class you specified is not\n valid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidTargetBucketForLogging
\n\n Description: The target bucket for logging does not\n exist, is not owned by you, or does not have the appropriate grants for the\n log-delivery group.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidToken
\n\n Description: The provided token is malformed or\n otherwise invalid.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: InvalidURI
\n\n Description: Couldn't parse the specified URI.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: KeyTooLongError
\n\n Description: Your key is too long.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MalformedACLError
\n\n Description: The XML you provided was not well-formed\n or did not validate against our published schema.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MalformedPOSTRequest
\n\n Description: The body of your POST request is not\n well-formed multipart/form-data.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MalformedXML
\n\n Description: This happens when the user sends malformed\n XML (XML that doesn't conform to the published XSD) for the configuration. The\n error message is, \"The XML you provided was not well-formed or did not validate\n against our published schema.\"
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MaxMessageLengthExceeded
\n\n Description: Your request was too big.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MaxPostPreDataLengthExceededError
\n\n Description: Your POST request fields preceding the\n upload file were too large.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MetadataTooLarge
\n\n Description: Your metadata headers exceed the maximum\n allowed metadata size.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MethodNotAllowed
\n\n Description: The specified method is not allowed\n against this resource.
\n\n HTTP Status Code: 405 Method Not Allowed
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingAttachment
\n\n Description: A SOAP attachment was expected, but none\n were found.
\n\n HTTP Status Code: N/A
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingContentLength
\n\n Description: You must provide the Content-Length HTTP\n header.
\n\n HTTP Status Code: 411 Length Required
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingRequestBodyError
\n\n Description: This happens when the user sends an empty\n XML document as a request. The error message is, \"Request body is empty.\"\n
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingSecurityElement
\n\n Description: The SOAP 1.1 request is missing a security\n element.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: MissingSecurityHeader
\n\n Description: Your request is missing a required\n header.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoLoggingStatusForKey
\n\n Description: There is no such thing as a logging status\n subresource for a key.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchBucket
\n\n Description: The specified bucket does not\n exist.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchBucketPolicy
\n\n Description: The specified bucket does not have a\n bucket policy.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchKey
\n\n Description: The specified key does not exist.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchLifecycleConfiguration
\n\n Description: The lifecycle configuration does not\n exist.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchUpload
\n\n Description: The specified multipart upload does not\n exist. The upload ID might be invalid, or the multipart upload might have been\n aborted or completed.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NoSuchVersion
\n\n Description: Indicates that the version ID specified in\n the request does not match an existing version.
\n\n HTTP Status Code: 404 Not Found
\n\n SOAP Fault Code Prefix: Client
\n\n Code: NotImplemented
\n\n Description: A header you provided implies\n functionality that is not implemented.
\n\n HTTP Status Code: 501 Not Implemented
\n\n SOAP Fault Code Prefix: Server
\n\n Code: NotSignedUp
\n\n Description: Your account is not signed up for the Amazon S3\n service. You must sign up before you can use Amazon S3. You can sign up at the\n following URL: Amazon S3\n
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: OperationAborted
\n\n Description: A conflicting conditional action is\n currently in progress against this resource. Try again.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: PermanentRedirect
\n\n Description: The bucket you are attempting to access\n must be addressed using the specified endpoint. Send all future requests to\n this endpoint.
\n\n HTTP Status Code: 301 Moved Permanently
\n\n SOAP Fault Code Prefix: Client
\n\n Code: PreconditionFailed
\n\n Description: At least one of the preconditions you\n specified did not hold.
\n\n HTTP Status Code: 412 Precondition Failed
\n\n SOAP Fault Code Prefix: Client
\n\n Code: Redirect
\n\n Description: Temporary redirect.
\n\n HTTP Status Code: 307 Moved Temporarily
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RestoreAlreadyInProgress
\n\n Description: Object restore is already in\n progress.
\n\n HTTP Status Code: 409 Conflict
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RequestIsNotMultiPartContent
\n\n Description: Bucket POST must be of the enclosure-type\n multipart/form-data.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RequestTimeout
\n\n Description: Your socket connection to the server was\n not read from or written to within the timeout period.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RequestTimeTooSkewed
\n\n Description: The difference between the request time\n and the server's time is too large.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: RequestTorrentOfBucketError
\n\n Description: Requesting the torrent file of a bucket is\n not permitted.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: SignatureDoesNotMatch
\n\n Description: The request signature we calculated does\n not match the signature you provided. Check your Amazon Web Services secret access key and\n signing method. For more information, see REST Authentication and\n SOAP Authentication\n for details.
\n\n HTTP Status Code: 403 Forbidden
\n\n SOAP Fault Code Prefix: Client
\n\n Code: ServiceUnavailable
\n\n Description: Reduce your request rate.
\n\n HTTP Status Code: 503 Service Unavailable
\n\n SOAP Fault Code Prefix: Server
\n\n Code: SlowDown
\n\n Description: Reduce your request rate.
\n\n HTTP Status Code: 503 Slow Down
\n\n SOAP Fault Code Prefix: Server
\n\n Code: TemporaryRedirect
\n\n Description: You are being redirected to the bucket\n while DNS updates.
\n\n HTTP Status Code: 307 Moved Temporarily
\n\n SOAP Fault Code Prefix: Client
\n\n Code: TokenRefreshRequired
\n\n Description: The provided token must be\n refreshed.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: TooManyBuckets
\n\n Description: You have attempted to create more buckets\n than allowed.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: UnexpectedContent
\n\n Description: This request does not support\n content.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: UnresolvableGrantByEmailAddress
\n\n Description: The email address you provided does not\n match any account on record.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\n\n Code: UserKeyMustBeSpecified
\n\n Description: The bucket POST must contain the specified\n field name. If it is specified, check the order of the fields.
\n\n HTTP Status Code: 400 Bad Request
\n\n SOAP Fault Code Prefix: Client
\nReturns the Region the bucket resides in. You set the bucket's Region using the\n LocationConstraint
request parameter in a CreateBucket
\n request. For more information, see CreateBucket.
To use this implementation of the operation, you must be the bucket owner.
\n\nThe following operations are related to GetBucketLocation
:
\n GetObject\n
\n\n CreateBucket\n
\nReturns the Region the bucket resides in. You set the bucket's Region using the\n LocationConstraint
request parameter in a CreateBucket
\n request. For more information, see CreateBucket.
To use this implementation of the operation, you must be the bucket owner.
\n \nTo use this API against an access point, provide the alias of the access point in place of the bucket name.
\n\nThe following operations are related to GetBucketLocation
:
\n GetObject\n
\n\n CreateBucket\n
\nReturns the policy of a specified bucket. If you are using an identity other than the\n root user of the AWS account that owns the bucket, the calling identity must have the\n GetBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can\n always use this operation, even if the policy explicitly denies the root user the\n ability to perform this action.
\nFor more information about bucket policies, see Using Bucket Policies and User\n Policies.
\n\nThe following action is related to GetBucketPolicy
:
\n GetObject\n
\nReturns the policy of a specified bucket. If you are using an identity other than the\n root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n GetBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
As a security precaution, the root user of the Amazon Web Services account that owns a bucket can\n always use this operation, even if the policy explicitly denies the root user the\n ability to perform this action.
\nFor more information about bucket policies, see Using Bucket Policies and User\n Policies.
\n\nThe following action is related to GetBucketPolicy
:
\n GetObject\n
\nRetrieves objects from Amazon S3. To use GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using\n BitTorrent. For more information, see Amazon S3\n Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
\n\nIf the object you are retrieving is stored in the S3 Glacier or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectStateError
error. For information about restoring archived\n objects, see Restoring Archived\n Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS\n KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your\n object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).
\n\nAssuming you have permission to read object tags (permission for the\n s3:GetObjectVersionTagging
action), the response also returns the\n x-amz-tagging-count
header that provides the count of number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.
\n Permissions\n
\nYou need the s3:GetObject
permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will\n return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an\n HTTP status code 403 (\"access denied\") error.
\n Versioning\n
\nBy default, the GET action returns the current version of an object. To return a\n different version, use the versionId
subresource.
If the current version of the object is a delete marker, Amazon S3 behaves as if the\n object was deleted and includes x-amz-delete-marker: true
in the\n response.
For more information about versioning, see PutBucketVersioning.
\n\n\n Overriding Response Header Values\n
\nThere are times when you want to override certain response header values in a GET\n response. For example, you might override the Content-Disposition response header value in\n your GET request.
\n\nYou can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the GET response are Content-Type
,\n Content-Language
, Expires
, Cache-Control
,\n Content-Disposition
, and Content-Encoding
. To override these\n header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request.
\n\n response-content-type
\n
\n response-content-language
\n
\n response-expires
\n
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
\n Additional Considerations about Request Headers\n
\n\nIf both of the If-Match
and If-Unmodified-Since
headers are\n present in the request as follows: If-Match
condition evaluates to\n true
, and; If-Unmodified-Since
condition evaluates to\n false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
headers are\n present in the request as follows: If-None-Match
condition evaluates to\n false
, and; If-Modified-Since
condition evaluates to\n true
; then, S3 returns 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
\n\nThe following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nRetrieves objects from Amazon S3. To use GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using\n BitTorrent. For more information, see Amazon S3\n Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
\n\nIf the object you are retrieving is stored in the S3 Glacier or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectStateError
error. For information about restoring archived\n objects, see Restoring Archived\n Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for GET requests if your object uses server-side encryption with CMKs stored in Amazon Web Services\n KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your\n object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).
\n\nAssuming you have permission to read object tags (permission for the\n s3:GetObjectVersionTagging
action), the response also returns the\n x-amz-tagging-count
header that provides the count of number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.
\n Permissions\n
\nYou need the s3:GetObject
permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will\n return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an\n HTTP status code 403 (\"access denied\") error.
\n Versioning\n
\nBy default, the GET action returns the current version of an object. To return a\n different version, use the versionId
subresource.
You need the s3:GetObjectVersion
permission to access a specific version of an object.\n
If the current version of the object is a delete marker, Amazon S3 behaves as if the\n object was deleted and includes x-amz-delete-marker: true
in the\n response.
For more information about versioning, see PutBucketVersioning.
\n\n\n Overriding Response Header Values\n
\nThere are times when you want to override certain response header values in a GET\n response. For example, you might override the Content-Disposition response header value in\n your GET request.
\n\nYou can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the GET response are Content-Type
,\n Content-Language
, Expires
, Cache-Control
,\n Content-Disposition
, and Content-Encoding
. To override these\n header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request.
\n\n response-content-type
\n
\n response-content-language
\n
\n response-expires
\n
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
\n Additional Considerations about Request Headers\n
\n\nIf both of the If-Match
and If-Unmodified-Since
headers are\n present in the request as follows: If-Match
condition evaluates to\n true
, and; If-Unmodified-Since
condition evaluates to\n false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
headers are\n present in the request as follows: If-None-Match
condition evaluates to\n false
, and; If-Modified-Since
condition evaluates to\n true
; then, S3 returns 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
\n\nThe following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nThe bucket name that contains the object for which to get the ACL information.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name that contains the object for which to get the ACL information.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4833,7 +4833,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object whose Legal Hold status you want to retrieve.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object whose Legal Hold status you want to retrieve.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4903,7 +4903,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket whose Object Lock configuration you want to retrieve.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket whose Object Lock configuration you want to retrieve.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5077,14 +5077,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -5151,7 +5151,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5329,7 +5329,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object whose retention settings you want to retrieve.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object whose retention settings you want to retrieve.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5409,7 +5409,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object for which to get the tagging information.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object for which to get the tagging information.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5628,7 +5628,7 @@ "EmailAddress": { "target": "com.amazonaws.s3#EmailAddress", "traits": { - "smithy.api#documentation": "Email address of the grantee.
\nUsing email addresses to specify a grantee is only supported in the following AWS Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
\nEmail address of the grantee.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nThis action is useful to determine if a bucket exists and you have permission to\n access it. The action returns a 200 OK
if the bucket exists and you have\n permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request\n returns a generic 404 Not Found
or 403 Forbidden
code. A message body is not \n included, so you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the\n s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.
This action is useful to determine if a bucket exists and you have permission to\n access it. The action returns a 200 OK
if the bucket exists and you have\n permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request\n returns a generic 404 Not Found
or 403 Forbidden
code. A message body is not \n included, so you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the\n s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.
To use this API against an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For more information, see Using access points.
", "smithy.api#http": { "method": "HEAD", "uri": "/{Bucket}", @@ -5721,7 +5721,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5749,7 +5749,7 @@ } ], "traits": { - "smithy.api#documentation": "The HEAD action retrieves metadata from an object without returning the object\n itself. This action is useful if you're only interested in an object's metadata. To use\n HEAD, you must have READ access to the object.
\n\nA HEAD
request has the same options as a GET
action on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic 404 Not Found
or 403 Forbidden
code. It is not \n possible to retrieve the exact exception beyond these error codes.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).
\nEncryption request headers, like x-amz-server-side-encryption
, should\n not be sent for GET requests if your object uses server-side encryption with CMKs stored\n in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys\n (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest\n error.
\n The last modified property in this case is the creation date of the object.
\nRequest headers are limited to 8 KB in size. For more information, see Common Request\n Headers.
\nConsider the following when using request headers:
\n Consideration 1 – If both of the If-Match
and\n If-Unmodified-Since
headers are present in the request as\n follows:
\n If-Match
condition evaluates to true
, and;
\n If-Unmodified-Since
condition evaluates to\n false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and\n If-Modified-Since
headers are present in the request as\n follows:
\n If-None-Match
condition evaluates to false
,\n and;
\n If-Modified-Since
condition evaluates to\n true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
\n\n\n Permissions\n
\nYou need the s3:GetObject
permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns\n an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP\n status code 403 (\"access denied\") error.
The following action is related to HeadObject
:
\n GetObject\n
\nThe HEAD action retrieves metadata from an object without returning the object\n itself. This action is useful if you're only interested in an object's metadata. To use\n HEAD, you must have READ access to the object.
\n\nA HEAD
request has the same options as a GET
action on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic 404 Not Found
or 403 Forbidden
code. It is not \n possible to retrieve the exact exception beyond these error codes.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).
\nEncryption request headers, like x-amz-server-side-encryption
, should\n not be sent for GET requests if your object uses server-side encryption with CMKs stored\n in Amazon Web Services KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys\n (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest\n error.
\n The last modified property in this case is the creation date of the object.
\nRequest headers are limited to 8 KB in size. For more information, see Common Request\n Headers.
\nConsider the following when using request headers:
\n Consideration 1 – If both of the If-Match
and\n If-Unmodified-Since
headers are present in the request as\n follows:
\n If-Match
condition evaluates to true
, and;
\n If-Unmodified-Since
condition evaluates to\n false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and\n If-Modified-Since
headers are present in the request as\n follows:
\n If-None-Match
condition evaluates to false
,\n and;
\n If-Modified-Since
condition evaluates to\n true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
\n\n\n Permissions\n
\nYou need the s3:GetObject
permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns\n an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP\n status code 403 (\"access denied\") error.
The following action is related to HeadObject
:
\n GetObject\n
\nIf the object is stored using server-side encryption either with an AWS KMS customer\n master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with\n the value of the server-side encryption algorithm used when storing this object in Amazon\n S3 (for example, AES256, aws:kms).
", + "smithy.api#documentation": "If the object is stored using server-side encryption either with an Amazon Web Services KMS customer\n master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with\n the value of the server-side encryption algorithm used when storing this object in Amazon\n S3 (for example, AES256, aws:kms).
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -5940,14 +5940,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -6007,7 +6007,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket containing the object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The name of the bucket containing the object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -6153,7 +6153,7 @@ "ID": { "target": "com.amazonaws.s3#ID", "traits": { - "smithy.api#documentation": "If the principal is an AWS account, it provides the Canonical User ID. If the principal\n is an IAM User, it provides a user ARN value.
" + "smithy.api#documentation": "If the principal is an Amazon Web Services account, it provides the Canonical User ID. If the principal\n is an IAM User, it provides a user ARN value.
" } }, "DisplayName": { @@ -6701,7 +6701,7 @@ "LambdaFunctionArn": { "target": "com.amazonaws.s3#LambdaFunctionArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 invokes when the\n specified event type occurs.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes when the\n specified event type occurs.
", "smithy.api#required": {}, "smithy.api#xmlName": "CloudFunction" } @@ -6709,7 +6709,7 @@ "Events": { "target": "com.amazonaws.s3#EventList", "traits": { - "smithy.api#documentation": "The Amazon S3 bucket event for which to invoke the AWS Lambda function. For more information,\n see Supported\n Event Types in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The Amazon S3 bucket event for which to invoke the Lambda function. For more information,\n see Supported\n Event Types in the Amazon S3 User Guide.
", "smithy.api#required": {}, "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Event" @@ -6720,7 +6720,7 @@ } }, "traits": { - "smithy.api#documentation": "A container for specifying the configuration for AWS Lambda notifications.
" + "smithy.api#documentation": "A container for specifying the configuration for Lambda notifications.
" } }, "com.amazonaws.s3#LambdaFunctionConfigurationList": { @@ -7230,7 +7230,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated.
" + "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used.
" } }, "KeyMarker": { @@ -7313,7 +7313,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7635,7 +7635,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket containing the objects.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The name of the bucket containing the objects.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7656,7 +7656,7 @@ "Marker": { "target": "com.amazonaws.s3#Marker", "traits": { - "smithy.api#documentation": "Specifies the key to start with when listing objects in a bucket.
", + "smithy.api#documentation": "Marker is where you want Amazon S3 to start listing from. Amazon S3 starts listing after\n this specified key. Marker can be any key in the bucket.
", "smithy.api#httpQuery": "marker" } }, @@ -7704,7 +7704,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use\n the request parameters as selection criteria to return a subset of the objects in a bucket. A \n 200 OK
response can contain valid or invalid XML. Make sure to design your\n application to parse the contents of the response and handle it appropriately. \n Objects are returned sorted in an ascending order of the respective key names in the list.\n For more information about listing objects, see Listing object keys \n programmatically\n
To use this operation, you must have READ access to the bucket.
\n\nTo use this action in an AWS Identity and Access Management (IAM) policy, you must\n have permissions to perform the s3:ListBucket
action. The bucket owner has\n this permission by default and can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.
This section describes the latest revision of this action. We recommend that you use this\n revised API for application development. For backward compatibility, Amazon S3 continues to\n support the prior version of this API, ListObjects.
\nTo get a list of your buckets, see ListBuckets.
\n\nThe following operations are related to ListObjectsV2
:
\n GetObject\n
\n\n PutObject\n
\n\n CreateBucket\n
\nReturns some or all (up to 1,000) of the objects in a bucket with each request. You can use\n the request parameters as selection criteria to return a subset of the objects in a bucket. A \n 200 OK
response can contain valid or invalid XML. Make sure to design your\n application to parse the contents of the response and handle it appropriately. \n Objects are returned sorted in an ascending order of the respective key names in the list.\n For more information about listing objects, see Listing object keys \n programmatically\n
To use this operation, you must have READ access to the bucket.
\n\nTo use this action in an Identity and Access Management (IAM) policy, you must\n have permissions to perform the s3:ListBucket
action. The bucket owner has\n this permission by default and can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.
This section describes the latest revision of this action. We recommend that you use this\n revised API for application development. For backward compatibility, Amazon S3 continues to\n support the prior version of this API, ListObjects.
\nTo get a list of your buckets, see ListBuckets.
\n\nThe following operations are related to ListObjectsV2
:
\n GetObject\n
\n\n PutObject\n
\n\n CreateBucket\n
\nThe bucket name.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
" + "smithy.api#documentation": "The bucket name.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
" } }, "Prefix": { @@ -7805,7 +7805,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "Bucket name to list.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Bucket name to list.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7918,7 +7918,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated.
" + "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used.
" } }, "Key": { @@ -7968,7 +7968,7 @@ "Initiator": { "target": "com.amazonaws.s3#Initiator", "traits": { - "smithy.api#documentation": "Container element that identifies who initiated the multipart upload. If the initiator\n is an AWS account, this element provides the same information as the Owner
\n element. If the initiator is an IAM User, this element provides the user ARN and display\n name.
Container element that identifies who initiated the multipart upload. If the initiator\n is an Amazon Web Services account, this element provides the same information as the Owner
\n element. If the initiator is an IAM User, this element provides the user ARN and display\n name.
The name of the bucket to which the parts are being uploaded.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The name of the bucket to which the parts are being uploaded.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -8470,7 +8470,7 @@ "LambdaFunctionConfigurations": { "target": "com.amazonaws.s3#LambdaFunctionConfigurationList", "traits": { - "smithy.api#documentation": "Describes the AWS Lambda functions to invoke and the events for which to invoke\n them.
", + "smithy.api#documentation": "Describes the Lambda functions to invoke and the events for which to invoke\n them.
", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "CloudFunctionConfiguration" } @@ -8518,7 +8518,7 @@ "ETag": { "target": "com.amazonaws.s3#ETag", "traits": { - "smithy.api#documentation": "The entity tag is a hash of the object. The ETag reflects changes only to the contents\n of an object, not its metadata. The ETag may or may not be an MD5 digest of the object\n data. Whether or not it is depends on how the object was created and how it is encrypted as\n described below:
\nObjects created by the PUT Object, POST Object, or Copy operation, or through the\n AWS Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that are\n an MD5 digest of their object data.
\nObjects created by the PUT Object, POST Object, or Copy operation, or through the\n AWS Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are\n not an MD5 digest of their object data.
\nIf an object is created by either the Multipart Upload or Part Copy operation, the\n ETag is not an MD5 digest, regardless of the method of encryption.
\nThe entity tag is a hash of the object. The ETag reflects changes only to the contents\n of an object, not its metadata. The ETag may or may not be an MD5 digest of the object\n data. Whether or not it is depends on how the object was created and how it is encrypted as\n described below:
\nObjects created by the PUT Object, POST Object, or Copy operation, or through the\n Amazon Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that are\n an MD5 digest of their object data.
\nObjects created by the PUT Object, POST Object, or Copy operation, or through the\n Amazon Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are\n not an MD5 digest of their object data.
\nIf an object is created by either the Multipart Upload or Part Copy operation, the\n ETag is not an MD5 digest, regardless of the method of encryption.
\nSpecifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE
restricts access to this bucket to only AWS service\n principals and authorized users within this account if the bucket has a public\n policy.
Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.
", + "smithy.api#documentation": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE
restricts access to this bucket to only Amazon Web Services service\n principals and authorized users within this account if the bucket has a public\n policy.</p>
Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.
", "smithy.api#xmlName": "RestrictPublicBuckets" } } @@ -9266,7 +9266,7 @@ "target": "com.amazonaws.s3#PutBucketAclRequest" }, "traits": { - "smithy.api#documentation": "Sets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set\n the ACL of a bucket, you must have WRITE_ACP
permission.
You can use one of the following two ways to set a bucket's permissions:
\nSpecify the ACL in the request body
\nSpecify permissions using request headers
\nYou cannot specify access permission using both the body and the request\n headers.
\nDepending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.
\n\n\n\n Access Permissions\n
\nYou can set access permissions using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. Specify the canned ACL name as the\n value of x-amz-acl
. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n the x-amz-acl
header to set a canned ACL. These parameters map to the\n set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL)\n Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an AWS\n account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
\nFor example, the following x-amz-grant-write
header grants create,\n overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and\n two AWS accounts identified by their email addresses.
\n x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\",\n id=\"111122223333\", id=\"555566667777\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\n\n Grantee Values\n
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following AWS Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
\n\n Related Resources\n
\n\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetObjectAcl\n
\nSets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set\n the ACL of a bucket, you must have WRITE_ACP
permission.
You can use one of the following two ways to set a bucket's permissions:
\nSpecify the ACL in the request body
\nSpecify permissions using request headers
\nYou cannot specify access permission using both the body and the request\n headers.
\nDepending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.
\n\n\n\n Access Permissions\n
\nYou can set access permissions using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. Specify the canned ACL name as the\n value of x-amz-acl
. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n the x-amz-acl
header to set a canned ACL. These parameters map to the\n set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL)\n Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-write
header grants create,\n overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and\n two Amazon Web Services accounts identified by their email addresses.
\n x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\",\n id=\"111122223333\", id=\"555566667777\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\n\n Grantee Values\n
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\n\n Related Resources\n
\n\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetObjectAcl\n
\nThe base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -9432,7 +9432,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -9460,7 +9460,7 @@ "target": "com.amazonaws.s3#PutBucketEncryptionRequest" }, "traits": { - "smithy.api#documentation": "This action uses the encryption
subresource to configure default\n encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys\n (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption\n using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default\n encryption, see Amazon S3 default bucket encryption\n in the Amazon S3 User Guide. For more information about S3 Bucket Keys,\n see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
\nThis action requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature\n Version 4).
\nTo use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.
\n Related Resources\n
\n\n GetBucketEncryption\n
\nThis action uses the encryption
subresource to configure default\n encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys\n (SSE-S3) or Amazon Web Services KMS customer master keys (SSE-KMS). If you specify default encryption\n using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default\n encryption, see Amazon S3 default bucket encryption\n in the Amazon S3 User Guide. For more information about S3 Bucket Keys,\n see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
\nThis action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature\n Version 4).
\nTo use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.
\n Related Resources\n
\n\n GetBucketEncryption\n
\nSpecifies default encryption for a bucket using server-side encryption with Amazon S3-managed\n keys (SSE-S3) or customer master keys stored in AWS KMS (SSE-KMS). For information about\n the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption\n in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed\n keys (SSE-S3) or customer master keys stored in Amazon Web Services KMS (SSE-KMS). For information about\n the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption\n in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -9483,7 +9483,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -9554,7 +9554,7 @@ "target": "com.amazonaws.s3#PutBucketInventoryConfigurationRequest" }, "traits": { - "smithy.api#documentation": "This implementation of the PUT
action adds an inventory configuration\n (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory\n configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly\n basis, and the results are published to a flat file. The bucket that is inventoried is\n called the source bucket, and the bucket where the inventory flat file\n is stored is called the destination bucket. The\n destination bucket must be in the same AWS Region as the\n source bucket.
\nWhen you configure an inventory for a source bucket, you specify\n the destination bucket where you want the inventory to be stored, and\n whether to generate the inventory daily or weekly. You can also configure what object\n metadata to include and whether to inventory all object versions or only current versions.\n For more information, see Amazon S3\n Inventory in the Amazon S3 User Guide.
\nYou must create a bucket policy on the destination bucket to\n grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an\n example policy, see \n Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
\nTo use this operation, you must have permissions to perform the\n s3:PutInventoryConfiguration
action. The bucket owner has this permission\n by default and can grant this permission to others. For more information about permissions,\n see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.
\n Special Errors\n
\n\n HTTP 400 Bad Request Error\n
\n\n Code: InvalidArgument
\n\n Cause: Invalid Argument
\n\n HTTP 400 Bad Request Error\n
\n\n Code: TooManyConfigurations
\n\n Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
\n\n HTTP 403 Forbidden Error\n
\n\n Code: AccessDenied
\n\n Cause: You are not the owner of the specified bucket,\n or you do not have the s3:PutInventoryConfiguration
bucket\n permission to set the configuration on the bucket.
\n Related Resources\n
\nThis implementation of the PUT
action adds an inventory configuration\n (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory\n configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly\n basis, and the results are published to a flat file. The bucket that is inventoried is\n called the source bucket, and the bucket where the inventory flat file\n is stored is called the destination bucket. The\n destination bucket must be in the same Amazon Web Services Region as the\n source bucket.
\nWhen you configure an inventory for a source bucket, you specify\n the destination bucket where you want the inventory to be stored, and\n whether to generate the inventory daily or weekly. You can also configure what object\n metadata to include and whether to inventory all object versions or only current versions.\n For more information, see Amazon S3\n Inventory in the Amazon S3 User Guide.
\nYou must create a bucket policy on the destination bucket to\n grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an\n example policy, see \n Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
\nTo use this operation, you must have permissions to perform the\n s3:PutInventoryConfiguration
action. The bucket owner has this permission\n by default and can grant this permission to others. For more information about permissions,\n see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.
\n Special Errors\n
\n\n HTTP 400 Bad Request Error\n
\n\n Code: InvalidArgument
\n\n Cause: Invalid Argument
\n\n HTTP 400 Bad Request Error\n
\n\n Code: TooManyConfigurations
\n\n Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
\n\n HTTP 403 Forbidden Error\n
\n\n Code: AccessDenied
\n\n Cause: You are not the owner of the specified bucket,\n or you do not have the s3:PutInventoryConfiguration
bucket\n permission to set the configuration on the bucket.
\n Related Resources\n
\nCreates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. For information about lifecycle configuration, see Managing your storage\n lifecycle.
\n\nBucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The previous version of the API supported\n filtering based only on an object key name prefix, which is supported for backward\n compatibility. For the related API description, see PutBucketLifecycle.
\n\n Rules\n
\nYou specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. Each rule consists of\n the following:
\n\nFilter identifying a subset of objects to which the rule applies. The filter can\n be based on a key name prefix, object tags, or a combination of both.
\nStatus whether the rule is in effect.
\nOne or more lifecycle transition and expiration actions that you want Amazon S3 to\n perform on the objects identified by the filter. If the state of your bucket is\n versioning-enabled or versioning-suspended, you can have many versions of the same\n object (one current version and zero or more noncurrent versions). Amazon S3 provides\n predefined actions that you can specify for current and noncurrent object\n versions.
\nFor more information, see Object\n Lifecycle Management and Lifecycle Configuration Elements.
\n\n\n\n Permissions\n
\n\n\nBy default, all Amazon S3 resources are private, including buckets, objects, and related\n subresources (for example, lifecycle configuration and website configuration). Only the\n resource owner (that is, the AWS account that created it) can access the resource. The\n resource owner can optionally grant access permissions to others by writing an access\n policy. For this operation, a user must get the s3:PutLifecycleConfiguration\n permission.
\n\nYou can also explicitly deny permissions. Explicit deny also supersedes any other\n permissions. If you want to block users or accounts from removing or deleting objects from\n your bucket, you must deny them permissions for the following actions:
\n\ns3:DeleteObject
\ns3:DeleteObjectVersion
\ns3:PutLifecycleConfiguration
\nFor more information about permissions, see Managing Access Permissions to Your Amazon S3\n Resources.
\n\nThe following are related to PutBucketLifecycleConfiguration
:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. For information about lifecycle configuration, see Managing your storage\n lifecycle.
\n\nBucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The previous version of the API supported\n filtering based only on an object key name prefix, which is supported for backward\n compatibility. For the related API description, see PutBucketLifecycle.
\n\n Rules\n
\nYou specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. Each rule consists of\n the following:
\n\nFilter identifying a subset of objects to which the rule applies. The filter can\n be based on a key name prefix, object tags, or a combination of both.
\nStatus whether the rule is in effect.
\nOne or more lifecycle transition and expiration actions that you want Amazon S3 to\n perform on the objects identified by the filter. If the state of your bucket is\n versioning-enabled or versioning-suspended, you can have many versions of the same\n object (one current version and zero or more noncurrent versions). Amazon S3 provides\n predefined actions that you can specify for current and noncurrent object\n versions.
\nFor more information, see Object\n Lifecycle Management and Lifecycle Configuration Elements.
\n\n\n\n Permissions\n
\n\n\nBy default, all Amazon S3 resources are private, including buckets, objects, and related\n subresources (for example, lifecycle configuration and website configuration). Only the\n resource owner (that is, the Amazon Web Services account that created it) can access the resource. The\n resource owner can optionally grant access permissions to others by writing an access\n policy. For this operation, a user must get the s3:PutLifecycleConfiguration\n permission.
\n\nYou can also explicitly deny permissions. Explicit deny also supersedes any other\n permissions. If you want to block users or accounts from removing or deleting objects from\n your bucket, you must deny them permissions for the following actions:
\n\ns3:DeleteObject
\ns3:DeleteObjectVersion
\ns3:PutLifecycleConfiguration
\nFor more information about permissions, see Managing Access Permissions to Your Amazon S3\n Resources.
\n\nThe following are related to PutBucketLifecycleConfiguration
:
Set the logging parameters for a bucket and to specify permissions for who can view and\n modify the logging parameters. All logs are saved to buckets in the same AWS Region as the\n source bucket. To set the logging status of a bucket, you must be the bucket owner.
\n\nThe bucket owner is automatically granted FULL_CONTROL to all logs. You use the\n Grantee
request element to grant access to other people. The\n Permissions
request element specifies the kind of access the grantee has to\n the logs.
\n Grantee Values\n
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\n\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request.
\nBy Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nBy URI:
\n\n
\n
To enable logging, you use LoggingEnabled and its children request elements. To disable\n logging, you use an empty BucketLoggingStatus request element:
\n\n\n
\n
For more information about server access logging, see Server Access Logging.
\n\nFor more information about creating a bucket, see CreateBucket. For more\n information about returning the logging status of a bucket, see GetBucketLogging.
\n\nThe following operations are related to PutBucketLogging
:
\n PutObject\n
\n\n DeleteBucket\n
\n\n CreateBucket\n
\n\n GetBucketLogging\n
\nSet the logging parameters for a bucket and to specify permissions for who can view and\n modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the\n source bucket. To set the logging status of a bucket, you must be the bucket owner.
\n\nThe bucket owner is automatically granted FULL_CONTROL to all logs. You use the\n Grantee
request element to grant access to other people. The\n Permissions
request element specifies the kind of access the grantee has to\n the logs.
\n Grantee Values\n
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\n\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request.
\nBy Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nBy URI:
\n\n
\n
To enable logging, you use LoggingEnabled and its children request elements. To disable\n logging, you use an empty BucketLoggingStatus request element:
\n\n\n
\n
For more information about server access logging, see Server Access Logging.
\n\nFor more information about creating a bucket, see CreateBucket. For more\n information about returning the logging status of a bucket, see GetBucketLogging.
\n\nThe following operations are related to PutBucketLogging
:
\n PutObject\n
\n\n DeleteBucket\n
\n\n CreateBucket\n
\n\n GetBucketLogging\n
\nThe MD5 hash of the PutBucketLogging
request body.
For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The MD5 hash of the PutBucketLogging
request body.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -9750,7 +9750,7 @@ "target": "com.amazonaws.s3#PutBucketNotificationConfigurationRequest" }, "traits": { - "smithy.api#documentation": "Enables notifications of specified events for a bucket. For more information about event\n notifications, see Configuring Event\n Notifications.
\n\nUsing this API, you can replace an existing notification configuration. The\n configuration is an XML file that defines the event types that you want Amazon S3 to publish and\n the destination where you want Amazon S3 to publish an event notification when it detects an\n event of the specified type.
\n\nBy default, your bucket has no event notifications configured. That is, the notification\n configuration will be an empty NotificationConfiguration
.
\n
\n
\n \n
This action replaces the existing notification configuration with the configuration\n you include in the request body.
\n\nAfter Amazon S3 receives this request, it first verifies that any Amazon Simple Notification\n Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and\n that the bucket owner has permission to publish to it by sending a test notification. In\n the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions\n grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information,\n see Configuring Notifications for Amazon S3\n Events.
\n\nYou can disable notifications by adding the empty NotificationConfiguration\n element.
\n\nBy default, only the bucket owner can configure notifications on a bucket. However,\n bucket owners can use a bucket policy to grant permission to other users to set this\n configuration with s3:PutBucketNotification
permission.
The PUT notification is an atomic operation. For example, suppose your notification\n configuration includes SNS topic, SQS queue, and Lambda function configurations. When\n you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS\n topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add\n the configuration to your bucket.
\n\n Responses\n
\nIf the configuration in the request body includes only one\n TopicConfiguration
specifying only the\n s3:ReducedRedundancyLostObject
event type, the response will also include\n the x-amz-sns-test-message-id
header containing the message ID of the test\n notification sent to the topic.
The following action is related to\n PutBucketNotificationConfiguration
:
Enables notifications of specified events for a bucket. For more information about event\n notifications, see Configuring Event\n Notifications.
\n\nUsing this API, you can replace an existing notification configuration. The\n configuration is an XML file that defines the event types that you want Amazon S3 to publish and\n the destination where you want Amazon S3 to publish an event notification when it detects an\n event of the specified type.
\n\nBy default, your bucket has no event notifications configured. That is, the notification\n configuration will be an empty NotificationConfiguration
.
\n
\n
\n \n
This action replaces the existing notification configuration with the configuration\n you include in the request body.
\n\nAfter Amazon S3 receives this request, it first verifies that any Amazon Simple Notification\n Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and\n that the bucket owner has permission to publish to it by sending a test notification. In\n the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions\n grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information,\n see Configuring Notifications for Amazon S3\n Events.
\n\nYou can disable notifications by adding the empty NotificationConfiguration\n element.
\n\nBy default, only the bucket owner can configure notifications on a bucket. However,\n bucket owners can use a bucket policy to grant permission to other users to set this\n configuration with s3:PutBucketNotification
permission.
The PUT notification is an atomic operation. For example, suppose your notification\n configuration includes SNS topic, SQS queue, and Lambda function configurations. When\n you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS\n topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add\n the configuration to your bucket.
\n\n Responses\n
\nIf the configuration in the request body includes only one\n TopicConfiguration
specifying only the\n s3:ReducedRedundancyLostObject
event type, the response will also include\n the x-amz-sns-test-message-id
header containing the message ID of the test\n notification sent to the topic.
The following action is related to\n PutBucketNotificationConfiguration
:
The MD5 hash of the OwnershipControls
request body.
For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The MD5 hash of the OwnershipControls
request body.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -9843,7 +9843,7 @@ "target": "com.amazonaws.s3#PutBucketPolicyRequest" }, "traits": { - "smithy.api#documentation": "Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than\n the root user of the AWS account that owns the bucket, the calling identity must have the\n PutBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can\n always use this operation, even if the policy explicitly denies the root user the\n ability to perform this action.
\nFor more information about bucket policies, see Using Bucket Policies and User\n Policies.
\n\nThe following operations are related to PutBucketPolicy
:
\n CreateBucket\n
\n\n DeleteBucket\n
\nApplies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than\n the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n PutBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
As a security precaution, the root user of the Amazon Web Services account that owns a bucket can\n always use this operation, even if the policy explicitly denies the root user the\n ability to perform this action.
\nFor more information, see Bucket policy examples.
\n\nThe following operations are related to PutBucketPolicy
:
\n CreateBucket\n
\n\n DeleteBucket\n
\nThe MD5 hash of the request body.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The MD5 hash of the request body.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -9900,7 +9900,7 @@ "target": "com.amazonaws.s3#PutBucketReplicationRequest" }, "traits": { - "smithy.api#documentation": "Creates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.
\nTo perform this operation, the user or role performing the action must have the\n iam:PassRole permission.
\nSpecify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information.
\n\n\nA replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.
\n\nTo specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication
, Status
, and\n Priority
.
If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.
\nFor information about enabling versioning on a bucket, see Using Versioning.
\n\nBy default, a resource owner, in this case the AWS account that created the bucket, can\n perform this operation. The resource owner can also grant others permissions to perform the\n operation. For more information about permissions, see Specifying Permissions in a Policy\n and Managing Access Permissions to Your\n Amazon S3 Resources.
\n\n\n Handling Replication of Encrypted Objects\n
\nBy default, Amazon S3 doesn't replicate objects that are stored at rest using server-side\n encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the\n following: SourceSelectionCriteria
, SseKmsEncryptedObjects
,\n Status
, EncryptionConfiguration
, and\n ReplicaKmsKeyID
. For information about replication configuration, see\n Replicating Objects\n Created with SSE Using CMKs stored in AWS KMS.
For information on PutBucketReplication
errors, see List of\n replication-related error codes\n
The following operations are related to PutBucketReplication
:
\n GetBucketReplication\n
\nCreates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.
\n \nSpecify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information.
\n\n\nA replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.
\n\nTo specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication
, Status
, and\n Priority
.
If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.
\nFor information about enabling versioning on a bucket, see Using Versioning.
\n\n\n Handling Replication of Encrypted Objects\n
\nBy default, Amazon S3 doesn't replicate objects that are stored at rest using server-side\n encryption with CMKs stored in Amazon Web Services KMS. To replicate Amazon Web Services KMS-encrypted objects, add the\n following: SourceSelectionCriteria
, SseKmsEncryptedObjects
,\n Status
, EncryptionConfiguration
, and\n ReplicaKmsKeyID
. For information about replication configuration, see\n Replicating Objects\n Created with SSE Using CMKs stored in Amazon Web Services KMS.
For information on PutBucketReplication
errors, see List of\n replication-related error codes\n
\n Permissions\n
\nTo create a PutBucketReplication
request, you must have s3:PutReplicationConfiguration
\n permissions for the bucket. \n
By default, a resource owner, in this case the Amazon Web Services account that created the bucket, can\n perform this operation. The resource owner can also grant others permissions to perform the\n operation. For more information about permissions, see Specifying Permissions in a Policy\n and Managing Access Permissions to Your\n Amazon S3 Resources.
\nTo perform this operation, the user or role performing the action must have the\n iam:PassRole permission.
\nThe following operations are related to PutBucketReplication
:
\n GetBucketReplication\n
\nThe base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -9980,7 +9980,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a\n message integrity check to verify that the request body was not corrupted in transit. For\n more information, see RFC\n 1864.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a\n message integrity check to verify that the request body was not corrupted in transit. For\n more information, see RFC\n 1864.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10008,7 +10008,7 @@ "target": "com.amazonaws.s3#PutBucketTaggingRequest" }, "traits": { - "smithy.api#documentation": "Sets the tags for a bucket.
\nUse tags to organize your AWS bill to reflect your own cost structure. To do this, sign\n up to get your AWS account bill with tag key values included. Then, to see the cost of\n combined resources, organize your billing information according to resources with the same\n tag key values. For example, you can tag several resources with a specific application\n name, and then organize your billing information to see the total cost of that application\n across several services. For more information, see Cost Allocation\n and Tagging and Using Cost Allocation in Amazon S3 Bucket\n Tags.
\n\n\n When this operation sets the tags for a bucket, it will overwrite any current tags the \n bucket already has. You cannot use this operation to add tags to an existing list of tags.
\nTo use this operation, you must have permissions to perform the\n s3:PutBucketTagging
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.
\n PutBucketTagging
has the following special errors:
Error code: InvalidTagError
\n
Description: The tag provided was not a valid tag. This error can occur if\n the tag did not pass input validation. For information about tag restrictions,\n see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
\nError code: MalformedXMLError
\n
Description: The XML provided does not match the schema.
\nError code: OperationAbortedError
\n
Description: A conflicting conditional action is currently in progress\n against this resource. Please try again.
\nError code: InternalError
\n
Description: The service was unable to apply the provided tag to the\n bucket.
\nThe following operations are related to PutBucketTagging
:
\n GetBucketTagging\n
\n\n DeleteBucketTagging\n
\nSets the tags for a bucket.
\nUse tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign\n up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of\n combined resources, organize your billing information according to resources with the same\n tag key values. For example, you can tag several resources with a specific application\n name, and then organize your billing information to see the total cost of that application\n across several services. For more information, see Cost Allocation\n and Tagging and Using Cost Allocation in Amazon S3 Bucket\n Tags.
\n\n\n When this operation sets the tags for a bucket, it will overwrite any current tags the \n bucket already has. You cannot use this operation to add tags to an existing list of tags.
\nTo use this operation, you must have permissions to perform the\n s3:PutBucketTagging
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.
\n PutBucketTagging
has the following special errors:
Error code: InvalidTagError
\n
Description: The tag provided was not a valid tag. This error can occur if\n the tag did not pass input validation. For information about tag restrictions,\n see User-Defined Tag Restrictions and Amazon Web Services-Generated Cost Allocation Tag Restrictions.
\nError code: MalformedXMLError
\n
Description: The XML provided does not match the schema.
\nError code: OperationAbortedError
\n
Description: A conflicting conditional action is currently in progress\n against this resource. Please try again.
\nError code: InternalError
\n
Description: The service was unable to apply the provided tag to the\n bucket.
\nThe following operations are related to PutBucketTagging
:
\n GetBucketTagging\n
\n\n DeleteBucketTagging\n
\nThe base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10082,7 +10082,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": ">The base64-encoded 128-bit MD5 digest of the data. You must use this header as a\n message integrity check to verify that the request body was not corrupted in transit. For\n more information, see RFC\n 1864.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": ">The base64-encoded 128-bit MD5 digest of the data. You must use this header as a\n message integrity check to verify that the request body was not corrupted in transit. For\n more information, see RFC\n 1864.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10140,7 +10140,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10171,7 +10171,7 @@ "target": "com.amazonaws.s3#PutObjectOutput" }, "traits": { - "smithy.api#documentation": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object\n to it.
\n\n\nAmazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket.
\n\nAmazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object\n locking; if you need this, make sure to build it into your application layer or use\n versioning instead.
\n\nTo ensure that data is not corrupted traversing the network, use the\n Content-MD5
header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, returns an error. Additionally,\n you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.
The Content-MD5
header is required for any request to upload an object\n with a retention period configured using Amazon S3 Object Lock. For more information about\n Amazon S3 Object Lock, see Amazon S3 Object Lock Overview\n in the Amazon S3 User Guide.
\n Server-side Encryption\n
\nYou can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts \n your data as it writes it to disks in its data centers and decrypts the data\n when you access it. You have the option to provide your own encryption key or use AWS\n managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side\n Encryption.
\nIf you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable \n an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the \n Amazon S3 User Guide.
\n\n Access Control List (ACL)-Specific Request\n Headers\n
\nYou can use headers to grant ACL- based permissions. By default, all objects are\n private. Only the owner has full access control. When adding a new object, you can grant\n permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These\n permissions are then added to the ACL on the object. For more information, see Access Control List\n (ACL) Overview and Managing ACLs Using the REST\n API.
\n\n\n Storage Class Options\n
\nBy default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.
\n\n\n\n Versioning\n
\nIf you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID\n for the object being stored. Amazon S3 returns this ID in the response. When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all of the objects.
\nFor more information about versioning, see Adding Objects to\n Versioning Enabled Buckets. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.
\n\n\n\n Related Resources\n
\n\n CopyObject\n
\n\n DeleteObject\n
\nAdds an object to a bucket. You must have WRITE permissions on a bucket to add an object\n to it.
\n\n\nAmazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket.
\n\nAmazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object\n locking; if you need this, make sure to build it into your application layer or use\n versioning instead.
\n\nTo ensure that data is not corrupted traversing the network, use the\n Content-MD5
header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, returns an error. Additionally,\n you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.
To successfully complete the PutObject
request, you must have the \n s3:PutObject
in your IAM permissions.
To successfully change the objects acl of your PutObject
request, \n you must have the s3:PutObjectAcl
in your IAM permissions.
The Content-MD5
header is required for any request to upload an object\n with a retention period configured using Amazon S3 Object Lock. For more information about\n Amazon S3 Object Lock, see Amazon S3 Object Lock Overview\n in the Amazon S3 User Guide.
\n Server-side Encryption\n
\nYou can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts \n your data as it writes it to disks in its data centers and decrypts the data\n when you access it. You have the option to provide your own encryption key or use Amazon Web Services\n managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side\n Encryption.
\nIf you request server-side encryption using Amazon Web Services Key Management Service (SSE-KMS), you can enable \n an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the \n Amazon S3 User Guide.
\n\n Access Control List (ACL)-Specific Request\n Headers\n
\nYou can use headers to grant ACL- based permissions. By default, all objects are\n private. Only the owner has full access control. When adding a new object, you can grant\n permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These\n permissions are then added to the ACL on the object. For more information, see Access Control List\n (ACL) Overview and Managing ACLs Using the REST\n API.
\n\n\n Storage Class Options\n
\nBy default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.
\n\n\n\n Versioning\n
\nIf you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID\n for the object being stored. Amazon S3 returns this ID in the response. When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all of the objects.
\nFor more information about versioning, see Adding Objects to\n Versioning Enabled Buckets. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.
\n\n\n\n Related Resources\n
\n\n CopyObject\n
\n\n DeleteObject\n
\nUses the acl
subresource to set the access control list (ACL) permissions\n for a new or existing object in an S3 bucket. You must have WRITE_ACP
\n permission to set the ACL of an object. For more information, see What\n permissions can I grant? in the Amazon S3 User Guide.
This action is not supported by Amazon S3 on Outposts.
\nDepending on your application needs, you can choose to set\n the ACL on an object using either the request body or the headers. For example, if you have\n an existing application that updates a bucket ACL using the request body, you can continue\n to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.
\n\n\n\n\n Access Permissions\n
\nYou can set access permissions using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set\n of grantees and permissions. Specify the canned ACL name as the value of\n x-amz-ac
l. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n x-amz-acl
header to set a canned ACL. These parameters map to the set\n of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL)\n Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an AWS\n account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
\nFor example, the following x-amz-grant-read
header grants list\n objects permission to the two AWS accounts identified by their email\n addresses.
\n x-amz-grant-read: emailAddress=\"xyz@amazon.com\",\n emailAddress=\"abc@amazon.com\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\n\n Grantee Values\n
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request.
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following AWS Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
\n\n Versioning\n
\nThe ACL of an object is set at the object version level. By default, PUT sets the ACL of\n the current version of an object. To set the ACL of a different version, use the\n versionId
subresource.
\n Related Resources\n
\n\n CopyObject\n
\n\n GetObject\n
\nUses the acl
subresource to set the access control list (ACL) permissions\n for a new or existing object in an S3 bucket. You must have WRITE_ACP
\n permission to set the ACL of an object. For more information, see What\n permissions can I grant? in the Amazon S3 User Guide.
This action is not supported by Amazon S3 on Outposts.
\nDepending on your application needs, you can choose to set\n the ACL on an object using either the request body or the headers. For example, if you have\n an existing application that updates a bucket ACL using the request body, you can continue\n to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.
\n\n\n\n\n Access Permissions\n
\nYou can set access permissions using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set\n of grantees and permissions. Specify the canned ACL name as the value of\n x-amz-ac
l. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n x-amz-acl
header to set a canned ACL. These parameters map to the set\n of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL)\n Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants list\n objects permission to the two Amazon Web Services accounts identified by their email\n addresses.
\n x-amz-grant-read: emailAddress=\"xyz@amazon.com\",\n emailAddress=\"abc@amazon.com\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\n\n Grantee Values\n
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request.
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\n\n Versioning\n
\nThe ACL of an object is set at the object version level. By default, PUT sets the ACL of\n the current version of an object. To set the ACL of a different version, use the\n versionId
subresource.
\n Related Resources\n
\n\n CopyObject\n
\n\n GetObject\n
\nThe bucket name that contains the object to which you want to attach the ACL.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name that contains the object to which you want to attach the ACL.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10234,7 +10234,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.>\n
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.>\n
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10276,7 +10276,7 @@ "Key": { "target": "com.amazonaws.s3#ObjectKey", "traits": { - "smithy.api#documentation": "Key for which the PUT action was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Key for which the PUT action was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10346,7 +10346,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object that you want to place a Legal Hold on.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object that you want to place a Legal Hold on.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10375,7 +10375,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The MD5 hash for the request body.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The MD5 hash for the request body.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10405,7 +10405,7 @@ "target": "com.amazonaws.s3#PutObjectLockConfigurationOutput" }, "traits": { - "smithy.api#documentation": "Places an Object Lock configuration on the specified bucket. The rule specified in the\n Object Lock configuration will be applied by default to every new object placed in the\n specified bucket. For more information, see Locking Objects.\n
\nThe DefaultRetention
settings require both a mode and a\n period.
The DefaultRetention
period can be either Days
\n or Years
but you must select one. You cannot specify Days
\n and Years
at the same time.
You can only enable Object Lock for new buckets. If you want to turn on\n Object Lock for an existing bucket, contact AWS Support.
\nPlaces an Object Lock configuration on the specified bucket. The rule specified in the\n Object Lock configuration will be applied by default to every new object placed in the\n specified bucket. For more information, see Locking Objects.\n
\nThe DefaultRetention
settings require both a mode and a\n period.
The DefaultRetention
period can be either Days
\n or Years
but you must select one. You cannot specify Days
\n and Years
at the same time.
You can only enable Object Lock for new buckets. If you want to turn on\n Object Lock for an existing bucket, contact Amazon Web Services Support.
\nThe MD5 hash for the request body.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The MD5 hash for the request body.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10493,7 +10493,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "If you specified server-side encryption either with an AWS KMS customer master key (CMK)\n or Amazon S3-managed encryption key in your PUT request, the response includes this header. It\n confirms the encryption algorithm that Amazon S3 used to encrypt the object.
", + "smithy.api#documentation": "If you specified server-side encryption either with an Amazon Web Services KMS customer master key (CMK)\n or Amazon S3-managed encryption key in your PUT request, the response includes this header. It\n confirms the encryption algorithm that Amazon S3 used to encrypt the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -10521,21 +10521,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If x-amz-server-side-encryption
is present and has the value of\n aws:kms
, this header specifies the ID of the AWS Key Management Service\n (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the\n object.
If x-amz-server-side-encryption
is present and has the value of\n aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for the\n object.
If present, specifies the AWS KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
", + "smithy.api#documentation": "If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -10567,7 +10567,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name to which the PUT action was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name to which the PUT action was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10716,14 +10716,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If x-amz-server-side-encryption
is present and has the value of\n aws:kms
, this header specifies the ID of the AWS Key Management Service\n (AWS KMS) symmetrical customer managed customer master key (CMK) that was used for the\n object. If you specify x-amz-server-side-encryption:aws:kms
, but do not\n provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the AWS\n managed CMK in AWS to protect the data. If the KMS key does not exist in the same account\n issuing the command, you must use the full ARN and not just the ID.\n
If x-amz-server-side-encryption
is present and has the value of\n aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetrical customer managed customer master key (CMK) that was used for the\n object. If you specify x-amz-server-side-encryption:aws:kms
, but do not\n provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services\n managed CMK in Amazon Web Services to protect the data. If the KMS key does not exist in the same account\n issuing the command, you must use the full ARN and not just the ID.\n
Specifies the AWS KMS Encryption Context to use for object encryption. The value of this\n header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value\n pairs.
", + "smithy.api#documentation": "Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this\n header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value\n pairs.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -10786,7 +10786,7 @@ "target": "com.amazonaws.s3#PutObjectRetentionOutput" }, "traits": { - "smithy.api#documentation": "Places an Object Retention configuration on an object. For more information, see Locking Objects.\n
\nThis action is not supported by Amazon S3 on Outposts.
", + "smithy.api#documentation": "Places an Object Retention configuration on an object. For more information, see Locking Objects.\n Users or accounts require the s3:PutObjectRetention
permission in order to place\n an Object Retention configuration on objects. Bypassing a Governance Retention configuration\n requires the s3:BypassGovernanceRetention
permission.\n
This action is not supported by Amazon S3 on Outposts.
\n\n\n Permissions\n
\nWhen the Object Lock retention mode is set to compliance, you need s3:PutObjectRetention
and \n s3:BypassGovernanceRetention
permissions. For other requests to PutObjectRetention
, \n only s3:PutObjectRetention
permissions are required.
The bucket name that contains the object you want to apply this Object Retention\n configuration to.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name that contains the object you want to apply this Object Retention\n configuration to.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10848,7 +10848,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The MD5 hash for the request body.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The MD5 hash for the request body.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10905,7 +10905,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10928,7 +10928,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The MD5 hash for the request body.
\nFor requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The MD5 hash for the request body.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -10985,7 +10985,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "The MD5 hash of the PutPublicAccessBlock
request body.
For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The MD5 hash of the PutPublicAccessBlock
request body.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -11196,7 +11196,7 @@ "Role": { "target": "com.amazonaws.s3#Role", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that\n Amazon S3 assumes when replicating objects. For more information, see How to Set Up\n Replication in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that\n Amazon S3 assumes when replicating objects. For more information, see How to Set Up\n Replication in the Amazon S3 User Guide.
", "smithy.api#required": {} } }, @@ -11249,7 +11249,7 @@ "SourceSelectionCriteria": { "target": "com.amazonaws.s3#SourceSelectionCriteria", "traits": { - "smithy.api#documentation": "A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer master key (CMK) stored in AWS Key Management\n Service (SSE-KMS).
" + "smithy.api#documentation": "A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer master key (CMK) stored in Amazon Web Services Key Management\n Service (SSE-KMS).
" } }, "ExistingObjectReplication": { @@ -11408,7 +11408,7 @@ "Minutes": { "target": "com.amazonaws.s3#Minutes", "traits": { - "smithy.api#documentation": "Contains an integer specifying time in minutes.
\nValid values: 15 minutes.
" + "smithy.api#documentation": "Contains an integer specifying time in minutes.
\nValid value: 15
" } } }, @@ -11513,7 +11513,7 @@ } ], "traits": { - "smithy.api#documentation": "Restores an archived copy of an object back into Amazon S3
\nThis action is not supported by Amazon S3 on Outposts.
\nThis action performs the following types of requests:
\n\n select
- Perform a select query on an archived object
\n restore an archive
- Restore an archived object
To use this operation, you must have permissions to perform the\n s3:RestoreObject
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.
\n Querying Archives with Select Requests\n
\nYou use a select type of request to perform SQL queries on archived objects. The\n archived objects that are being queried by the select request must be formatted as\n uncompressed comma-separated values (CSV) files. You can run queries and custom analytics\n on your archived data without having to restore your data to a hotter Amazon S3 tier. For an\n overview about select requests, see Querying Archived Objects in the Amazon S3 User Guide.
\nWhen making a select request, do the following:
\nDefine an output location for the select query's output. This must be an Amazon S3\n bucket in the same AWS Region as the bucket that contains the archive object that is\n being queried. The AWS account that initiates the job must have permissions to write\n to the S3 bucket. You can specify the storage class and encryption for the output\n objects stored in the bucket. For more information about output, see Querying Archived Objects\n in the Amazon S3 User Guide.
\nFor more information about the S3
structure in the request body, see\n the following:
\n PutObject\n
\n\n Managing Access with\n ACLs in the Amazon S3 User Guide\n
\n\n Protecting Data Using\n Server-Side Encryption in the\n Amazon S3 User Guide\n
\nDefine the SQL expression for the SELECT
type of restoration for your\n query in the request body's SelectParameters
structure. You can use\n expressions like the following examples.
The following expression returns all records from the specified\n object.
\n\n SELECT * FROM Object
\n
Assuming that you are not using any headers for data stored in the object,\n you can specify columns with positional headers.
\n\n SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
\n
If you have headers and you set the fileHeaderInfo
in the\n CSV
structure in the request body to USE
, you can\n specify headers in the query. (If you set the fileHeaderInfo
field\n to IGNORE
, the first row is skipped for the query.) You cannot mix\n ordinal positions with header column names.
\n SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
\n
For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and\n S3 Glacier Select in the Amazon S3 User Guide.
\nWhen making a select request, you can also do the following:
\nTo expedite your queries, specify the Expedited
tier. For more\n information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that\n is being queried and the serialization of the CSV-encoded query results.
\nThe following are additional important facts about the select feature:
\nThe output results are new Amazon S3 objects. Unlike archive retrievals, they are\n stored until explicitly deleted-manually or through a lifecycle policy.
\nYou can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't\n deduplicate requests, so avoid issuing duplicate requests.
\n Amazon S3 accepts a select request even if the object has already been restored. A\n select request doesn’t return error response 409
.
\n Restoring objects\n
\nObjects that you archive to the S3 Glacier or\n S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in\n Archive Access or Deep Archive Access tiers you must first initiate a restore request, and\n then wait until the object is moved into the Frequent Access tier. For objects in\n S3 Glacier or S3 Glacier Deep Archive storage classes you must\n first initiate a restore request, and then wait until a temporary copy of the object is\n available. To access an archived object, you must restore the object for the duration\n (number of days) that you specify.
\nTo restore a specific object version, you can provide a version ID. If you don't provide\n a version ID, Amazon S3 restores the current version.
\nWhen restoring an archived object (or using a select request), you can specify one of\n the following data access tier options in the Tier
element of the request\n body:
\n \n Expedited
\n - Expedited retrievals\n allow you to quickly access your data stored in the S3 Glacier\n storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a\n subset of archives are required. For all but the largest archived objects (250 MB+),\n data accessed using Expedited retrievals is typically made available within 1–5\n minutes. Provisioned capacity ensures that retrieval capacity for Expedited\n retrievals is available when you need it. Expedited retrievals and provisioned\n capacity are not available for objects stored in the S3 Glacier Deep Archive\n storage class or S3 Intelligent-Tiering Deep Archive tier.
\n \n Standard
\n - Standard retrievals allow\n you to access any of your archived objects within several hours. This is the default\n option for retrieval requests that do not specify the retrieval option. Standard\n retrievals typically finish within 3–5 hours for objects stored in the\n S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They\n typically finish within 12 hours for objects stored in the\n S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.\n Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
\n \n Bulk
\n - Bulk retrievals are the\n lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts,\n even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12\n hours for objects stored in the S3 Glacier storage class or\n S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored\n in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.\n Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
For more information about archive retrieval options and provisioned capacity for\n Expedited
data access, see Restoring Archived Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed\n while it is in progress. For more information, see \n Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.
\nTo get the status of object restoration, you can send a HEAD
request.\n Operations return the x-amz-restore
header, which provides information about\n the restoration status, in the response. You can use Amazon S3 event notifications to notify you\n when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in\n the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing\n the request with a new period. Amazon S3 updates the restoration period relative to the current\n time and charges only for the request-there are no data transfer charges. You cannot\n update the restoration period when Amazon S3 is actively processing your current restore request\n for the object.
\nIf your bucket has a lifecycle configuration with a rule that includes an expiration\n action, the object expiration overrides the life span that you specify in a restore\n request. For example, if you restore an object copy for 10 days, but the object is\n scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information\n about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in\n Amazon S3 User Guide.
\n\n Responses\n
\nA successful action returns either the 200 OK
or 202\n Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202\n Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the\n response.
\n Special Errors\n
\n\n Code: RestoreAlreadyInProgress\n
\n\n Cause: Object restore is already in progress. (This error does not\n apply to SELECT type requests.)\n
\n\n HTTP Status Code: 409 Conflict\n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Code: GlacierExpeditedRetrievalNotAvailable\n
\n\n Cause: expedited retrievals are currently not available. Try again\n later. (Returned if there is insufficient capacity to process the Expedited\n request. This error applies only to Expedited retrievals and not to\n S3 Standard or Bulk retrievals.)\n
\n\n HTTP Status Code: 503\n
\n\n SOAP Fault Code Prefix: N/A\n
\n\n Related Resources\n
\n\n SQL Reference for\n Amazon S3 Select and S3 Glacier Select in the\n Amazon S3 User Guide\n
\nRestores an archived copy of an object back into Amazon S3
\nThis action is not supported by Amazon S3 on Outposts.
\nThis action performs the following types of requests:
\n\n select
- Perform a select query on an archived object
\n restore an archive
- Restore an archived object
To use this operation, you must have permissions to perform the\n s3:RestoreObject
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.
\n Querying Archives with Select Requests\n
\nYou use a select type of request to perform SQL queries on archived objects. The\n archived objects that are being queried by the select request must be formatted as\n uncompressed comma-separated values (CSV) files. You can run queries and custom analytics\n on your archived data without having to restore your data to a hotter Amazon S3 tier. For an\n overview about select requests, see Querying Archived Objects in the Amazon S3 User Guide.
\nWhen making a select request, do the following:
\nDefine an output location for the select query's output. This must be an Amazon S3\n bucket in the same Amazon Web Services Region as the bucket that contains the archive object that is\n being queried. The Amazon Web Services account that initiates the job must have permissions to write\n to the S3 bucket. You can specify the storage class and encryption for the output\n objects stored in the bucket. For more information about output, see Querying Archived Objects\n in the Amazon S3 User Guide.
\nFor more information about the S3
structure in the request body, see\n the following:
\n PutObject\n
\n\n Managing Access with\n ACLs in the Amazon S3 User Guide\n
\n\n Protecting Data Using\n Server-Side Encryption in the\n Amazon S3 User Guide\n
\nDefine the SQL expression for the SELECT
type of restoration for your\n query in the request body's SelectParameters
structure. You can use\n expressions like the following examples.
The following expression returns all records from the specified\n object.
\n\n SELECT * FROM Object
\n
Assuming that you are not using any headers for data stored in the object,\n you can specify columns with positional headers.
\n\n SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
\n
If you have headers and you set the fileHeaderInfo
in the\n CSV
structure in the request body to USE
, you can\n specify headers in the query. (If you set the fileHeaderInfo
field\n to IGNORE
, the first row is skipped for the query.) You cannot mix\n ordinal positions with header column names.
\n SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
\n
For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and\n S3 Glacier Select in the Amazon S3 User Guide.
\nWhen making a select request, you can also do the following:
\nTo expedite your queries, specify the Expedited
tier. For more\n information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that\n is being queried and the serialization of the CSV-encoded query results.
\nThe following are additional important facts about the select feature:
\nThe output results are new Amazon S3 objects. Unlike archive retrievals, they are\n stored until explicitly deleted—manually or through a lifecycle policy.
\nYou can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't\n deduplicate requests, so avoid issuing duplicate requests.
\n Amazon S3 accepts a select request even if the object has already been restored. A\n select request doesn’t return error response 409
.
\n Restoring objects\n
\nObjects that you archive to the S3 Glacier or\n S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in\n Archive Access or Deep Archive Access tiers you must first initiate a restore request, and\n then wait until the object is moved into the Frequent Access tier. For objects in\n S3 Glacier or S3 Glacier Deep Archive storage classes you must\n first initiate a restore request, and then wait until a temporary copy of the object is\n available. To access an archived object, you must restore the object for the duration\n (number of days) that you specify.
\nTo restore a specific object version, you can provide a version ID. If you don't provide\n a version ID, Amazon S3 restores the current version.
\nWhen restoring an archived object (or using a select request), you can specify one of\n the following data access tier options in the Tier
element of the request\n body:
\n \n Expedited
\n - Expedited retrievals\n allow you to quickly access your data stored in the S3 Glacier\n storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a\n subset of archives are required. For all but the largest archived objects (250 MB+),\n data accessed using Expedited retrievals is typically made available within 1–5\n minutes. Provisioned capacity ensures that retrieval capacity for Expedited\n retrievals is available when you need it. Expedited retrievals and provisioned\n capacity are not available for objects stored in the S3 Glacier Deep Archive\n storage class or S3 Intelligent-Tiering Deep Archive tier.
\n \n Standard
\n - Standard retrievals allow\n you to access any of your archived objects within several hours. This is the default\n option for retrieval requests that do not specify the retrieval option. Standard\n retrievals typically finish within 3–5 hours for objects stored in the\n S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They\n typically finish within 12 hours for objects stored in the\n S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.\n Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
\n \n Bulk
\n - Bulk retrievals are the\n lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts,\n even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12\n hours for objects stored in the S3 Glacier storage class or\n S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored\n in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.\n Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
For more information about archive retrieval options and provisioned capacity for\n Expedited
data access, see Restoring Archived Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed\n while it is in progress. For more information, see \n Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.
\nTo get the status of object restoration, you can send a HEAD
request.\n Operations return the x-amz-restore
header, which provides information about\n the restoration status, in the response. You can use Amazon S3 event notifications to notify you\n when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in\n the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing\n the request with a new period. Amazon S3 updates the restoration period relative to the current\n time and charges only for the request—there are no data transfer charges. You cannot\n update the restoration period when Amazon S3 is actively processing your current restore request\n for the object.
\nIf your bucket has a lifecycle configuration with a rule that includes an expiration\n action, the object expiration overrides the life span that you specify in a restore\n request. For example, if you restore an object copy for 10 days, but the object is\n scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information\n about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in\n Amazon S3 User Guide.
\n\n Responses\n
\nA successful action returns either the 200 OK
or 202\n Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202\n Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the\n response.
\n Special Errors\n
\n\n Code: RestoreAlreadyInProgress\n
\n\n Cause: Object restore is already in progress. (This error does not\n apply to SELECT type requests.)\n
\n\n HTTP Status Code: 409 Conflict\n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Code: GlacierExpeditedRetrievalNotAvailable\n
\n\n Cause: expedited retrievals are currently not available. Try again\n later. (Returned if there is insufficient capacity to process the Expedited\n request. This error applies only to Expedited retrievals and not to\n S3 Standard or Bulk retrievals.)\n
\n\n HTTP Status Code: 503\n
\n\n SOAP Fault Code Prefix: N/A\n
\n\n Related Resources\n
\n\n SQL Reference for\n Amazon S3 Select and S3 Glacier Select in the\n Amazon S3 User Guide\n
\nThe bucket name containing the object to restore.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object to restore.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -11772,7 +11772,7 @@ "KeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed\n customer master key (CMK) to use for encrypting inventory reports.
", + "smithy.api#documentation": "Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed\n customer master key (CMK) to use for encrypting inventory reports.
", "smithy.api#required": {} } } @@ -11831,7 +11831,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.
\nThis action is not supported by Amazon S3 on Outposts.
\nFor more information about Amazon S3 Select,\n see Selecting Content from\n Objects in the Amazon S3 User Guide.
\nFor more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select\n and S3 Glacier Select in the Amazon S3 User Guide.
\n \n\n Permissions\n
\nYou must have s3:GetObject
permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy\n in the Amazon S3 User Guide.
\n Object Data Formats\n
\nYou can use Amazon S3 Select to query objects that have the following format\n properties:
\n\n CSV, JSON, and Parquet - Objects must be in CSV, JSON, or\n Parquet format.
\n\n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.
\n\n GZIP or BZIP2 - CSV and JSON files can be compressed using\n GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select\n supports for CSV and JSON files. Amazon S3 Select supports columnar compression for\n Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression\n for Parquet objects.
\n\n Server-side encryption - Amazon S3 Select supports querying\n objects that are protected with server-side encryption.
\nFor objects that are encrypted with customer-provided encryption keys (SSE-C), you\n must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nFor objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and\n customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS),\n server-side encryption is handled transparently, so you don't need to specify\n anything. For more information about server-side encryption, including SSE-S3 and\n SSE-KMS, see Protecting Data Using\n Server-Side Encryption in the Amazon S3 User Guide.
\n\n Working with the Response Body\n
\nGiven the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding
header with chunked
as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response\n .
\n GetObject Support\n
\nThe SelectObjectContent
action does not support the following\n GetObject
functionality. For more information, see GetObject.
\n Range
: Although you can specify a scan range for an Amazon S3 Select request\n (see SelectObjectContentRequest - ScanRange in the request parameters),\n you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify\n the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY
storage classes. For\n more information, about storage classes see Storage Classes\n in the Amazon S3 User Guide.
\n Special Errors\n
\n\nFor a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n
\n\n Related Resources\n
\n\n GetObject\n
\nThis action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.
\nThis action is not supported by Amazon S3 on Outposts.
\nFor more information about Amazon S3 Select,\n see Selecting Content from\n Objects in the Amazon S3 User Guide.
\nFor more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select\n and S3 Glacier Select in the Amazon S3 User Guide.
\n \n\n Permissions\n
\nYou must have s3:GetObject
permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy\n in the Amazon S3 User Guide.
\n Object Data Formats\n
\nYou can use Amazon S3 Select to query objects that have the following format\n properties:
\n\n CSV, JSON, and Parquet - Objects must be in CSV, JSON, or\n Parquet format.
\n\n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.
\n\n GZIP or BZIP2 - CSV and JSON files can be compressed using\n GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select\n supports for CSV and JSON files. Amazon S3 Select supports columnar compression for\n Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression\n for Parquet objects.
\n\n Server-side encryption - Amazon S3 Select supports querying\n objects that are protected with server-side encryption.
\nFor objects that are encrypted with customer-provided encryption keys (SSE-C), you\n must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nFor objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and\n customer master keys (CMKs) stored in Amazon Web Services Key Management Service (SSE-KMS),\n server-side encryption is handled transparently, so you don't need to specify\n anything. For more information about server-side encryption, including SSE-S3 and\n SSE-KMS, see Protecting Data Using\n Server-Side Encryption in the Amazon S3 User Guide.
\n\n Working with the Response Body\n
\nGiven the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding
header with chunked
as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response.
\n GetObject Support\n
\nThe SelectObjectContent
action does not support the following\n GetObject
functionality. For more information, see GetObject.
\n Range
: Although you can specify a scan range for an Amazon S3 Select request\n (see SelectObjectContentRequest - ScanRange in the request parameters),\n you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify\n the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY
storage classes. For\n more information, about storage classes see Storage Classes\n in the Amazon S3 User Guide.
\n Special Errors\n
\n\nFor a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n
\n\n Related Resources\n
\n\n GetObject\n
\nAWS Key Management Service (KMS) customer AWS KMS key ID to use for the default\n encryption. This parameter is allowed if and only if SSEAlgorithm
is set to\n aws:kms
.
You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if you\n are using encryption with cross-account operations, you must use a fully qualified KMS key ARN.\n For more information, see Using encryption for cross-account operations.
\n\n For example:\n
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For more information, see\n Using symmetric and\n asymmetric keys in the AWS Key Management Service Developer Guide.
\nAmazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default\n encryption. This parameter is allowed if and only if SSEAlgorithm
is set to\n aws:kms
.
You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if you\n are using encryption with cross-account operations, you must use a fully qualified KMS key ARN.\n For more information, see Using encryption for cross-account operations.
\n\n For example:\n
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For more information, see\n Using symmetric and\n asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.
\n A container for filter information for the selection of Amazon S3 objects encrypted with AWS\n KMS. If you include SourceSelectionCriteria
in the replication configuration,\n this element is required.
A container for filter information for the selection of Amazon S3 objects encrypted with Amazon Web Services\n KMS. If you include SourceSelectionCriteria
in the replication configuration,\n this element is required.
A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer master key (CMK) stored in AWS Key Management\n Service (SSE-KMS).
" + "smithy.api#documentation": "A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer master key (CMK) stored in Amazon Web Services Key Management\n Service (SSE-KMS).
" } }, "com.amazonaws.s3#SseKmsEncryptedObjects": { @@ -12129,13 +12129,13 @@ "Status": { "target": "com.amazonaws.s3#SseKmsEncryptedObjectsStatus", "traits": { - "smithy.api#documentation": "Specifies whether Amazon S3 replicates objects created with server-side encryption using an\n AWS KMS key stored in AWS Key Management Service.
", + "smithy.api#documentation": "Specifies whether Amazon S3 replicates objects created with server-side encryption using an\n Amazon Web Services KMS key stored in Amazon Web Services Key Management Service.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "A container for filter information for the selection of S3 objects encrypted with AWS\n KMS.
" + "smithy.api#documentation": "A container for filter information for the selection of S3 objects encrypted with Amazon Web Services\n KMS.
" } }, "com.amazonaws.s3#SseKmsEncryptedObjectsStatus": { @@ -12584,7 +12584,7 @@ "target": "com.amazonaws.s3#UploadPartOutput" }, "traits": { - "smithy.api#documentation": "Uploads a part in a multipart upload.
\nIn this operation, you provide part data in your request. However, you have an option\n to specify your existing Amazon S3 object as a data source for the part you are uploading. To\n upload a part from an existing object, you use the UploadPartCopy operation.\n
\nYou must initiate a multipart upload (see CreateMultipartUpload)\n before you can upload any part. In response to your initiate request, Amazon S3 returns an\n upload ID, a unique identifier, that you must include in your upload part request.
\nPart numbers can be any number from 1 to 10,000, inclusive. A part number uniquely\n identifies a part and also defines its position within the object being created. If you\n upload a new part using the same part number that was used with a previous part, the\n previously uploaded part is overwritten. Each part must be at least 5 MB in size, except\n the last part. There is no size limit on the last part of your multipart upload.
\nTo ensure that data is not corrupted when traversing the network, specify the\n Content-MD5
header in the upload part request. Amazon S3 checks the part data\n against the provided MD5 value. If they do not match, Amazon S3 returns an error.
If the upload request is signed with Signature Version 4, then AWS S3 uses the\n x-amz-content-sha256
header as a checksum instead of\n Content-MD5
. For more information see Authenticating Requests: Using the Authorization Header (AWS Signature Version\n 4).
\n Note: After you initiate multipart upload and upload\n one or more parts, you must either complete or abort multipart upload in order to stop\n getting charged for storage of the uploaded parts. Only after you either complete or abort\n multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts\n storage.
\n\nFor more information on multipart uploads, go to Multipart Upload Overview in the\n Amazon S3 User Guide .
\nFor information on the permissions required to use the multipart upload API, go to\n Multipart Upload and\n Permissions in the Amazon S3 User Guide.
\n\nYou can optionally request server-side encryption where Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it for you when you access it. You have\n the option of providing your own encryption key, or you can use the AWS managed encryption\n keys. If you choose to provide your own encryption key, the request headers you provide in\n the request must match the headers you used in the request to initiate the upload by using\n CreateMultipartUpload. For more information, go to Using Server-Side Encryption in\n the Amazon S3 User Guide.
\n\nServer-side encryption is supported by the S3 Multipart Upload actions. Unless you are\n using a customer-provided encryption key, you don't need to specify the encryption\n parameters in each UploadPart request. Instead, you only need to specify the server-side\n encryption parameters in the initial Initiate Multipart request. For more information, see\n CreateMultipartUpload.
\n\nIf you requested server-side encryption using a customer-provided encryption key in your\n initiate multipart upload request, you must provide identical encryption information in\n each part upload using the following headers.
\n\n\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\n\n Special Errors\n
\n\n Code: NoSuchUpload\n
\n\n Cause: The specified multipart upload does not exist. The upload\n ID might be invalid, or the multipart upload might have been aborted or\n completed.\n
\n\n HTTP Status Code: 404 Not Found \n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Related Resources\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nUploads a part in a multipart upload.
\nIn this operation, you provide part data in your request. However, you have an option\n to specify your existing Amazon S3 object as a data source for the part you are uploading. To\n upload a part from an existing object, you use the UploadPartCopy operation.\n
\nYou must initiate a multipart upload (see CreateMultipartUpload)\n before you can upload any part. In response to your initiate request, Amazon S3 returns an\n upload ID, a unique identifier, that you must include in your upload part request.
\nPart numbers can be any number from 1 to 10,000, inclusive. A part number uniquely\n identifies a part and also defines its position within the object being created. If you\n upload a new part using the same part number that was used with a previous part, the\n previously uploaded part is overwritten. Each part must be at least 5 MB in size, except\n the last part. There is no size limit on the last part of your multipart upload.
\nTo ensure that data is not corrupted when traversing the network, specify the\n Content-MD5
header in the upload part request. Amazon S3 checks the part data\n against the provided MD5 value. If they do not match, Amazon S3 returns an error.
If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the\n x-amz-content-sha256
header as a checksum instead of\n Content-MD5
. For more information see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version\n 4).
\n Note: After you initiate multipart upload and upload\n one or more parts, you must either complete or abort multipart upload in order to stop\n getting charged for storage of the uploaded parts. Only after you either complete or abort\n multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts\n storage.
\n\nFor more information on multipart uploads, go to Multipart Upload Overview in the\n Amazon S3 User Guide .
\nFor information on the permissions required to use the multipart upload API, go to\n Multipart Upload and\n Permissions in the Amazon S3 User Guide.
\n\nYou can optionally request server-side encryption where Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it for you when you access it. You have\n the option of providing your own encryption key, or you can use the Amazon Web Services managed encryption\n keys. If you choose to provide your own encryption key, the request headers you provide in\n the request must match the headers you used in the request to initiate the upload by using\n CreateMultipartUpload. For more information, go to Using Server-Side Encryption in\n the Amazon S3 User Guide.
\n\nServer-side encryption is supported by the S3 Multipart Upload actions. Unless you are\n using a customer-provided encryption key, you don't need to specify the encryption\n parameters in each UploadPart request. Instead, you only need to specify the server-side\n encryption parameters in the initial Initiate Multipart request. For more information, see\n CreateMultipartUpload.
\n\nIf you requested server-side encryption using a customer-provided encryption key in your\n initiate multipart upload request, you must provide identical encryption information in\n each part upload using the following headers.
\n\n\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\n\n Special Errors\n
\n\n Code: NoSuchUpload\n
\n\n Cause: The specified multipart upload does not exist. The upload\n ID might be invalid, or the multipart upload might have been aborted or\n completed.\n
\n\n HTTP Status Code: 404 Not Found \n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Related Resources\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nIf present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -12675,7 +12675,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -12683,7 +12683,7 @@ "CopySource": { "target": "com.amazonaws.s3#CopySource", "traits": { - "smithy.api#documentation": "Specifies the source object for the copy operation. You specify the value in one of two\n formats, depending on whether you want to access the source object through an access point:
\nFor objects not accessed through an access point, specify the name of the source\n bucket and key of the source object, separated by a slash (/). For example, to copy\n the object reports/january.pdf
from the bucket\n awsexamplebucket
, use\n awsexamplebucket/reports/january.pdf
. The value must be URL\n encoded.
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same AWS Region.
\nAlternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL encoded.
To copy a specific version of an object, append ?versionId=
\n to the value (for example,\n awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
).\n If you don't specify a version ID, Amazon S3 copies the latest version of the source\n object.
Specifies the source object for the copy operation. You specify the value in one of two\n formats, depending on whether you want to access the source object through an access point:
\nFor objects not accessed through an access point, specify the name of the source\n bucket and key of the source object, separated by a slash (/). For example, to copy\n the object reports/january.pdf
from the bucket\n awsexamplebucket
, use\n awsexamplebucket/reports/january.pdf
. The value must be URL\n encoded.
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.
\nAlternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL encoded.
To copy a specific version of an object, append ?versionId=
\n to the value (for example,\n awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
).\n If you don't specify a version ID, Amazon S3 copies the latest version of the source\n object.
If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric\n customer managed customer master key (CMK) was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -12877,7 +12877,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The name of the bucket to which the multipart upload was initiated.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -13037,7 +13037,7 @@ "smithy.api#auth": [ "aws.auth#sigv4" ], - "smithy.api#documentation": "Passes transformed\n objects to a GetObject
operation when using Object Lambda Access Points. For information about\n Object Lambda Access Points, see Transforming objects with\n Object Lambda Access Points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to\n RequestRoute
, RequestToken
, StatusCode
,\n ErrorCode
, and ErrorMessage
. The GetObject
\n response metadata is supported so that the WriteGetObjectResponse
caller,\n typically an AWS Lambda function, can provide the same metadata when it internally invokes\n GetObject
. When WriteGetObjectResponse
is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject
call might differ from what Amazon S3 would normally return.
AWS provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact\n personally identifiable information (PII) and decompress S3 objects. These Lambda functions\n are available in the AWS Serverless Application Repository, and can be selected through the AWS Management Console when you create your\n Object Lambda Access Point.
\nExample 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
\nExample 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
\nExample 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.
\nFor information on how to view and use these functions, see Using AWS built Lambda functions in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Passes transformed\n objects to a GetObject
operation when using Object Lambda Access Points. For information about\n Object Lambda Access Points, see Transforming objects with\n Object Lambda Access Points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to\n RequestRoute
, RequestToken
, StatusCode
,\n ErrorCode
, and ErrorMessage
. The GetObject
\n response metadata is supported so that the WriteGetObjectResponse
caller,\n typically an Lambda function, can provide the same metadata when it internally invokes\n GetObject
. When WriteGetObjectResponse
is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject
call might differ from what Amazon S3 would normally return.
You can include any number of metadata headers. When including a metadata header, it should be\n prefaced with x-amz-meta
. For example, x-amz-meta-my-custom-header: MyCustomValue
.\n The primary use case for this is to forward GetObject
metadata.
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact\n personally identifiable information (PII) and decompress S3 objects. These Lambda functions\n are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your\n Object Lambda Access Point.
\nExample 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
\nExample 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
\nExample 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.
\nFor information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.
", "smithy.api#endpoint": { "hostPrefix": "{RequestRoute}." }, @@ -13266,7 +13266,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for stored in Amazon S3 object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for stored in Amazon S3 object.
", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -13301,7 +13301,7 @@ "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the object stored in Amazon S3 uses an S3 bucket key for server-side\n encryption with AWS KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the object stored in Amazon S3 uses an S3 bucket key for server-side\n encryption with Amazon Web Services KMS (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" } } diff --git a/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json b/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json index 79ddb444446..e1ba3b5eadc 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json +++ b/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json @@ -620,7 +620,7 @@ "AnnotationConsolidationLambdaArn": { "target": "com.amazonaws.sagemaker#LambdaFunctionArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation and to process output data.
\nThis parameter is required for all labeling jobs. For built-in task types, use one\n of the following Amazon SageMaker Ground Truth Lambda function ARNs for\n AnnotationConsolidationLambdaArn
. For custom labeling workflows, see\n Post-annotation Lambda.
\n Bounding box - Finds the most similar boxes from\n different workers based on the Jaccard index of the boxes.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox
\n
\n Image classification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass
\n
\n Multi-label image classification - Uses a variant of\n the Expectation Maximization approach to estimate the true classes of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel
\n
\n Semantic segmentation - Treats each pixel in an image\n as a multi-class classification and treats pixel annotations from workers as \"votes\" for\n the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation
\n
\n Text classification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass
\n
\n Multi-label text classification - Uses a variant of\n the Expectation Maximization approach to estimate the true classes of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel
\n
\n Named entity recognition - Groups similar selections\n and calculates aggregate boundaries, resolving to most-assigned label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition
\n
\n Video Classification - Use this task type when you need workers to classify videos using\n predefined labels that you specify. Workers are shown videos and are asked to choose one\n label for each video.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass
\n
\n Video Frame Object Detection - Use this task type to\n have workers identify and locate objects in a sequence of video frames (images extracted\n from a video) using bounding boxes. For example, you can use this task to ask workers to\n identify and localize various objects in a series of video frames, such as cars, bikes,\n and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection
\n
\n Video Frame Object Tracking - Use this task type to\n have workers track the movement of objects in a sequence of video frames (images\n extracted from a video) using bounding boxes. For example, you can use this task to ask\n workers to track the movement of objects, such as cars, bikes, and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking
\n
\n 3D Point Cloud Object Detection - Use this task type\n when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids\n around objects. For example, you can use this task type to ask workers to identify\n different types of objects in a point cloud, such as cars, bikes, and\n pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking - Use this task type\n when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D\n point cloud frames. For example, you can use this task type to ask workers to track the\n movement of vehicles across multiple point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation - Use this task\n type when you want workers to create a point-level semantic segmentation masks by\n painting objects in a 3D point cloud using different colors where each color is assigned\n to one of the classes you specify.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation
\n
\n Use the following ARNs for Label Verification and Adjustment Jobs\n
\nUse label verification and adjustment jobs to review and adjust labels. To learn more,\n see Verify and Adjust Labels .
\n \n\n Semantic Segmentation Adjustment - Treats each pixel\n in an image as a multi-class classification and treats pixel adjusted annotations from\n workers as \"votes\" for the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation
\n
\n Semantic Segmentation Verification - Uses a variant\n of the Expectation Maximization approach to estimate the true class of verification\n judgment for semantic segmentation labels based on annotations from individual\n workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation
\n
\n Bounding Box Adjustment - Finds the most similar\n boxes from different workers based on the Jaccard index of the adjusted\n annotations.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox
\n
\n Bounding Box Verification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of verification judgment\n for bounding box labels based on annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox
\n
\n Video Frame Object Detection Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to classify and localize objects in a sequence of video frames.
\n\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection
\n
\n Video Frame Object Tracking Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to track object movement across a sequence of video frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking
\n
\n 3D Point Cloud Object Detection Adjustment - Use this\n task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking Adjustment - Use this\n task type when you want workers to adjust 3D cuboids around objects that appear in a\n sequence of 3D point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation Adjustment - Use this task\n type when you want workers to adjust point-level semantic segmentation masks using a paint tool.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation and processes output data.
\nThis parameter is required for all labeling jobs. For built-in task types, use one\n of the following Amazon SageMaker Ground Truth Lambda function ARNs for\n AnnotationConsolidationLambdaArn
. For custom labeling workflows, see\n Post-annotation Lambda.
\n Bounding box - Finds the most similar boxes from\n different workers based on the Jaccard index of the boxes.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox
\n
\n Image classification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass
\n
\n Multi-label image classification - Uses a variant of\n the Expectation Maximization approach to estimate the true classes of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel
\n
\n Semantic segmentation - Treats each pixel in an image\n as a multi-class classification and treats pixel annotations from workers as \"votes\" for\n the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation
\n
\n Text classification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass
\n
\n Multi-label text classification - Uses a variant of\n the Expectation Maximization approach to estimate the true classes of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel
\n
\n Named entity recognition - Groups similar selections\n and calculates aggregate boundaries, resolving to most-assigned label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition
\n
\n Video Classification - Use this task type when you need workers to classify videos using\n predefined labels that you specify. Workers are shown videos and are asked to choose one\n label for each video.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass
\n
\n Video Frame Object Detection - Use this task type to\n have workers identify and locate objects in a sequence of video frames (images extracted\n from a video) using bounding boxes. For example, you can use this task to ask workers to\n identify and localize various objects in a series of video frames, such as cars, bikes,\n and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection
\n
\n Video Frame Object Tracking - Use this task type to\n have workers track the movement of objects in a sequence of video frames (images\n extracted from a video) using bounding boxes. For example, you can use this task to ask\n workers to track the movement of objects, such as cars, bikes, and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking
\n
\n 3D Point Cloud Object Detection - Use this task type\n when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids\n around objects. For example, you can use this task type to ask workers to identify\n different types of objects in a point cloud, such as cars, bikes, and\n pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking - Use this task type\n when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D\n point cloud frames. For example, you can use this task type to ask workers to track the\n movement of vehicles across multiple point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation - Use this task\n type when you want workers to create point-level semantic segmentation masks by\n painting objects in a 3D point cloud using different colors where each color is assigned\n to one of the classes you specify.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation
\n
\n Use the following ARNs for Label Verification and Adjustment Jobs\n
\nUse label verification and adjustment jobs to review and adjust labels. To learn more,\n see Verify and Adjust Labels .
\n \n\n Semantic Segmentation Adjustment - Treats each pixel\n in an image as a multi-class classification and treats pixel adjusted annotations from\n workers as \"votes\" for the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation
\n
\n Semantic Segmentation Verification - Uses a variant\n of the Expectation Maximization approach to estimate the true class of verification\n judgment for semantic segmentation labels based on annotations from individual\n workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation
\n
\n Bounding Box Adjustment - Finds the most similar\n boxes from different workers based on the Jaccard index of the adjusted\n annotations.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox
\n
\n Bounding Box Verification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of verification judgment\n for bounding box labels based on annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox
\n
\n Video Frame Object Detection Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to classify and localize objects in a sequence of video frames.
\n\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection
\n
\n Video Frame Object Tracking Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to track object movement across a sequence of video frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking
\n
\n 3D Point Cloud Object Detection Adjustment - Use this\n task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking Adjustment - Use this\n task type when you want workers to adjust 3D cuboids around objects that appear in a\n sequence of 3D point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation Adjustment - Use this task\n type when you want workers to adjust a point-level semantic segmentation masks using a paint tool.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
Lists a summary of the properties of an association. An association is an entity that\n links other lineage or experiment entities. An example would be an association between a\n training job and a model.
" } }, + "com.amazonaws.sagemaker#AsyncInferenceClientConfig": { + "type": "structure", + "members": { + "MaxConcurrentInvocationsPerInstance": { + "target": "com.amazonaws.sagemaker#MaxConcurrentInvocationsPerInstance", + "traits": { + "smithy.api#documentation": "The maximum number of concurrent requests sent by the SageMaker client to the \n model container. If no value is provided, Amazon SageMaker will choose an optimal value for you.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Configures the behavior of the client used by Amazon SageMaker to interact with the \n model container during asynchronous inference.
" + } + }, + "com.amazonaws.sagemaker#AsyncInferenceConfig": { + "type": "structure", + "members": { + "ClientConfig": { + "target": "com.amazonaws.sagemaker#AsyncInferenceClientConfig", + "traits": { + "smithy.api#documentation": "Configures the behavior of the client used by Amazon SageMaker to interact \n with the model container during asynchronous inference.
" + } + }, + "OutputConfig": { + "target": "com.amazonaws.sagemaker#AsyncInferenceOutputConfig", + "traits": { + "smithy.api#documentation": "Specifies the configuration for asynchronous inference invocation outputs.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies configuration for how an endpoint performs asynchronous inference.
" + } + }, + "com.amazonaws.sagemaker#AsyncInferenceNotificationConfig": { + "type": "structure", + "members": { + "SuccessTopic": { + "target": "com.amazonaws.sagemaker#SnsTopicArn", + "traits": { + "smithy.api#documentation": "Amazon SNS topic to post a notification to when inference completes successfully. \n If no topic is provided, no notification is sent on success.
" + } + }, + "ErrorTopic": { + "target": "com.amazonaws.sagemaker#SnsTopicArn", + "traits": { + "smithy.api#documentation": "Amazon SNS topic to post a notification to when inference fails. \n If no topic is provided, no notification is sent on failure.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies the configuration for notifications of inference results for asynchronous inference.
" + } + }, + "com.amazonaws.sagemaker#AsyncInferenceOutputConfig": { + "type": "structure", + "members": { + "KmsKeyId": { + "target": "com.amazonaws.sagemaker#KmsKeyId", + "traits": { + "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that\n Amazon SageMaker uses to encrypt the asynchronous inference output in Amazon S3.
\n " + } + }, + "S3OutputPath": { + "target": "com.amazonaws.sagemaker#DestinationS3Uri", + "traits": { + "smithy.api#documentation": "The Amazon S3 location to upload inference responses to.
", + "smithy.api#required": {} + } + }, + "NotificationConfig": { + "target": "com.amazonaws.sagemaker#AsyncInferenceNotificationConfig", + "traits": { + "smithy.api#documentation": "Specifies the configuration for notifications of inference results for asynchronous inference.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies the configuration for asynchronous inference invocation outputs.
" + } + }, "com.amazonaws.sagemaker#AthenaCatalog": { "type": "string", "traits": { @@ -1666,12 +1748,12 @@ "CandidateProperties": { "target": "com.amazonaws.sagemaker#CandidateProperties", "traits": { - "smithy.api#documentation": "The AutoML candidate's properties.
" + "smithy.api#documentation": "The properties of an AutoML candidate job.
" } } }, "traits": { - "smithy.api#documentation": "An Autopilot job returns recommendations, or candidates. Each candidate has futher details\n about the steps involved and the status.
" + "smithy.api#documentation": "Information about a candidate produced by an AutoML training job, including its status,\n steps, and other properties.
" } }, "com.amazonaws.sagemaker#AutoMLCandidateStep": { @@ -1743,7 +1825,7 @@ "Image": { "target": "com.amazonaws.sagemaker#ContainerImage", "traits": { - "smithy.api#documentation": "The ECR path of the container. For more information, see .
", + "smithy.api#documentation": "The Amazon Elastic Container Registry (Amazon ECR) path of the container. For more\n information, see .
", "smithy.api#required": {} } }, @@ -1855,7 +1937,7 @@ "MaxRuntimePerTrainingJobInSeconds": { "target": "com.amazonaws.sagemaker#MaxRuntimePerTrainingJobInSeconds", "traits": { - "smithy.api#documentation": "The maximum time, in seconds, a training job is allowed to run as part of an AutoML job.
" + "smithy.api#documentation": "The maximum time, in seconds, a training job is allowed to run as part of an AutoML\n job.
" } }, "MaxAutoMLJobRuntimeInSeconds": { @@ -1905,7 +1987,7 @@ "MetricName": { "target": "com.amazonaws.sagemaker#AutoMLMetricEnum", "traits": { - "smithy.api#documentation": "The name of the objective metric used to measure the predictive quality of a machine\n learning system. This metric is optimized during training to provide the best estimate for\n model parameter values from data.
\nHere are the options:
\n\n MSE
: The mean squared error (MSE) is the average of the squared\n differences between the predicted and actual values. It is used for regression. MSE\n values are always positive: the better a model is at predicting the actual values,\n the smaller the MSE value. When the data contains outliers, they tend to dominate the\n MSE, which might cause subpar prediction performance.
\n Accuracy
: The ratio of the number of correctly classified items to\n the total number of (correctly and incorrectly) classified items. It is used for\n binary and multiclass classification. It measures how close the predicted class\n values are to the actual values. Accuracy values vary between zero and one: one\n indicates perfect accuracy and zero indicates perfect inaccuracy.
\n F1
: The F1 score is the harmonic mean of the precision and recall. It\n is used for binary classification into classes traditionally referred to as positive\n and negative. Predictions are said to be true when they match their actual (correct)\n class and false when they do not. Precision is the ratio of the true positive\n predictions to all positive predictions (including the false positives) in a data set\n and measures the quality of the prediction when it predicts the positive class.\n Recall (or sensitivity) is the ratio of the true positive predictions to all actual\n positive instances and measures how completely a model predicts the actual class\n members in a data set. The standard F1 score weighs precision and recall equally. But\n which metric is paramount typically depends on specific aspects of a problem. F1\n scores vary between zero and one: one indicates the best possible performance and\n zero the worst.
\n AUC
: The area under the curve (AUC) metric is used to compare and\n evaluate binary classification by algorithms such as logistic regression that return\n probabilities. A threshold is needed to map the probabilities into classifications.\n The relevant curve is the receiver operating characteristic curve that plots the true\n positive rate (TPR) of predictions (or recall) against the false positive rate (FPR)\n as a function of the threshold value, above which a prediction is considered\n positive. Increasing the threshold results in fewer false positives but more false\n negatives. AUC is the area under this receiver operating characteristic curve and so\n provides an aggregated measure of the model performance across all possible\n classification thresholds. The AUC score can also be interpreted as the probability\n that a randomly selected positive data point is more likely to be predicted positive\n than a randomly selected negative example. AUC scores vary between zero and one: a\n score of one indicates perfect accuracy and a score of one half indicates that the\n prediction is not better than a random classifier. Values under one half predict less\n accurately than a random predictor. But such consistently bad predictors can simply\n be inverted to obtain better than random predictors.
\n F1macro
: The F1macro score applies F1 scoring to multiclass\n classification. In this context, you have multiple classes to predict. You just\n calculate the precision and recall for each class as you did for the positive class\n in binary classification. Then, use these values to calculate the F1 score for each\n class and average them to obtain the F1macro score. F1macro scores vary between zero\n and one: one indicates the best possible performance and zero the worst.
If you do not specify a metric explicitly, the default behavior is to automatically\n use:
\n\n MSE
: for regression.
\n F1
: for binary classification
\n Accuracy
: for multiclass classification.
The name of the objective metric used to measure the predictive quality of a machine\n learning system. This metric is optimized during training to provide the best estimate for\n model parameter values from data.
\nHere are the options:
\n\n MSE
: The mean squared error (MSE) is the average of the squared\n differences between the predicted and actual values. It is used for regression. MSE\n values are always positive: the better a model is at predicting the actual values,\n the smaller the MSE value is. When the data contains outliers, they tend to dominate\n the MSE, which might cause subpar prediction performance.
\n Accuracy
: The ratio of the number of correctly classified items to\n the total number of (correctly and incorrectly) classified items. It is used for\n binary and multiclass classification. It measures how close the predicted class\n values are to the actual values. Accuracy values vary between zero and one: one\n indicates perfect accuracy and zero indicates perfect inaccuracy.
\n F1
: The F1 score is the harmonic mean of the precision and recall. It\n is used for binary classification into classes traditionally referred to as positive\n and negative. Predictions are said to be true when they match their actual (correct)\n class and false when they do not. Precision is the ratio of the true positive\n predictions to all positive predictions (including the false positives) in a data set\n and measures the quality of the prediction when it predicts the positive class.\n Recall (or sensitivity) is the ratio of the true positive predictions to all actual\n positive instances and measures how completely a model predicts the actual class\n members in a data set. The standard F1 score weighs precision and recall equally. But\n which metric is paramount typically depends on specific aspects of a problem. F1\n scores vary between zero and one: one indicates the best possible performance and\n zero the worst.
\n AUC
: The area under the curve (AUC) metric is used to compare and\n evaluate binary classification by algorithms such as logistic regression that return\n probabilities. A threshold is needed to map the probabilities into classifications.\n The relevant curve is the receiver operating characteristic curve that plots the true\n positive rate (TPR) of predictions (or recall) against the false positive rate (FPR)\n as a function of the threshold value, above which a prediction is considered\n positive. Increasing the threshold results in fewer false positives but more false\n negatives. AUC is the area under this receiver operating characteristic curve and so\n provides an aggregated measure of the model performance across all possible\n classification thresholds. The AUC score can also be interpreted as the probability\n that a randomly selected positive data point is more likely to be predicted positive\n than a randomly selected negative example. AUC scores vary between zero and one: a\n score of one indicates perfect accuracy and a score of one half indicates that the\n prediction is not better than a random classifier. Values under one half predict less\n accurately than a random predictor. But such consistently bad predictors can simply\n be inverted to obtain better than random predictors.
\n F1macro
: The F1macro score applies F1 scoring to multiclass\n classification. In this context, you have multiple classes to predict. You just\n calculate the precision and recall for each class as you did for the positive class\n in binary classification. Then, use these values to calculate the F1 score for each\n class and average them to obtain the F1macro score. F1macro scores vary between zero\n and one: one indicates the best possible performance and zero the worst.
If you do not specify a metric explicitly, the default behavior is to automatically\n use:
\n\n MSE
: for regression.
\n F1
: for binary classification
\n Accuracy
: for multiclass classification.
The name of the AutoML you are requesting.
", + "smithy.api#documentation": "The name of the AutoML job you are requesting.
", "smithy.api#required": {} } }, @@ -2508,6 +2590,12 @@ "traits": { "smithy.api#documentation": "The Amazon S3 prefix to the artifacts generated for an AutoML candidate.
" } + }, + "CandidateMetrics": { + "target": "com.amazonaws.sagemaker#MetricDataList", + "traits": { + "smithy.api#documentation": "Information about the candidate metrics for an AutoML job.
" + } } }, "traits": { @@ -3540,7 +3628,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 5 + "max": 15 } } }, @@ -4177,7 +4265,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Autopilot job.
\nFind the best performing model after you run an Autopilot job by calling .
\nFor information about how to use Autopilot, see Automate Model\n Development with Amazon SageMaker Autopilot.
" + "smithy.api#documentation": "Creates an Autopilot job.
\nFind the best-performing model after you run an Autopilot job by calling .
\nFor information about how to use Autopilot, see Automate Model\n Development with Amazon SageMaker Autopilot.
" } }, "com.amazonaws.sagemaker#CreateAutoMLJobRequest": { @@ -4200,7 +4288,7 @@ "OutputDataConfig": { "target": "com.amazonaws.sagemaker#AutoMLOutputDataConfig", "traits": { - "smithy.api#documentation": "Provides information about encryption and the Amazon S3 output path needed to store\n artifacts from an AutoML job. Format(s) supported: CSV.
", + "smithy.api#documentation": "Provides information about encryption and the Amazon S3 output path needed to store artifacts\n from an AutoML job. Format(s) supported: CSV.
", "smithy.api#required": {} } }, @@ -4213,7 +4301,7 @@ "AutoMLJobObjective": { "target": "com.amazonaws.sagemaker#AutoMLJobObjective", "traits": { - "smithy.api#documentation": "Defines the objective metric used to measure the predictive quality of an AutoML job.\n You provide an AutoMLJobObjective$MetricName and Autopilot infers whether to\n minimize or maximize it.
" + "smithy.api#documentation": "Defines the objective metric used to measure the predictive quality of an AutoML job. You\n provide an AutoMLJobObjective$MetricName and Autopilot infers whether to\n minimize or maximize it.
" } }, "AutoMLJobConfig": { @@ -4255,7 +4343,7 @@ "AutoMLJobArn": { "target": "com.amazonaws.sagemaker#AutoMLJobArn", "traits": { - "smithy.api#documentation": "The unique ARN that is assigned to the AutoML job when it is created.
", + "smithy.api#documentation": "The unique ARN assigned to the AutoML job when it is created.
", "smithy.api#required": {} } } @@ -4866,6 +4954,12 @@ "traits": { "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that Amazon SageMaker uses to encrypt data on\n the storage volume attached to the ML compute instance that hosts the endpoint.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
, UpdateEndpoint
requests. For more\n information, refer to the Amazon Web Services Key Management Service section Using Key\n Policies in Amazon Web Services KMS \n
Certain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a KmsKeyId
when using an instance type with local\n storage. If any of the models that you specify in the\n ProductionVariants
parameter use nitro-based instances with local\n storage, do not specify a value for the KmsKeyId
parameter. If you\n specify a value for KmsKeyId
when using any nitro-based instances with\n local storage, the call to CreateEndpointConfig
fails.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\nSpecifies configuration for how an endpoint performs asynchronous inference. \n This is a required field in order for your Endpoint to be invoked using \n \n InvokeEndpointAsync
\n .
The S3 URI of the file, referred to as a label category configuration\n file, that defines the categories used to label the data objects.
\nFor 3D point cloud and video frame task types, you can add label category attributes\n and frame attributes to your label category configuration file. To learn how, see Create a\n Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.
\nFor all other built-in task types and custom\n tasks, your label category configuration file must be a JSON file in the\n following format. Identify the labels you want to use by replacing label_1
,\n label_2
,...
,label_n
with your label\n categories.
\n {
\n
\n \"document-version\": \"2018-11-28\",
\n
\n \"labels\": [{\"label\": \"label_1\"},{\"label\": \"label_2\"},...{\"label\":\n \"label_n\"}]
\n
\n }
\n
Note the following about the label category configuration file:
\nFor image classification and text classification (single and multi-label) you\n must specify at least two label categories. For all other task types, the\n minimum number of label categories required is one.
\nEach label category must be unique, you cannot specify duplicate label\n categories.
\nIf you create a 3D point cloud or video frame adjustment or verification\n labeling job, you must include auditLabelAttributeName
in the label\n category configuration. Use this parameter to enter the \n LabelAttributeName
\n of the labeling job you want to\n adjust or verify annotations of.
The S3 URI of the file, referred to as a label category configuration\n file, that defines the categories used to label the data objects.
\nFor 3D point cloud and video frame task types, you can add label category attributes\n and frame attributes to your label category configuration file. To learn how, see Create a\n Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.
\nFor named entity recognition jobs, in addition to Add Instructions Add additional instructions.\"labels\"
, you must\n provide worker instructions in the label category configuration file using the\n \"instructions\"
parameter: \"instructions\":\n {\"shortInstruction\":\"
. For details\n and an example, see Create a\n Named Entity Recognition Labeling Job (API) .Add header
For all other built-in task types and custom\n tasks, your label category configuration file must be a JSON file in the\n following format. Identify the labels you want to use by replacing label_1
,\n label_2
,...
,label_n
with your label\n categories.
\n {
\n
\n \"document-version\": \"2018-11-28\",
\n
\n \"labels\": [{\"label\": \"label_1\"},{\"label\": \"label_2\"},...{\"label\":\n \"label_n\"}]
\n
\n }
\n
Note the following about the label category configuration file:
\nFor image classification and text classification (single and multi-label) you\n must specify at least two label categories. For all other task types, the\n minimum number of label categories required is one.
\nEach label category must be unique, you cannot specify duplicate label\n categories.
\nIf you create a 3D point cloud or video frame adjustment or verification\n labeling job, you must include auditLabelAttributeName
in the label\n category configuration. Use this parameter to enter the \n LabelAttributeName
\n of the labeling job you want to\n adjust or verify annotations of.
Whether root access is enabled or disabled for users of the notebook instance. The\n default value is Enabled
.
Lifecycle configurations need root access to be able to set up a notebook\n instance. Because of this, lifecycle configurations associated with a notebook\n instance always run with root access even if you disable root access for\n users.
\nThe platform identifier of the notebook instance runtime environment.
" + } } } }, @@ -9624,7 +9724,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that has read permission to\n the input data location and write permission to the output data location in Amazon S3.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that\n has read permission to the input data location and write permission to the output data\n location in Amazon S3.
", "smithy.api#required": {} } }, @@ -9713,7 +9813,7 @@ "ResolvedAttributes": { "target": "com.amazonaws.sagemaker#ResolvedAttributes", "traits": { - "smithy.api#documentation": "This contains ProblemType
, AutoMLJobObjective
and\n CompletionCriteria
. If you do not provide these values, they are\n auto-inferred. If you do provide them, the values used are the ones you provide.
This contains ProblemType
, AutoMLJobObjective
, and\n CompletionCriteria
. If you do not provide these values, they are\n auto-inferred. If you do provide them, the values used are the ones you provide.
A timestamp that shows when the endpoint configuration was created.
", "smithy.api#required": {} } + }, + "AsyncInferenceConfig": { + "target": "com.amazonaws.sagemaker#AsyncInferenceConfig", + "traits": { + "smithy.api#documentation": "Returns the description of an endpoint configuration created using the \n \n CreateEndpointConfig
\n API.
The most recent deployment configuration for the endpoint.
" } + }, + "AsyncInferenceConfig": { + "target": "com.amazonaws.sagemaker#AsyncInferenceConfig", + "traits": { + "smithy.api#documentation": "Returns the description of an endpoint configuration created \n using the \n CreateEndpointConfig
\n API.
Whether root access is enabled or disabled for users of the notebook instance.
\nLifecycle configurations need root access to be able to set up a notebook\n instance. Because of this, lifecycle configurations associated with a notebook\n instance always run with root access even if you disable root access for\n users.
\nThe platform identifier of the notebook instance runtime environment.
" + } } } }, @@ -18378,6 +18496,15 @@ "smithy.api#documentation": "Defines how to perform inference generation after a training job is run.
" } }, + "com.amazonaws.sagemaker#InitialTaskCount": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1 + } + } + }, "com.amazonaws.sagemaker#InputConfig": { "type": "structure", "members": { @@ -18527,6 +18654,38 @@ "value": "ml.m5.24xlarge", "name": "ML_M5_24XLARGE" }, + { + "value": "ml.m5d.large", + "name": "ML_M5D_LARGE" + }, + { + "value": "ml.m5d.xlarge", + "name": "ML_M5D_XLARGE" + }, + { + "value": "ml.m5d.2xlarge", + "name": "ML_M5D_2XLARGE" + }, + { + "value": "ml.m5d.4xlarge", + "name": "ML_M5D_4XLARGE" + }, + { + "value": "ml.m5d.8xlarge", + "name": "ML_M5D_8XLARGE" + }, + { + "value": "ml.m5d.12xlarge", + "name": "ML_M5D_12XLARGE" + }, + { + "value": "ml.m5d.16xlarge", + "name": "ML_M5D_16XLARGE" + }, + { + "value": "ml.m5d.24xlarge", + "name": "ML_M5D_24XLARGE" + }, { "value": "ml.c4.xlarge", "name": "ML_C4_XLARGE" @@ -18606,6 +18765,66 @@ { "value": "ml.p3.16xlarge", "name": "ML_P3_16XLARGE" + }, + { + "value": "ml.p3dn.24xlarge", + "name": "ML_P3DN_24XLARGE" + }, + { + "value": "ml.g4dn.xlarge", + "name": "ML_G4DN_XLARGE" + }, + { + "value": "ml.g4dn.2xlarge", + "name": "ML_G4DN_2XLARGE" + }, + { + "value": "ml.g4dn.4xlarge", + "name": "ML_G4DN_4XLARGE" + }, + { + "value": "ml.g4dn.8xlarge", + "name": "ML_G4DN_8XLARGE" + }, + { + "value": "ml.g4dn.12xlarge", + "name": "ML_G4DN_12XLARGE" + }, + { + "value": "ml.g4dn.16xlarge", + "name": "ML_G4DN_16XLARGE" + }, + { + "value": "ml.r5.large", + "name": "ML_R5_LARGE" + }, + { + "value": "ml.r5.xlarge", + "name": "ML_R5_XLARGE" + }, + { + "value": "ml.r5.2xlarge", + "name": "ML_R5_2XLARGE" + }, + { + "value": "ml.r5.4xlarge", + "name": "ML_R5_4XLARGE" + }, + { + "value": "ml.r5.8xlarge", + "name": "ML_R5_8XLARGE" + }, + { + "value": "ml.r5.12xlarge", + "name": "ML_R5_12XLARGE" + }, + { + "value": "ml.r5.16xlarge", + "name": "ML_R5_16XLARGE" + }, + { + "value": "ml.r5.24xlarge", + "name": "ML_R5_24XLARGE" } ] } @@ -24261,6 +24480,16 @@ } } }, + 
"com.amazonaws.sagemaker#MaxConcurrentInvocationsPerInstance": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, "com.amazonaws.sagemaker#MaxConcurrentTaskCount": { "type": "integer", "traits": { @@ -24489,6 +24718,44 @@ "smithy.api#documentation": "The name, value, and date and time of a metric that was emitted to Amazon CloudWatch.
" } }, + "com.amazonaws.sagemaker#MetricDataList": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#MetricDatum" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 40 + } + } + }, + "com.amazonaws.sagemaker#MetricDatum": { + "type": "structure", + "members": { + "MetricName": { + "target": "com.amazonaws.sagemaker#AutoMLMetricEnum", + "traits": { + "smithy.api#documentation": "The name of the metric.
" + } + }, + "Value": { + "target": "com.amazonaws.sagemaker#Float", + "traits": { + "smithy.api#documentation": "The value of the metric.
" + } + }, + "Set": { + "target": "com.amazonaws.sagemaker#MetricSetSource", + "traits": { + "smithy.api#documentation": "The dataset split from which the AutoML job produced the metric.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the metric for a candidate produced by an AutoML job.
" + } + }, "com.amazonaws.sagemaker#MetricDefinition": { "type": "structure", "members": { @@ -24543,6 +24810,25 @@ "smithy.api#pattern": "^.+$" } }, + "com.amazonaws.sagemaker#MetricSetSource": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Train", + "name": "TRAIN" + }, + { + "value": "Validation", + "name": "VALIDATION" + }, + { + "value": "Test", + "name": "TEST" + } + ] + } + }, "com.amazonaws.sagemaker#MetricValue": { "type": "float" }, @@ -24751,7 +25037,7 @@ "EndpointName": { "target": "com.amazonaws.sagemaker#EndpointName", "traits": { - "smithy.api#documentation": "Specifies the endpoint name to use for a one-click Autopilot model deployment if the\n endpoint name is not generated automatically.
\nSpecify the EndpointName
if and only if you set\n AutoGenerateEndpointName
to False
; otherwise a 400 error\n is thrown.
Specifies the endpoint name to use for a one-click Autopilot model deployment if the\n endpoint name is not generated automatically.
\nSpecify the EndpointName
if and only if you set\n AutoGenerateEndpointName
to False
; otherwise a 400 error is\n thrown.
The ARN for the SNS topic to which notifications should be published.
" + "smithy.api#documentation": "The ARN for the Amazon SNS topic to which notifications should be published.
" } } }, "traits": { - "smithy.api#documentation": "Configures SNS notifications of available or expiring work items for work\n teams.
" + "smithy.api#documentation": "Configures Amazon SNS notifications of available or expiring work items for work\n teams.
" } }, "com.amazonaws.sagemaker#NotificationTopicArn": { @@ -28288,6 +28574,16 @@ } } }, + "com.amazonaws.sagemaker#PlatformIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 15 + }, + "smithy.api#pattern": "^(notebook-al1-v1|notebook-al2-v1)$" + } + }, "com.amazonaws.sagemaker#PolicyString": { "type": "string", "traits": { @@ -29207,7 +29503,7 @@ } }, "InitialInstanceCount": { - "target": "com.amazonaws.sagemaker#TaskCount", + "target": "com.amazonaws.sagemaker#InitialTaskCount", "traits": { "smithy.api#documentation": "Number of instances to launch initially.
", "smithy.api#required": {} @@ -33006,7 +33302,7 @@ } ], "traits": { - "smithy.api#documentation": "Stops a pipeline execution.
\nA pipeline execution won't stop while a callback step is running.\n When you call StopPipelineExecution
\n on a pipeline execution with a running callback step, SageMaker Pipelines sends an\n additional Amazon SQS message to the specified SQS queue. The body of the SQS message\n contains a \"Status\" field which is set to \"Stopping\".
You should add logic to your Amazon SQS message consumer to take any needed action (for\n example, resource cleanup) upon receipt of the message followed by a call to\n SendPipelineExecutionStepSuccess
or\n SendPipelineExecutionStepFailure
.
Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.
" + "smithy.api#documentation": "Stops a pipeline execution.
\n\n\n Callback Step\n
\nA pipeline execution won't stop while a callback step is running.\n When you call StopPipelineExecution
\n on a pipeline execution with a running callback step, SageMaker Pipelines sends an\n additional Amazon SQS message to the specified SQS queue. The body of the SQS message\n contains a \"Status\" field which is set to \"Stopping\".
You should add logic to your Amazon SQS message consumer to take any needed action (for\n example, resource cleanup) upon receipt of the message followed by a call to\n SendPipelineExecutionStepSuccess
or\n SendPipelineExecutionStepFailure
.
Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.
\n\n\n Lambda Step\n
\nA pipeline execution can't be stopped while a lambda step is running because the Lambda\n function invoked by the lambda step can't be stopped. If you attempt to stop the execution\n while the Lambda function is running, the pipeline waits for the Lambda function to finish\n or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function\n finishes, the pipeline execution status is Stopped
. If the timeout is hit\n the pipeline execution status is Failed
.
The ARN of the worker task template used to render the worker UI and tools for\n labeling job tasks.
\nUse this parameter when you are creating a labeling job for 3D point cloud and video\n frame labeling jobs. Use your labeling job task type to select one of the following ARNs\n and use it with this parameter when you create a labeling job. Replace\n aws-region
with the Amazon Web Services region you are creating your labeling job\n in.
\n 3D Point Cloud HumanTaskUiArns\n
\n\nUse this HumanTaskUiArn
for 3D point cloud object detection and 3D point\n cloud object detection adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection
\n
Use this HumanTaskUiArn
for 3D point cloud object tracking and 3D point\n cloud object tracking adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking
\n
Use this HumanTaskUiArn
for 3D point cloud semantic segmentation and 3D\n point cloud semantic segmentation adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation
\n
\n Video Frame HumanTaskUiArns\n
\n\nUse this HumanTaskUiArn
for video frame object detection and video frame\n object detection adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectDetection
\n
Use this HumanTaskUiArn
for video frame object tracking and video frame\n object tracking adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectTracking
\n
The ARN of the worker task template used to render the worker UI and tools for\n labeling job tasks.
\nUse this parameter when you are creating a labeling job for named entity recognition,\n 3D point cloud and video frame labeling jobs. Use your labeling job task type to select\n one of the following ARNs and use it with this parameter when you create a labeling job.\n Replace aws-region
with the Amazon Web Services Region you are creating your labeling job\n in. For example, replace aws-region
with us-west-1
if you\n create a labeling job in US West (N. California).
\n Named Entity Recognition\n
\nUse the following HumanTaskUiArn
for named entity recognition labeling\n jobs:
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/NamedEntityRecognition
\n
\n 3D Point Cloud HumanTaskUiArns\n
\n \nUse this HumanTaskUiArn
for 3D point cloud object detection and 3D point\n cloud object detection adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection
\n
Use this HumanTaskUiArn
for 3D point cloud object tracking and 3D point\n cloud object tracking adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking
\n
Use this HumanTaskUiArn
for 3D point cloud semantic segmentation and 3D\n point cloud semantic segmentation adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation
\n
\n Video Frame HumanTaskUiArns\n
\n \nUse this HumanTaskUiArn
for video frame object detection and video frame\n object detection adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectDetection
\n
Use this HumanTaskUiArn
for video frame object tracking and video frame\n object tracking adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectTracking
\n
Provided configuration information for the worker UI for a labeling job.
" + "smithy.api#documentation": "Provided configuration information for the worker UI for a labeling job. Provide\n either HumanTaskUiArn
or UiTemplateS3Uri
.
For named entity recognition, 3D point cloud and video frame labeling jobs, use\n HumanTaskUiArn
.
For all other Ground Truth built-in task types and custom task types, use\n UiTemplateS3Uri
to specify the location of a worker task template in\n Amazon S3.
After you deploy a model into production using Amazon SageMaker hosting services, \n your client applications use this API to get inferences from the model hosted at \n the specified endpoint in an asynchronous manner.
\n \nInference requests sent to this API are enqueued for asynchronous processing. \n The processing of the inference request may or may not complete before \n you receive a response from this API. The response from this API will \n not contain the result of the inference request but contain information \n about where you can locate it.
\n \nAmazon SageMaker strips all POST
headers except those supported by the API. \n Amazon SageMaker might add additional headers. You should not rely on the behavior \n of headers outside those enumerated in the request syntax.
Calls to InvokeEndpointAsync
are authenticated by using AWS Signature\n Version 4. For information, see Authenticating Requests (AWS Signature Version 4) in the Amazon S3 API\n Reference.
The name of the endpoint that you specified when you created the endpoint using \n the \n CreateEndpoint
\n API.
The MIME type of the input data in the request body.
", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Content-Type" + } + }, + "Accept": { + "target": "com.amazonaws.sagemakerruntime#Header", + "traits": { + "smithy.api#documentation": "The desired MIME type of the inference in the response.
", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Accept" + } + }, + "CustomAttributes": { + "target": "com.amazonaws.sagemakerruntime#CustomAttributesHeader", + "traits": { + "smithy.api#documentation": "Provides additional information about a request for an inference submitted to \n a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is \n forwarded verbatim. You could use this value, for example, to provide an ID that you \n can use to track a request or to provide other metadata that a service endpoint was \n programmed to process. The value must consist of no more than 1024 \n visible US-ASCII characters as specified in \n Section 3.3.6. \n Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1).
\n \nThe code in your model is responsible for setting or updating any custom attributes \n in the response. If your code does not set this value in the response, an empty \n value is returned. For example, if a custom attribute represents the trace ID, \n your model can prepend the custom attribute with Trace ID
: in your post-processing function.
This feature is currently supported in the AWS SDKs but not in the Amazon SageMaker Python SDK.
", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Custom-Attributes" + } + }, + "InferenceId": { + "target": "com.amazonaws.sagemakerruntime#InferenceId", + "traits": { + "smithy.api#documentation": "The identifier for the inference request. Amazon SageMaker will generate an identifier for you \n if none is specified.
", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Inference-Id" + } + }, + "InputLocation": { + "target": "com.amazonaws.sagemakerruntime#InputLocationHeader", + "traits": { + "smithy.api#documentation": "The Amazon S3 URI where the inference request payload is stored.
", + "smithy.api#httpHeader": "X-Amzn-SageMaker-InputLocation", + "smithy.api#required": {} + } + }, + "RequestTTLSeconds": { + "target": "com.amazonaws.sagemakerruntime#RequestTTLSecondsHeader", + "traits": { + "smithy.api#documentation": "Maximum age in seconds a request can be in the queue before it is marked as expired.
", + "smithy.api#httpHeader": "X-Amzn-SageMaker-RequestTTLSeconds" + } + } + } + }, + "com.amazonaws.sagemakerruntime#InvokeEndpointAsyncOutput": { + "type": "structure", + "members": { + "InferenceId": { + "target": "com.amazonaws.sagemakerruntime#Header", + "traits": { + "smithy.api#documentation": "Identifier for an inference request. This will be the same as the InferenceId
specified \n in the input. Amazon SageMaker will generate an identifier for you if you do not specify one.
The Amazon S3 URI where the inference response payload is stored.
", + "smithy.api#httpHeader": "X-Amzn-SageMaker-OutputLocation" + } + } + } + }, "com.amazonaws.sagemakerruntime#InvokeEndpointInput": { "type": "structure", "members": { @@ -295,6 +410,16 @@ "smithy.api#httpError": 424 } }, + "com.amazonaws.sagemakerruntime#RequestTTLSecondsHeader": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 60, + "max": 21600 + } + } + }, "com.amazonaws.sagemakerruntime#ServiceUnavailable": { "type": "structure", "members": { @@ -321,7 +446,7 @@ "min": 0, "max": 63 }, - "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, "com.amazonaws.sagemakerruntime#TargetModelHeader": { @@ -331,7 +456,7 @@ "min": 1, "max": 1024 }, - "smithy.api#pattern": "\\A\\S[\\p{Print}]*\\z" + "smithy.api#pattern": "^\\A\\S[\\p{Print}]*\\z$" } }, "com.amazonaws.sagemakerruntime#TargetVariantHeader": { @@ -341,7 +466,7 @@ "min": 0, "max": 63 }, - "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, "com.amazonaws.sagemakerruntime#ValidationError": { diff --git a/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen/endpoint-prefix.json b/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen/endpoint-prefix.json index 1eed56267db..48806ebf03a 100644 --- a/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen/endpoint-prefix.json +++ b/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen/endpoint-prefix.json @@ -178,6 +178,7 @@ "MediaStore": "mediastore", "MediaStore Data": "data.mediastore", "MediaTailor": "api.mediatailor", + "MemoryDB": "memory-db", "Mgn": "mgn", "Migration Hub": "mgh", "MigrationHub Config": "migrationhub-config", diff --git a/service/apigateway/api_op_CreateDomainName.go b/service/apigateway/api_op_CreateDomainName.go index 1fb8057a202..905643f316b 
100644 --- a/service/apigateway/api_op_CreateDomainName.go +++ b/service/apigateway/api_op_CreateDomainName.go @@ -72,6 +72,11 @@ type CreateDomainNameInput struct { // domain name. MutualTlsAuthentication *types.MutualTlsAuthenticationInput + // The ARN of the public certificate issued by ACM to validate ownership of your + // custom domain. Only required when configuring mutual TLS and using an ACM + // imported or private CA certificate ARN as the regionalCertificateArn. + OwnershipVerificationCertificateArn *string + // The reference to an AWS-managed certificate that will be used by regional // endpoint for this domain name. AWS Certificate Manager is the only supported // source. @@ -137,10 +142,10 @@ type CreateDomainNameOutput struct { // The custom domain name as an API host name, for example, my-api.example.com. DomainName *string - // The status of the DomainName migration. The valid values are AVAILABLE and - // UPDATING. If the status is UPDATING, the domain cannot be modified further until - // the existing operation is complete. If it is AVAILABLE, the domain can be - // updated. + // The status of the DomainName migration. The valid values are AVAILABLE, + // UPDATING, PENDING_CERTIFICATE_REIMPORT, and PENDING_OWNERSHIP_VERIFICATION. If + // the status is UPDATING, the domain cannot be modified further until the existing + // operation is complete. If it is AVAILABLE, the domain can be updated. DomainNameStatus types.DomainNameStatus // An optional text message containing detailed information about status of the @@ -156,6 +161,11 @@ type CreateDomainNameOutput struct { // the server. Clients must present a trusted certificate to access your API. MutualTlsAuthentication *types.MutualTlsAuthentication + // The ARN of the public certificate issued by ACM to validate ownership of your + // custom domain. Only required when configuring mutual TLS and using an ACM + // imported or private CA certificate ARN as the regionalCertificateArn. 
+ OwnershipVerificationCertificateArn *string + // The reference to an AWS-managed certificate that will be used for validating the // regional domain name. AWS Certificate Manager is the only supported source. RegionalCertificateArn *string diff --git a/service/apigateway/api_op_GetDomainName.go b/service/apigateway/api_op_GetDomainName.go index 57a5d78950d..2940220e37d 100644 --- a/service/apigateway/api_op_GetDomainName.go +++ b/service/apigateway/api_op_GetDomainName.go @@ -84,10 +84,10 @@ type GetDomainNameOutput struct { // The custom domain name as an API host name, for example, my-api.example.com. DomainName *string - // The status of the DomainName migration. The valid values are AVAILABLE and - // UPDATING. If the status is UPDATING, the domain cannot be modified further until - // the existing operation is complete. If it is AVAILABLE, the domain can be - // updated. + // The status of the DomainName migration. The valid values are AVAILABLE, + // UPDATING, PENDING_CERTIFICATE_REIMPORT, and PENDING_OWNERSHIP_VERIFICATION. If + // the status is UPDATING, the domain cannot be modified further until the existing + // operation is complete. If it is AVAILABLE, the domain can be updated. DomainNameStatus types.DomainNameStatus // An optional text message containing detailed information about status of the @@ -103,6 +103,11 @@ type GetDomainNameOutput struct { // the server. Clients must present a trusted certificate to access your API. MutualTlsAuthentication *types.MutualTlsAuthentication + // The ARN of the public certificate issued by ACM to validate ownership of your + // custom domain. Only required when configuring mutual TLS and using an ACM + // imported or private CA certificate ARN as the regionalCertificateArn. + OwnershipVerificationCertificateArn *string + // The reference to an AWS-managed certificate that will be used for validating the // regional domain name. AWS Certificate Manager is the only supported source. 
RegionalCertificateArn *string diff --git a/service/apigateway/api_op_UpdateDomainName.go b/service/apigateway/api_op_UpdateDomainName.go index 4610471f024..1752bad04b2 100644 --- a/service/apigateway/api_op_UpdateDomainName.go +++ b/service/apigateway/api_op_UpdateDomainName.go @@ -87,10 +87,10 @@ type UpdateDomainNameOutput struct { // The custom domain name as an API host name, for example, my-api.example.com. DomainName *string - // The status of the DomainName migration. The valid values are AVAILABLE and - // UPDATING. If the status is UPDATING, the domain cannot be modified further until - // the existing operation is complete. If it is AVAILABLE, the domain can be - // updated. + // The status of the DomainName migration. The valid values are AVAILABLE, + // UPDATING, PENDING_CERTIFICATE_REIMPORT, and PENDING_OWNERSHIP_VERIFICATION. If + // the status is UPDATING, the domain cannot be modified further until the existing + // operation is complete. If it is AVAILABLE, the domain can be updated. DomainNameStatus types.DomainNameStatus // An optional text message containing detailed information about status of the @@ -106,6 +106,11 @@ type UpdateDomainNameOutput struct { // the server. Clients must present a trusted certificate to access your API. MutualTlsAuthentication *types.MutualTlsAuthentication + // The ARN of the public certificate issued by ACM to validate ownership of your + // custom domain. Only required when configuring mutual TLS and using an ACM + // imported or private CA certificate ARN as the regionalCertificateArn. + OwnershipVerificationCertificateArn *string + // The reference to an AWS-managed certificate that will be used for validating the // regional domain name. AWS Certificate Manager is the only supported source. 
RegionalCertificateArn *string diff --git a/service/apigateway/deserializers.go b/service/apigateway/deserializers.go index c5bdee71773..df19d7ed8e0 100644 --- a/service/apigateway/deserializers.go +++ b/service/apigateway/deserializers.go @@ -1500,6 +1500,15 @@ func awsRestjson1_deserializeOpDocumentCreateDomainNameOutput(v **CreateDomainNa return err } + case "ownershipVerificationCertificateArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.OwnershipVerificationCertificateArn = ptr.String(jtv) + } + case "regionalCertificateArn": if value != nil { jtv, ok := value.(string) @@ -8791,6 +8800,15 @@ func awsRestjson1_deserializeOpDocumentGetDomainNameOutput(v **GetDomainNameOutp return err } + case "ownershipVerificationCertificateArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.OwnershipVerificationCertificateArn = ptr.String(jtv) + } + case "regionalCertificateArn": if value != nil { jtv, ok := value.(string) @@ -18767,6 +18785,15 @@ func awsRestjson1_deserializeOpDocumentUpdateDomainNameOutput(v **UpdateDomainNa return err } + case "ownershipVerificationCertificateArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.OwnershipVerificationCertificateArn = ptr.String(jtv) + } + case "regionalCertificateArn": if value != nil { jtv, ok := value.(string) @@ -22994,6 +23021,15 @@ func awsRestjson1_deserializeDocumentDomainName(v **types.DomainName, value inte return err } + case "ownershipVerificationCertificateArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.OwnershipVerificationCertificateArn = ptr.String(jtv) + } + case "regionalCertificateArn": if 
value != nil { jtv, ok := value.(string) diff --git a/service/apigateway/serializers.go b/service/apigateway/serializers.go index dae4af5e6f2..c3711645176 100644 --- a/service/apigateway/serializers.go +++ b/service/apigateway/serializers.go @@ -739,6 +739,11 @@ func awsRestjson1_serializeOpDocumentCreateDomainNameInput(v *CreateDomainNameIn } } + if v.OwnershipVerificationCertificateArn != nil { + ok := object.Key("ownershipVerificationCertificateArn") + ok.String(*v.OwnershipVerificationCertificateArn) + } + if v.RegionalCertificateArn != nil { ok := object.Key("regionalCertificateArn") ok.String(*v.RegionalCertificateArn) diff --git a/service/apigateway/types/enums.go b/service/apigateway/types/enums.go index 784f47d3fa0..fbdf2c38b8c 100644 --- a/service/apigateway/types/enums.go +++ b/service/apigateway/types/enums.go @@ -188,9 +188,11 @@ type DomainNameStatus string // Enum values for DomainNameStatus const ( - DomainNameStatusAvailable DomainNameStatus = "AVAILABLE" - DomainNameStatusUpdating DomainNameStatus = "UPDATING" - DomainNameStatusPending DomainNameStatus = "PENDING" + DomainNameStatusAvailable DomainNameStatus = "AVAILABLE" + DomainNameStatusUpdating DomainNameStatus = "UPDATING" + DomainNameStatusPending DomainNameStatus = "PENDING" + DomainNameStatusPendingCertificateReimport DomainNameStatus = "PENDING_CERTIFICATE_REIMPORT" + DomainNameStatusPendingOwnershipVerification DomainNameStatus = "PENDING_OWNERSHIP_VERIFICATION" ) // Values returns all known values for DomainNameStatus. 
Note that this can be @@ -201,6 +203,8 @@ func (DomainNameStatus) Values() []DomainNameStatus { "AVAILABLE", "UPDATING", "PENDING", + "PENDING_CERTIFICATE_REIMPORT", + "PENDING_OWNERSHIP_VERIFICATION", } } diff --git a/service/apigateway/types/types.go b/service/apigateway/types/types.go index dd4e5b0dbb8..8fbdea1d179 100644 --- a/service/apigateway/types/types.go +++ b/service/apigateway/types/types.go @@ -434,10 +434,10 @@ type DomainName struct { // The custom domain name as an API host name, for example, my-api.example.com. DomainName *string - // The status of the DomainName migration. The valid values are AVAILABLE and - // UPDATING. If the status is UPDATING, the domain cannot be modified further until - // the existing operation is complete. If it is AVAILABLE, the domain can be - // updated. + // The status of the DomainName migration. The valid values are AVAILABLE, + // UPDATING, PENDING_CERTIFICATE_REIMPORT, and PENDING_OWNERSHIP_VERIFICATION. If + // the status is UPDATING, the domain cannot be modified further until the existing + // operation is complete. If it is AVAILABLE, the domain can be updated. DomainNameStatus DomainNameStatus // An optional text message containing detailed information about status of the @@ -453,6 +453,11 @@ type DomainName struct { // the server. Clients must present a trusted certificate to access your API. MutualTlsAuthentication *MutualTlsAuthentication + // The ARN of the public certificate issued by ACM to validate ownership of your + // custom domain. Only required when configuring mutual TLS and using an ACM + // imported or private CA certificate ARN as the regionalCertificateArn. + OwnershipVerificationCertificateArn *string + // The reference to an AWS-managed certificate that will be used for validating the // regional domain name. AWS Certificate Manager is the only supported source. 
RegionalCertificateArn *string diff --git a/service/apigatewayv2/deserializers.go b/service/apigatewayv2/deserializers.go index bbb88381536..d78ad4e3896 100644 --- a/service/apigatewayv2/deserializers.go +++ b/service/apigatewayv2/deserializers.go @@ -14502,6 +14502,15 @@ func awsRestjson1_deserializeDocumentDomainNameConfiguration(v **types.DomainNam sv.HostedZoneId = ptr.String(jtv) } + case "ownershipVerificationCertificateArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.OwnershipVerificationCertificateArn = ptr.String(jtv) + } + case "securityPolicy": if value != nil { jtv, ok := value.(string) diff --git a/service/apigatewayv2/serializers.go b/service/apigatewayv2/serializers.go index 41b7f879148..7031d14fa29 100644 --- a/service/apigatewayv2/serializers.go +++ b/service/apigatewayv2/serializers.go @@ -6232,6 +6232,11 @@ func awsRestjson1_serializeDocumentDomainNameConfiguration(v *types.DomainNameCo ok.String(*v.HostedZoneId) } + if v.OwnershipVerificationCertificateArn != nil { + ok := object.Key("ownershipVerificationCertificateArn") + ok.String(*v.OwnershipVerificationCertificateArn) + } + if len(v.SecurityPolicy) > 0 { ok := object.Key("securityPolicy") ok.String(string(v.SecurityPolicy)) diff --git a/service/apigatewayv2/types/enums.go b/service/apigatewayv2/types/enums.go index 46fce137e1d..5e1e5734fed 100644 --- a/service/apigatewayv2/types/enums.go +++ b/service/apigatewayv2/types/enums.go @@ -102,8 +102,10 @@ type DomainNameStatus string // Enum values for DomainNameStatus const ( - DomainNameStatusAvailable DomainNameStatus = "AVAILABLE" - DomainNameStatusUpdating DomainNameStatus = "UPDATING" + DomainNameStatusAvailable DomainNameStatus = "AVAILABLE" + DomainNameStatusUpdating DomainNameStatus = "UPDATING" + DomainNameStatusPendingCertificateReimport DomainNameStatus = "PENDING_CERTIFICATE_REIMPORT" + DomainNameStatusPendingOwnershipVerification 
DomainNameStatus = "PENDING_OWNERSHIP_VERIFICATION" ) // Values returns all known values for DomainNameStatus. Note that this can be @@ -113,6 +115,8 @@ func (DomainNameStatus) Values() []DomainNameStatus { return []DomainNameStatus{ "AVAILABLE", "UPDATING", + "PENDING_CERTIFICATE_REIMPORT", + "PENDING_OWNERSHIP_VERIFICATION", } } diff --git a/service/apigatewayv2/types/types.go b/service/apigatewayv2/types/types.go index 13d2c784342..a622d6f553b 100644 --- a/service/apigatewayv2/types/types.go +++ b/service/apigatewayv2/types/types.go @@ -299,10 +299,10 @@ type DomainNameConfiguration struct { // this domain name was uploaded. CertificateUploadDate *time.Time - // The status of the domain name migration. The valid values are AVAILABLE and - // UPDATING. If the status is UPDATING, the domain cannot be modified further until - // the existing operation is complete. If it is AVAILABLE, the domain can be - // updated. + // The status of the domain name migration. The valid values are AVAILABLE, + // UPDATING, PENDING_CERTIFICATE_REIMPORT, and PENDING_OWNERSHIP_VERIFICATION. If + // the status is UPDATING, the domain cannot be modified further until the existing + // operation is complete. If it is AVAILABLE, the domain can be updated. DomainNameStatus DomainNameStatus // An optional text message containing detailed information about status of the @@ -315,6 +315,11 @@ type DomainNameConfiguration struct { // The Amazon Route 53 Hosted Zone ID of the endpoint. HostedZoneId *string + // The ARN of the public certificate issued by ACM to validate ownership of your + // custom domain. Only required when configuring mutual TLS and using an ACM + // imported or private CA certificate ARN as the regionalCertificateArn + OwnershipVerificationCertificateArn *string + // The Transport Layer Security (TLS) version of the security policy for this // domain name. The valid values are TLS_1_0 and TLS_1_2. 
SecurityPolicy SecurityPolicy diff --git a/service/appflow/api_op_CreateConnectorProfile.go b/service/appflow/api_op_CreateConnectorProfile.go index fc7a9a79add..50197f584da 100644 --- a/service/appflow/api_op_CreateConnectorProfile.go +++ b/service/appflow/api_op_CreateConnectorProfile.go @@ -11,10 +11,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a new connector profile associated with your AWS account. There is a -// soft quota of 100 connector profiles per AWS account. If you need more connector -// profiles than this quota allows, you can submit a request to the Amazon AppFlow -// team through the Amazon AppFlow support channel. +// Creates a new connector profile associated with your Amazon Web Services +// account. There is a soft quota of 100 connector profiles per Amazon Web Services +// account. If you need more connector profiles than this quota allows, you can +// submit a request to the Amazon AppFlow team through the Amazon AppFlow support +// channel. func (c *Client) CreateConnectorProfile(ctx context.Context, params *CreateConnectorProfileInput, optFns ...func(*Options)) (*CreateConnectorProfileOutput, error) { if params == nil { params = &CreateConnectorProfileInput{} @@ -33,8 +34,8 @@ func (c *Client) CreateConnectorProfile(ctx context.Context, params *CreateConne type CreateConnectorProfileInput struct { // Indicates the connection mode and specifies whether it is public or private. - // Private flows use AWS PrivateLink to route data over AWS infrastructure without - // exposing it to the public internet. + // Private flows use Amazon Web Services PrivateLink to route data over Amazon Web + // Services infrastructure without exposing it to the public internet. // // This member is required. ConnectionMode types.ConnectionMode @@ -45,7 +46,7 @@ type CreateConnectorProfileInput struct { ConnectorProfileConfig *types.ConnectorProfileConfig // The name of the connector profile. 
The name is unique for each ConnectorProfile - // in your AWS account. + // in your Amazon Web Services account. // // This member is required. ConnectorProfileName *string diff --git a/service/appflow/api_op_DescribeConnectorEntity.go b/service/appflow/api_op_DescribeConnectorEntity.go index 1be959a79de..354f24918b3 100644 --- a/service/appflow/api_op_DescribeConnectorEntity.go +++ b/service/appflow/api_op_DescribeConnectorEntity.go @@ -36,7 +36,7 @@ type DescribeConnectorEntityInput struct { ConnectorEntityName *string // The name of the connector profile. The name is unique for each ConnectorProfile - // in the AWS account. + // in the Amazon Web Services account. ConnectorProfileName *string // The type of connector application, such as Salesforce, Amplitude, and so on. diff --git a/service/appflow/api_op_DescribeConnectorProfiles.go b/service/appflow/api_op_DescribeConnectorProfiles.go index 71cecea80e2..8ad7fb68e1e 100644 --- a/service/appflow/api_op_DescribeConnectorProfiles.go +++ b/service/appflow/api_op_DescribeConnectorProfiles.go @@ -35,7 +35,7 @@ func (c *Client) DescribeConnectorProfiles(ctx context.Context, params *Describe type DescribeConnectorProfilesInput struct { // The name of the connector profile. The name is unique for each ConnectorProfile - // in the AWS account. + // in the Amazon Web Services account. ConnectorProfileNames []string // The type of connector, such as Salesforce, Amplitude, and so on. diff --git a/service/appflow/api_op_ListConnectorEntities.go b/service/appflow/api_op_ListConnectorEntities.go index 524a8a11edf..ff227af25b7 100644 --- a/service/appflow/api_op_ListConnectorEntities.go +++ b/service/appflow/api_op_ListConnectorEntities.go @@ -32,7 +32,8 @@ func (c *Client) ListConnectorEntities(ctx context.Context, params *ListConnecto type ListConnectorEntitiesInput struct { // The name of the connector profile. 
The name is unique for each ConnectorProfile - // in the AWS account, and is used to query the downstream connector. + // in the Amazon Web Services account, and is used to query the downstream + // connector. ConnectorProfileName *string // The type of connector, such as Salesforce, Amplitude, and so on. diff --git a/service/appflow/api_op_UpdateConnectorProfile.go b/service/appflow/api_op_UpdateConnectorProfile.go index f958a2ff680..7ff028d4700 100644 --- a/service/appflow/api_op_UpdateConnectorProfile.go +++ b/service/appflow/api_op_UpdateConnectorProfile.go @@ -40,7 +40,7 @@ type UpdateConnectorProfileInput struct { ConnectorProfileConfig *types.ConnectorProfileConfig // The name of the connector profile and is unique for each ConnectorProfile in the - // AWS Account. + // Amazon Web Services account. // // This member is required. ConnectorProfileName *string diff --git a/service/appflow/api_op_UpdateFlow.go b/service/appflow/api_op_UpdateFlow.go index f3d4784dc8a..8e65e253b18 100644 --- a/service/appflow/api_op_UpdateFlow.go +++ b/service/appflow/api_op_UpdateFlow.go @@ -41,6 +41,12 @@ type UpdateFlowInput struct { // This member is required. FlowName *string + // Contains information about the configuration of the source connector used in the + // flow. + // + // This member is required. + SourceFlowConfig *types.SourceFlowConfig + // A list of tasks that Amazon AppFlow performs while transferring the data in the // flow run. // @@ -55,10 +61,6 @@ type UpdateFlowInput struct { // A description of the flow. Description *string - // Contains information about the configuration of the source connector used in the - // flow. 
- SourceFlowConfig *types.SourceFlowConfig - noSmithyDocumentSerde } diff --git a/service/appflow/deserializers.go b/service/appflow/deserializers.go index b54fa3568b7..db4df747a12 100644 --- a/service/appflow/deserializers.go +++ b/service/appflow/deserializers.go @@ -3731,6 +3731,11 @@ func awsRestjson1_deserializeDocumentConnectorMetadata(v **types.ConnectorMetada return err } + case "SAPOData": + if err := awsRestjson1_deserializeDocumentSAPODataMetadata(&sv.SAPOData, value); err != nil { + return err + } + case "ServiceNow": if err := awsRestjson1_deserializeDocumentServiceNowMetadata(&sv.ServiceNow, value); err != nil { return err @@ -3874,6 +3879,15 @@ func awsRestjson1_deserializeDocumentConnectorOperator(v **types.ConnectorOperat sv.Salesforce = types.SalesforceConnectorOperator(jtv) } + case "SAPOData": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SAPODataConnectorOperator to be of type string, got %T instead", value) + } + sv.SAPOData = types.SAPODataConnectorOperator(jtv) + } + case "ServiceNow": if value != nil { jtv, ok := value.(string) @@ -4041,6 +4055,11 @@ func awsRestjson1_deserializeDocumentConnectorProfile(v **types.ConnectorProfile } } + case "privateConnectionProvisioningState": + if err := awsRestjson1_deserializeDocumentPrivateConnectionProvisioningState(&sv.PrivateConnectionProvisioningState, value); err != nil { + return err + } + default: _, _ = key, value @@ -4151,6 +4170,11 @@ func awsRestjson1_deserializeDocumentConnectorProfileProperties(v **types.Connec return err } + case "SAPOData": + if err := awsRestjson1_deserializeDocumentSAPODataConnectorProfileProperties(&sv.SAPOData, value); err != nil { + return err + } + case "ServiceNow": if err := awsRestjson1_deserializeDocumentServiceNowConnectorProfileProperties(&sv.ServiceNow, value); err != nil { return err @@ -6155,6 +6179,60 @@ func awsRestjson1_deserializeDocumentMarketoSourceProperties(v **types.MarketoSo return nil } +func 
awsRestjson1_deserializeDocumentOAuthProperties(v **types.OAuthProperties, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OAuthProperties + if *v == nil { + sv = &types.OAuthProperties{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authCodeUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AuthCodeUrl to be of type string, got %T instead", value) + } + sv.AuthCodeUrl = ptr.String(jtv) + } + + case "oAuthScopes": + if err := awsRestjson1_deserializeDocumentOAuthScopeList(&sv.OAuthScopes, value); err != nil { + return err + } + + case "tokenUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenUrl to be of type string, got %T instead", value) + } + sv.TokenUrl = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentOAuthScopeList(v *[]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -6240,6 +6318,64 @@ func awsRestjson1_deserializeDocumentPrefixConfig(v **types.PrefixConfig, value return nil } +func awsRestjson1_deserializeDocumentPrivateConnectionProvisioningState(v **types.PrivateConnectionProvisioningState, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PrivateConnectionProvisioningState + if *v == nil { + sv = &types.PrivateConnectionProvisioningState{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failureCause": + if value != nil { + 
jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrivateConnectionProvisioningFailureCause to be of type string, got %T instead", value) + } + sv.FailureCause = types.PrivateConnectionProvisioningFailureCause(jtv) + } + + case "failureMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrivateConnectionProvisioningFailureMessage to be of type string, got %T instead", value) + } + sv.FailureMessage = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrivateConnectionProvisioningStatus to be of type string, got %T instead", value) + } + sv.Status = types.PrivateConnectionProvisioningStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentRedshiftConnectorProfileProperties(v **types.RedshiftConnectorProfileProperties, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -6863,6 +6999,171 @@ func awsRestjson1_deserializeDocumentSalesforceSourceProperties(v **types.Salesf return nil } +func awsRestjson1_deserializeDocumentSAPODataConnectorProfileProperties(v **types.SAPODataConnectorProfileProperties, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SAPODataConnectorProfileProperties + if *v == nil { + sv = &types.SAPODataConnectorProfileProperties{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "applicationHostUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApplicationHostUrl to be of type string, got %T instead", value) + } + sv.ApplicationHostUrl = ptr.String(jtv) + } + + case "applicationServicePath": + if value != nil 
{ + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApplicationServicePath to be of type string, got %T instead", value) + } + sv.ApplicationServicePath = ptr.String(jtv) + } + + case "clientNumber": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientNumber to be of type string, got %T instead", value) + } + sv.ClientNumber = ptr.String(jtv) + } + + case "logonLanguage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LogonLanguage to be of type string, got %T instead", value) + } + sv.LogonLanguage = ptr.String(jtv) + } + + case "oAuthProperties": + if err := awsRestjson1_deserializeDocumentOAuthProperties(&sv.OAuthProperties, value); err != nil { + return err + } + + case "portNumber": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PortNumber to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PortNumber = int32(i64) + } + + case "privateLinkServiceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrivateLinkServiceName to be of type string, got %T instead", value) + } + sv.PrivateLinkServiceName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSAPODataMetadata(v **types.SAPODataMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SAPODataMetadata + if *v == nil { + sv = &types.SAPODataMetadata{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSAPODataSourceProperties(v 
**types.SAPODataSourceProperties, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SAPODataSourceProperties + if *v == nil { + sv = &types.SAPODataSourceProperties{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "objectPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Object to be of type string, got %T instead", value) + } + sv.ObjectPath = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentScheduledTriggerProperties(v **types.ScheduledTriggerProperties, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -7642,6 +7943,11 @@ func awsRestjson1_deserializeDocumentSourceConnectorProperties(v **types.SourceC return err } + case "SAPOData": + if err := awsRestjson1_deserializeDocumentSAPODataSourceProperties(&sv.SAPOData, value); err != nil { + return err + } + case "ServiceNow": if err := awsRestjson1_deserializeDocumentServiceNowSourceProperties(&sv.ServiceNow, value); err != nil { return err @@ -8630,6 +8936,42 @@ func awsRestjson1_deserializeDocumentVeevaSourceProperties(v **types.VeevaSource for key, value := range shape { switch key { + case "documentType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DocumentType to be of type string, got %T instead", value) + } + sv.DocumentType = ptr.String(jtv) + } + + case "includeAllVersions": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.IncludeAllVersions = jtv + } + + case "includeRenditions": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return 
fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.IncludeRenditions = jtv + } + + case "includeSourceFiles": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.IncludeSourceFiles = jtv + } + case "object": if value != nil { jtv, ok := value.(string) diff --git a/service/appflow/doc.go b/service/appflow/doc.go index 6bcec1e06b2..744bfe693ce 100644 --- a/service/appflow/doc.go +++ b/service/appflow/doc.go @@ -7,9 +7,9 @@ // need detailed information about the Amazon AppFlow API operations, data types, // and errors. Amazon AppFlow is a fully managed integration service that enables // you to securely transfer data between software as a service (SaaS) applications -// like Salesforce, Marketo, Slack, and ServiceNow, and AWS services like Amazon S3 -// and Amazon Redshift. Use the following links to get started on the Amazon -// AppFlow API: +// like Salesforce, Marketo, Slack, and ServiceNow, and Amazon Web Services like +// Amazon S3 and Amazon Redshift. 
Use the following links to get started on the +// Amazon AppFlow API: // // * Actions // (https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html): An diff --git a/service/appflow/serializers.go b/service/appflow/serializers.go index 2d4a3fb9296..d4a4d22efc5 100644 --- a/service/appflow/serializers.go +++ b/service/appflow/serializers.go @@ -1469,6 +1469,23 @@ func awsRestjson1_serializeDocumentAmplitudeSourceProperties(v *types.AmplitudeS return nil } +func awsRestjson1_serializeDocumentBasicAuthCredentials(v *types.BasicAuthCredentials, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Password != nil { + ok := object.Key("password") + ok.String(*v.Password) + } + + if v.Username != nil { + ok := object.Key("username") + ok.String(*v.Username) + } + + return nil +} + func awsRestjson1_serializeDocumentConnectorOAuthRequest(v *types.ConnectorOAuthRequest, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -1530,6 +1547,11 @@ func awsRestjson1_serializeDocumentConnectorOperator(v *types.ConnectorOperator, ok.String(string(v.Salesforce)) } + if len(v.SAPOData) > 0 { + ok := object.Key("SAPOData") + ok.String(string(v.SAPOData)) + } + if len(v.ServiceNow) > 0 { ok := object.Key("ServiceNow") ok.String(string(v.ServiceNow)) @@ -1651,6 +1673,13 @@ func awsRestjson1_serializeDocumentConnectorProfileCredentials(v *types.Connecto } } + if v.SAPOData != nil { + ok := object.Key("SAPOData") + if err := awsRestjson1_serializeDocumentSAPODataConnectorProfileCredentials(v.SAPOData, ok); err != nil { + return err + } + } + if v.ServiceNow != nil { ok := object.Key("ServiceNow") if err := awsRestjson1_serializeDocumentServiceNowConnectorProfileCredentials(v.ServiceNow, ok); err != nil { @@ -1781,6 +1810,13 @@ func awsRestjson1_serializeDocumentConnectorProfileProperties(v *types.Connector } } + if v.SAPOData != nil { + ok := object.Key("SAPOData") + if err := 
awsRestjson1_serializeDocumentSAPODataConnectorProfileProperties(v.SAPOData, ok); err != nil { + return err + } + } + if v.ServiceNow != nil { ok := object.Key("ServiceNow") if err := awsRestjson1_serializeDocumentServiceNowConnectorProfileProperties(v.ServiceNow, ok); err != nil { @@ -2330,6 +2366,75 @@ func awsRestjson1_serializeDocumentMarketoSourceProperties(v *types.MarketoSourc return nil } +func awsRestjson1_serializeDocumentOAuthCredentials(v *types.OAuthCredentials, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AccessToken != nil { + ok := object.Key("accessToken") + ok.String(*v.AccessToken) + } + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.OAuthRequest != nil { + ok := object.Key("oAuthRequest") + if err := awsRestjson1_serializeDocumentConnectorOAuthRequest(v.OAuthRequest, ok); err != nil { + return err + } + } + + if v.RefreshToken != nil { + ok := object.Key("refreshToken") + ok.String(*v.RefreshToken) + } + + return nil +} + +func awsRestjson1_serializeDocumentOAuthProperties(v *types.OAuthProperties, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AuthCodeUrl != nil { + ok := object.Key("authCodeUrl") + ok.String(*v.AuthCodeUrl) + } + + if v.OAuthScopes != nil { + ok := object.Key("oAuthScopes") + if err := awsRestjson1_serializeDocumentOAuthScopeList(v.OAuthScopes, ok); err != nil { + return err + } + } + + if v.TokenUrl != nil { + ok := object.Key("tokenUrl") + ok.String(*v.TokenUrl) + } + + return nil +} + +func awsRestjson1_serializeDocumentOAuthScopeList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsRestjson1_serializeDocumentPrefixConfig(v *types.PrefixConfig, value 
smithyjson.Value) error { object := value.Object() defer object.Close() @@ -2586,6 +2691,83 @@ func awsRestjson1_serializeDocumentSalesforceSourceProperties(v *types.Salesforc return nil } +func awsRestjson1_serializeDocumentSAPODataConnectorProfileCredentials(v *types.SAPODataConnectorProfileCredentials, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BasicAuthCredentials != nil { + ok := object.Key("basicAuthCredentials") + if err := awsRestjson1_serializeDocumentBasicAuthCredentials(v.BasicAuthCredentials, ok); err != nil { + return err + } + } + + if v.OAuthCredentials != nil { + ok := object.Key("oAuthCredentials") + if err := awsRestjson1_serializeDocumentOAuthCredentials(v.OAuthCredentials, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentSAPODataConnectorProfileProperties(v *types.SAPODataConnectorProfileProperties, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ApplicationHostUrl != nil { + ok := object.Key("applicationHostUrl") + ok.String(*v.ApplicationHostUrl) + } + + if v.ApplicationServicePath != nil { + ok := object.Key("applicationServicePath") + ok.String(*v.ApplicationServicePath) + } + + if v.ClientNumber != nil { + ok := object.Key("clientNumber") + ok.String(*v.ClientNumber) + } + + if v.LogonLanguage != nil { + ok := object.Key("logonLanguage") + ok.String(*v.LogonLanguage) + } + + if v.OAuthProperties != nil { + ok := object.Key("oAuthProperties") + if err := awsRestjson1_serializeDocumentOAuthProperties(v.OAuthProperties, ok); err != nil { + return err + } + } + + { + ok := object.Key("portNumber") + ok.Integer(v.PortNumber) + } + + if v.PrivateLinkServiceName != nil { + ok := object.Key("privateLinkServiceName") + ok.String(*v.PrivateLinkServiceName) + } + + return nil +} + +func awsRestjson1_serializeDocumentSAPODataSourceProperties(v *types.SAPODataSourceProperties, value smithyjson.Value) error { + object 
:= value.Object() + defer object.Close() + + if v.ObjectPath != nil { + ok := object.Key("objectPath") + ok.String(*v.ObjectPath) + } + + return nil +} + func awsRestjson1_serializeDocumentScheduledTriggerProperties(v *types.ScheduledTriggerProperties, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -2901,6 +3083,13 @@ func awsRestjson1_serializeDocumentSourceConnectorProperties(v *types.SourceConn } } + if v.SAPOData != nil { + ok := object.Key("SAPOData") + if err := awsRestjson1_serializeDocumentSAPODataSourceProperties(v.SAPOData, ok); err != nil { + return err + } + } + if v.ServiceNow != nil { ok := object.Key("ServiceNow") if err := awsRestjson1_serializeDocumentServiceNowSourceProperties(v.ServiceNow, ok); err != nil { @@ -3208,6 +3397,26 @@ func awsRestjson1_serializeDocumentVeevaSourceProperties(v *types.VeevaSourcePro object := value.Object() defer object.Close() + if v.DocumentType != nil { + ok := object.Key("documentType") + ok.String(*v.DocumentType) + } + + if v.IncludeAllVersions { + ok := object.Key("includeAllVersions") + ok.Boolean(v.IncludeAllVersions) + } + + if v.IncludeRenditions { + ok := object.Key("includeRenditions") + ok.Boolean(v.IncludeRenditions) + } + + if v.IncludeSourceFiles { + ok := object.Key("includeSourceFiles") + ok.Boolean(v.IncludeSourceFiles) + } + if v.Object != nil { ok := object.Key("object") ok.String(*v.Object) diff --git a/service/appflow/types/enums.go b/service/appflow/types/enums.go index 9e5efb15e9b..399380dd4c8 100644 --- a/service/appflow/types/enums.go +++ b/service/appflow/types/enums.go @@ -79,6 +79,7 @@ const ( ConnectorTypeUpsolver ConnectorType = "Upsolver" ConnectorTypeHoneycode ConnectorType = "Honeycode" ConnectorTypeCustomerprofiles ConnectorType = "CustomerProfiles" + ConnectorTypeSapodata ConnectorType = "SAPOData" ) // Values returns all known values for ConnectorType. 
Note that this can be @@ -107,6 +108,7 @@ func (ConnectorType) Values() []ConnectorType { "Upsolver", "Honeycode", "CustomerProfiles", + "SAPOData", } } @@ -535,6 +537,52 @@ func (PrefixType) Values() []PrefixType { } } +type PrivateConnectionProvisioningFailureCause string + +// Enum values for PrivateConnectionProvisioningFailureCause +const ( + PrivateConnectionProvisioningFailureCauseConnectorAuthentication PrivateConnectionProvisioningFailureCause = "CONNECTOR_AUTHENTICATION" + PrivateConnectionProvisioningFailureCauseConnectorServer PrivateConnectionProvisioningFailureCause = "CONNECTOR_SERVER" + PrivateConnectionProvisioningFailureCauseInternalServer PrivateConnectionProvisioningFailureCause = "INTERNAL_SERVER" + PrivateConnectionProvisioningFailureCauseAccessDenied PrivateConnectionProvisioningFailureCause = "ACCESS_DENIED" + PrivateConnectionProvisioningFailureCauseValidation PrivateConnectionProvisioningFailureCause = "VALIDATION" +) + +// Values returns all known values for PrivateConnectionProvisioningFailureCause. +// Note that this can be expanded in the future, and so it is only as up to date as +// the client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (PrivateConnectionProvisioningFailureCause) Values() []PrivateConnectionProvisioningFailureCause { + return []PrivateConnectionProvisioningFailureCause{ + "CONNECTOR_AUTHENTICATION", + "CONNECTOR_SERVER", + "INTERNAL_SERVER", + "ACCESS_DENIED", + "VALIDATION", + } +} + +type PrivateConnectionProvisioningStatus string + +// Enum values for PrivateConnectionProvisioningStatus +const ( + PrivateConnectionProvisioningStatusFailed PrivateConnectionProvisioningStatus = "FAILED" + PrivateConnectionProvisioningStatusPending PrivateConnectionProvisioningStatus = "PENDING" + PrivateConnectionProvisioningStatusCreated PrivateConnectionProvisioningStatus = "CREATED" +) + +// Values returns all known values for PrivateConnectionProvisioningStatus. 
Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (PrivateConnectionProvisioningStatus) Values() []PrivateConnectionProvisioningStatus { + return []PrivateConnectionProvisioningStatus{ + "FAILED", + "PENDING", + "CREATED", + } +} + type S3ConnectorOperator string // Enum values for S3ConnectorOperator @@ -645,6 +693,62 @@ func (SalesforceConnectorOperator) Values() []SalesforceConnectorOperator { } } +type SAPODataConnectorOperator string + +// Enum values for SAPODataConnectorOperator +const ( + SAPODataConnectorOperatorProjection SAPODataConnectorOperator = "PROJECTION" + SAPODataConnectorOperatorLessThan SAPODataConnectorOperator = "LESS_THAN" + SAPODataConnectorOperatorContains SAPODataConnectorOperator = "CONTAINS" + SAPODataConnectorOperatorGreaterThan SAPODataConnectorOperator = "GREATER_THAN" + SAPODataConnectorOperatorBetween SAPODataConnectorOperator = "BETWEEN" + SAPODataConnectorOperatorLessThanOrEqualTo SAPODataConnectorOperator = "LESS_THAN_OR_EQUAL_TO" + SAPODataConnectorOperatorGreaterThanOrEqualTo SAPODataConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" + SAPODataConnectorOperatorEqualTo SAPODataConnectorOperator = "EQUAL_TO" + SAPODataConnectorOperatorNotEqualTo SAPODataConnectorOperator = "NOT_EQUAL_TO" + SAPODataConnectorOperatorAddition SAPODataConnectorOperator = "ADDITION" + SAPODataConnectorOperatorMultiplication SAPODataConnectorOperator = "MULTIPLICATION" + SAPODataConnectorOperatorDivision SAPODataConnectorOperator = "DIVISION" + SAPODataConnectorOperatorSubtraction SAPODataConnectorOperator = "SUBTRACTION" + SAPODataConnectorOperatorMaskAll SAPODataConnectorOperator = "MASK_ALL" + SAPODataConnectorOperatorMaskFirstN SAPODataConnectorOperator = "MASK_FIRST_N" + SAPODataConnectorOperatorMaskLastN SAPODataConnectorOperator = "MASK_LAST_N" + SAPODataConnectorOperatorValidateNonNull SAPODataConnectorOperator = 
"VALIDATE_NON_NULL" + SAPODataConnectorOperatorValidateNonZero SAPODataConnectorOperator = "VALIDATE_NON_ZERO" + SAPODataConnectorOperatorValidateNonNegative SAPODataConnectorOperator = "VALIDATE_NON_NEGATIVE" + SAPODataConnectorOperatorValidateNumeric SAPODataConnectorOperator = "VALIDATE_NUMERIC" + SAPODataConnectorOperatorNoOp SAPODataConnectorOperator = "NO_OP" +) + +// Values returns all known values for SAPODataConnectorOperator. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (SAPODataConnectorOperator) Values() []SAPODataConnectorOperator { + return []SAPODataConnectorOperator{ + "PROJECTION", + "LESS_THAN", + "CONTAINS", + "GREATER_THAN", + "BETWEEN", + "LESS_THAN_OR_EQUAL_TO", + "GREATER_THAN_OR_EQUAL_TO", + "EQUAL_TO", + "NOT_EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP", + } +} + type ScheduleFrequencyType string // Enum values for ScheduleFrequencyType diff --git a/service/appflow/types/types.go b/service/appflow/types/types.go index 4f177ea839e..4119cea1676 100644 --- a/service/appflow/types/types.go +++ b/service/appflow/types/types.go @@ -56,6 +56,22 @@ type AmplitudeSourceProperties struct { noSmithyDocumentSerde } +// The basic auth credentials required for basic authentication. +type BasicAuthCredentials struct { + + // The password to use to connect to a resource. + // + // This member is required. + Password *string + + // The username to use to connect to a resource. + // + // This member is required. + Username *string + + noSmithyDocumentSerde +} + // The configuration settings related to a given connector. 
type ConnectorConfiguration struct { @@ -177,6 +193,9 @@ type ConnectorMetadata struct { // The connector metadata specific to Amazon S3. S3 *S3Metadata + // The connector metadata specific to SAPOData. + SAPOData *SAPODataMetadata + // The connector metadata specific to Salesforce. Salesforce *SalesforceMetadata @@ -246,6 +265,9 @@ type ConnectorOperator struct { // The operation to be performed on the provided Amazon S3 source fields. S3 S3ConnectorOperator + // The operation to be performed on the provided SAPOData source fields. + SAPOData SAPODataConnectorOperator + // The operation to be performed on the provided Salesforce source fields. Salesforce SalesforceConnectorOperator @@ -284,7 +306,7 @@ type ConnectorProfile struct { ConnectorProfileArn *string // The name of the connector profile. The name is unique for each ConnectorProfile - // in the AWS account. + // in the Amazon Web Services account. ConnectorProfileName *string // The connector-specific properties of the profile configuration. @@ -302,6 +324,9 @@ type ConnectorProfile struct { // Specifies when the connector profile was last updated. LastUpdatedAt *time.Time + // Specifies the private connection provisioning state. + PrivateConnectionProvisioningState *PrivateConnectionProvisioningState + noSmithyDocumentSerde } @@ -349,6 +374,9 @@ type ConnectorProfileCredentials struct { // The connector-specific credentials required when using Amazon Redshift. Redshift *RedshiftConnectorProfileCredentials + // The connector-specific profile credentials required when using SAPOData. + SAPOData *SAPODataConnectorProfileCredentials + // The connector-specific credentials required when using Salesforce. Salesforce *SalesforceConnectorProfileCredentials @@ -403,6 +431,9 @@ type ConnectorProfileProperties struct { // The connector-specific properties required by Amazon Redshift. Redshift *RedshiftConnectorProfileProperties + // The connector-specific profile properties required when using SAPOData. 
+ SAPOData *SAPODataConnectorProfileProperties + // The connector-specific properties required by Salesforce. Salesforce *SalesforceConnectorProfileProperties @@ -574,7 +605,7 @@ type DestinationFlowConfig struct { DestinationConnectorProperties *DestinationConnectorProperties // The name of the connector profile. This name must be unique for each connector - // profile in the AWS account. + // profile in the Amazon Web Services account. ConnectorProfileName *string noSmithyDocumentSerde @@ -1031,6 +1062,56 @@ type MarketoSourceProperties struct { noSmithyDocumentSerde } +// The OAuth credentials required for OAuth type authentication. +type OAuthCredentials struct { + + // The identifier for the desired client. + // + // This member is required. + ClientId *string + + // The client secret used by the OAuth client to authenticate to the authorization + // server. + // + // This member is required. + ClientSecret *string + + // The access token used to access protected SAPOData resources. + AccessToken *string + + // The OAuth requirement needed to request security tokens from the connector + // endpoint. + OAuthRequest *ConnectorOAuthRequest + + // The refresh token used to refresh expired access token. + RefreshToken *string + + noSmithyDocumentSerde +} + +// The OAuth properties required for OAuth type authentication. +type OAuthProperties struct { + + // The authorization code url required to redirect to SAP Login Page to fetch + // authorization code for OAuth type authentication. + // + // This member is required. + AuthCodeUrl *string + + // The OAuth scopes required for OAuth type authentication. + // + // This member is required. + OAuthScopes []string + + // The token url required to fetch access/refresh tokens using authorization code + // and also to refresh expired access token using refresh token. + // + // This member is required. 
+ TokenUrl *string + + noSmithyDocumentSerde +} + // Determines the prefix that Amazon AppFlow applies to the destination folder // name. You can name your destination folders according to the flow frequency and // date. @@ -1046,6 +1127,21 @@ type PrefixConfig struct { noSmithyDocumentSerde } +// Specifies the private connection provisioning state. +type PrivateConnectionProvisioningState struct { + + // Specifies the private connection provisioning failure cause. + FailureCause PrivateConnectionProvisioningFailureCause + + // Specifies the private connection provisioning failure reason. + FailureMessage *string + + // Specifies the private connection provisioning status. + Status PrivateConnectionProvisioningStatus + + noSmithyDocumentSerde +} + // The connector-specific profile credentials required when using Amazon Redshift. type RedshiftConnectorProfileCredentials struct { @@ -1263,6 +1359,67 @@ type SalesforceSourceProperties struct { noSmithyDocumentSerde } +// The connector-specific profile credentials required when using SAPOData. +type SAPODataConnectorProfileCredentials struct { + + // The SAPOData basic authentication credentials. + BasicAuthCredentials *BasicAuthCredentials + + // The SAPOData OAuth type authentication credentials. + OAuthCredentials *OAuthCredentials + + noSmithyDocumentSerde +} + +// The connector-specific profile properties required when using SAPOData. +type SAPODataConnectorProfileProperties struct { + + // The location of the SAPOData resource. + // + // This member is required. + ApplicationHostUrl *string + + // The application path to catalog service. + // + // This member is required. + ApplicationServicePath *string + + // The client number for the client creating the connection. + // + // This member is required. + ClientNumber *string + + // The port number of the SAPOData instance. + // + // This member is required. + PortNumber int32 + + // The logon language of SAPOData instance. 
+ LogonLanguage *string + + // The SAPOData OAuth properties required for OAuth type authentication. + OAuthProperties *OAuthProperties + + // The SAPOData Private Link service name to be used for private data transfers. + PrivateLinkServiceName *string + + noSmithyDocumentSerde +} + +// The connector metadata specific to SAPOData. +type SAPODataMetadata struct { + noSmithyDocumentSerde +} + +// The properties that are applied when using SAPOData as a flow source. +type SAPODataSourceProperties struct { + + // The object path specified in the SAPOData flow source. + ObjectPath *string + + noSmithyDocumentSerde +} + // Specifies the configuration details of a schedule-triggered flow as defined by // the user. Currently, these settings only apply to the Scheduled trigger type. type ScheduledTriggerProperties struct { @@ -1474,7 +1631,7 @@ type SnowflakeConnectorProfileProperties struct { // The Snowflake Private Link service name to be used for private data transfers. PrivateLinkServiceName *string - // The AWS Region of the Snowflake account. + // The Amazon Web Services Region of the Snowflake account. Region *string noSmithyDocumentSerde @@ -1511,7 +1668,7 @@ type SnowflakeDestinationProperties struct { // The connector metadata specific to Snowflake. type SnowflakeMetadata struct { - // Specifies the supported AWS Regions when using Snowflake. + // Specifies the supported Amazon Web Services Regions when using Snowflake. SupportedRegions []string noSmithyDocumentSerde @@ -1541,6 +1698,9 @@ type SourceConnectorProperties struct { // Specifies the information that is required for querying Amazon S3. S3 *S3SourceProperties + // The properties that are applied when using SAPOData as a flow source. + SAPOData *SAPODataSourceProperties + // Specifies the information that is required for querying Salesforce. 
Salesforce *SalesforceSourceProperties @@ -1594,7 +1754,7 @@ type SourceFlowConfig struct { SourceConnectorProperties *SourceConnectorProperties // The name of the connector profile. This name must be unique for each connector - // profile in the AWS account. + // profile in the Amazon Web Services account. ConnectorProfileName *string // Defines the configuration for a scheduled incremental data pull. If a valid @@ -1794,6 +1954,18 @@ type VeevaSourceProperties struct { // This member is required. Object *string + // The document type specified in the Veeva document extract flow. + DocumentType *string + + // Boolean value to include All Versions of files in Veeva document extract flow. + IncludeAllVersions bool + + // Boolean value to include file renditions in Veeva document extract flow. + IncludeRenditions bool + + // Boolean value to include source files in Veeva document extract flow. + IncludeSourceFiles bool + noSmithyDocumentSerde } diff --git a/service/appflow/validators.go b/service/appflow/validators.go index 1aac09db041..a14c2c1be81 100644 --- a/service/appflow/validators.go +++ b/service/appflow/validators.go @@ -379,6 +379,24 @@ func validateAmplitudeSourceProperties(v *types.AmplitudeSourceProperties) error } } +func validateBasicAuthCredentials(v *types.BasicAuthCredentials) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BasicAuthCredentials"} + if v.Username == nil { + invalidParams.Add(smithy.NewErrParamRequired("Username")) + } + if v.Password == nil { + invalidParams.Add(smithy.NewErrParamRequired("Password")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateConnectorProfileConfig(v *types.ConnectorProfileConfig) error { if v == nil { return nil @@ -480,6 +498,11 @@ func validateConnectorProfileCredentials(v *types.ConnectorProfileCredentials) e invalidParams.AddNested("Zendesk", err.(smithy.InvalidParamsError)) } } + if v.SAPOData != nil { 
+ if err := validateSAPODataConnectorProfileCredentials(v.SAPOData); err != nil { + invalidParams.AddNested("SAPOData", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -542,6 +565,11 @@ func validateConnectorProfileProperties(v *types.ConnectorProfileProperties) err invalidParams.AddNested("Zendesk", err.(smithy.InvalidParamsError)) } } + if v.SAPOData != nil { + if err := validateSAPODataConnectorProfileProperties(v.SAPOData); err != nil { + invalidParams.AddNested("SAPOData", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -918,6 +946,45 @@ func validateMarketoSourceProperties(v *types.MarketoSourceProperties) error { } } +func validateOAuthCredentials(v *types.OAuthCredentials) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OAuthCredentials"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOAuthProperties(v *types.OAuthProperties) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OAuthProperties"} + if v.TokenUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("TokenUrl")) + } + if v.AuthCodeUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("AuthCodeUrl")) + } + if v.OAuthScopes == nil { + invalidParams.Add(smithy.NewErrParamRequired("OAuthScopes")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateRedshiftConnectorProfileCredentials(v *types.RedshiftConnectorProfileCredentials) error { if v == nil { return nil @@ -1035,6 +1102,54 @@ func validateSalesforceSourceProperties(v *types.SalesforceSourceProperties) err } } +func 
validateSAPODataConnectorProfileCredentials(v *types.SAPODataConnectorProfileCredentials) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SAPODataConnectorProfileCredentials"} + if v.BasicAuthCredentials != nil { + if err := validateBasicAuthCredentials(v.BasicAuthCredentials); err != nil { + invalidParams.AddNested("BasicAuthCredentials", err.(smithy.InvalidParamsError)) + } + } + if v.OAuthCredentials != nil { + if err := validateOAuthCredentials(v.OAuthCredentials); err != nil { + invalidParams.AddNested("OAuthCredentials", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSAPODataConnectorProfileProperties(v *types.SAPODataConnectorProfileProperties) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SAPODataConnectorProfileProperties"} + if v.ApplicationHostUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("ApplicationHostUrl")) + } + if v.ApplicationServicePath == nil { + invalidParams.Add(smithy.NewErrParamRequired("ApplicationServicePath")) + } + if v.ClientNumber == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientNumber")) + } + if v.OAuthProperties != nil { + if err := validateOAuthProperties(v.OAuthProperties); err != nil { + invalidParams.AddNested("OAuthProperties", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateScheduledTriggerProperties(v *types.ScheduledTriggerProperties) error { if v == nil { return nil @@ -1854,7 +1969,9 @@ func validateOpUpdateFlowInput(v *UpdateFlowInput) error { invalidParams.AddNested("TriggerConfig", err.(smithy.InvalidParamsError)) } } - if v.SourceFlowConfig != nil { + if v.SourceFlowConfig == nil { + invalidParams.Add(smithy.NewErrParamRequired("SourceFlowConfig")) + } else if v.SourceFlowConfig != nil { if err := 
validateSourceFlowConfig(v.SourceFlowConfig); err != nil { invalidParams.AddNested("SourceFlowConfig", err.(smithy.InvalidParamsError)) } diff --git a/service/applicationautoscaling/api_op_DeleteScalingPolicy.go b/service/applicationautoscaling/api_op_DeleteScalingPolicy.go index f4dc5cc1691..7ff3a43a040 100644 --- a/service/applicationautoscaling/api_op_DeleteScalingPolicy.go +++ b/service/applicationautoscaling/api_op_DeleteScalingPolicy.go @@ -109,6 +109,11 @@ type DeleteScalingPolicyInput struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -118,12 +123,12 @@ type DeleteScalingPolicyInput struct { // * ecs:service:DesiredCount - The desired task count // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. + // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet + // request. // // * appstream:fleet:DesiredCapacity - The desired capacity of an // AppStream 2.0 fleet. @@ -175,11 +180,19 @@ type DeleteScalingPolicyInput struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. 
+ // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension types.ScalableDimension - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace diff --git a/service/applicationautoscaling/api_op_DeleteScheduledAction.go b/service/applicationautoscaling/api_op_DeleteScheduledAction.go index 8f4836324a6..48dc353a21f 100644 --- a/service/applicationautoscaling/api_op_DeleteScheduledAction.go +++ b/service/applicationautoscaling/api_op_DeleteScheduledAction.go @@ -99,6 +99,11 @@ type DeleteScheduledActionInput struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -108,12 +113,12 @@ type DeleteScheduledActionInput struct { // * ecs:service:DesiredCount - The desired task count // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. + // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet + // request. 
// // * appstream:fleet:DesiredCapacity - The desired capacity of an // AppStream 2.0 fleet. @@ -165,6 +170,13 @@ type DeleteScheduledActionInput struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension types.ScalableDimension @@ -173,8 +185,9 @@ type DeleteScheduledActionInput struct { // This member is required. ScheduledActionName *string - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace diff --git a/service/applicationautoscaling/api_op_DeregisterScalableTarget.go b/service/applicationautoscaling/api_op_DeregisterScalableTarget.go index ac487433970..c7d0aa9ec5a 100644 --- a/service/applicationautoscaling/api_op_DeregisterScalableTarget.go +++ b/service/applicationautoscaling/api_op_DeregisterScalableTarget.go @@ -101,6 +101,11 @@ type DeregisterScalableTargetInput struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. 
ResourceId *string @@ -111,35 +116,36 @@ type DeregisterScalableTargetInput struct { // ecs:service:DesiredCount - The desired task count of an ECS service. // // * - // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet - // request. + // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + // Instance Group. // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity + // of a Spot Fleet request. // - // * appstream:fleet:DesiredCapacity - The desired capacity - // of an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired + // capacity of an AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The provisioned - // read capacity for a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The - // provisioned write capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The + // provisioned read capacity for a DynamoDB table. // // * - // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB - // global secondary index. + // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + // DynamoDB table. // - // * dynamodb:index:WriteCapacityUnits - The provisioned - // write capacity for a DynamoDB global secondary index. + // * dynamodb:index:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB global secondary index. // // * - // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB - // cluster. Available for Aurora MySQL-compatible edition and Aurora - // PostgreSQL-compatible edition. + // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a + // DynamoDB global secondary index. + // + // * rds:cluster:ReadReplicaCount - The count of + // Aurora Replicas in an Aurora DB cluster. 
Available for Aurora MySQL-compatible + // edition and Aurora PostgreSQL-compatible edition. // - // * sagemaker:variant:DesiredInstanceCount - The - // number of EC2 instances for an Amazon SageMaker model endpoint variant. + // * + // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an + // Amazon SageMaker model endpoint variant. // // * // custom-resource:ResourceType:Property - The scalable dimension for a custom @@ -167,11 +173,19 @@ type DeregisterScalableTargetInput struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension types.ScalableDimension - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace diff --git a/service/applicationautoscaling/api_op_DescribeScalableTargets.go b/service/applicationautoscaling/api_op_DescribeScalableTargets.go index 78653394b35..2d505eac6d3 100644 --- a/service/applicationautoscaling/api_op_DescribeScalableTargets.go +++ b/service/applicationautoscaling/api_op_DescribeScalableTargets.go @@ -31,8 +31,9 @@ func (c *Client) DescribeScalableTargets(ctx context.Context, params *DescribeSc type DescribeScalableTargetsInput struct { - // The namespace of the AWS service that provides the resource. 
For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace @@ -114,6 +115,11 @@ type DescribeScalableTargetsInput struct { // * Amazon MSK cluster - The resource type and // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. ResourceIds []string // The scalable dimension associated with the scalable target. This string consists @@ -124,35 +130,36 @@ type DescribeScalableTargetsInput struct { // ecs:service:DesiredCount - The desired task count of an ECS service. // // * - // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet - // request. + // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + // Instance Group. // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity + // of a Spot Fleet request. // - // * appstream:fleet:DesiredCapacity - The desired capacity - // of an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired + // capacity of an AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The provisioned - // read capacity for a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The - // provisioned write capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The + // provisioned read capacity for a DynamoDB table. 
// // * - // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB - // global secondary index. + // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + // DynamoDB table. // - // * dynamodb:index:WriteCapacityUnits - The provisioned - // write capacity for a DynamoDB global secondary index. + // * dynamodb:index:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB global secondary index. // // * - // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB - // cluster. Available for Aurora MySQL-compatible edition and Aurora - // PostgreSQL-compatible edition. + // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a + // DynamoDB global secondary index. + // + // * rds:cluster:ReadReplicaCount - The count of + // Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + // edition and Aurora PostgreSQL-compatible edition. // - // * sagemaker:variant:DesiredInstanceCount - The - // number of EC2 instances for an Amazon SageMaker model endpoint variant. + // * + // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an + // Amazon SageMaker model endpoint variant. // // * // custom-resource:ResourceType:Property - The scalable dimension for a custom @@ -179,6 +186,13 @@ type DescribeScalableTargetsInput struct { // * // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. + // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. 
ScalableDimension types.ScalableDimension noSmithyDocumentSerde diff --git a/service/applicationautoscaling/api_op_DescribeScalingActivities.go b/service/applicationautoscaling/api_op_DescribeScalingActivities.go index 8bf145fb727..1a30b59ae94 100644 --- a/service/applicationautoscaling/api_op_DescribeScalingActivities.go +++ b/service/applicationautoscaling/api_op_DescribeScalingActivities.go @@ -32,8 +32,9 @@ func (c *Client) DescribeScalingActivities(ctx context.Context, params *Describe type DescribeScalingActivitiesInput struct { - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace @@ -115,6 +116,11 @@ type DescribeScalingActivitiesInput struct { // * Amazon MSK cluster - The resource type and // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. ResourceId *string // The scalable dimension. This string consists of the service namespace, resource @@ -124,11 +130,11 @@ type DescribeScalingActivitiesInput struct { // * ecs:service:DesiredCount - The desired task count of // an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity - // of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The instance + // count of an EMR Instance Group. // - // * elasticmapreduce:instancegroup:InstanceCount - The - // instance count of an EMR Instance Group. 
+ // * ec2:spot-fleet-request:TargetCapacity - The + // target capacity of a Spot Fleet request. // // * appstream:fleet:DesiredCapacity - // The desired capacity of an AppStream 2.0 fleet. @@ -180,6 +186,13 @@ type DescribeScalingActivitiesInput struct { // * // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. + // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. ScalableDimension types.ScalableDimension noSmithyDocumentSerde diff --git a/service/applicationautoscaling/api_op_DescribeScalingPolicies.go b/service/applicationautoscaling/api_op_DescribeScalingPolicies.go index 7db3b39ad9d..f263b9b25c2 100644 --- a/service/applicationautoscaling/api_op_DescribeScalingPolicies.go +++ b/service/applicationautoscaling/api_op_DescribeScalingPolicies.go @@ -37,8 +37,9 @@ func (c *Client) DescribeScalingPolicies(ctx context.Context, params *DescribeSc type DescribeScalingPoliciesInput struct { - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace @@ -123,6 +124,11 @@ type DescribeScalingPoliciesInput struct { // * Amazon MSK cluster - The resource type and // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. 
+ // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. ResourceId *string // The scalable dimension. This string consists of the service namespace, resource @@ -132,11 +138,11 @@ type DescribeScalingPoliciesInput struct { // * ecs:service:DesiredCount - The desired task count of // an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity - // of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The instance + // count of an EMR Instance Group. // - // * elasticmapreduce:instancegroup:InstanceCount - The - // instance count of an EMR Instance Group. + // * ec2:spot-fleet-request:TargetCapacity - The + // target capacity of a Spot Fleet request. // // * appstream:fleet:DesiredCapacity - // The desired capacity of an AppStream 2.0 fleet. @@ -188,6 +194,13 @@ type DescribeScalingPoliciesInput struct { // * // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. + // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. 
ScalableDimension types.ScalableDimension noSmithyDocumentSerde diff --git a/service/applicationautoscaling/api_op_DescribeScheduledActions.go b/service/applicationautoscaling/api_op_DescribeScheduledActions.go index 2bd28eea4c2..c0cdc6c6713 100644 --- a/service/applicationautoscaling/api_op_DescribeScheduledActions.go +++ b/service/applicationautoscaling/api_op_DescribeScheduledActions.go @@ -37,8 +37,9 @@ func (c *Client) DescribeScheduledActions(ctx context.Context, params *DescribeS type DescribeScheduledActionsInput struct { - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace @@ -120,6 +121,11 @@ type DescribeScheduledActionsInput struct { // * Amazon MSK cluster - The resource type and // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. ResourceId *string // The scalable dimension. This string consists of the service namespace, resource @@ -129,11 +135,11 @@ type DescribeScheduledActionsInput struct { // * ecs:service:DesiredCount - The desired task count of // an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity - // of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The instance + // count of an EMR Instance Group. // - // * elasticmapreduce:instancegroup:InstanceCount - The - // instance count of an EMR Instance Group. 
+ // * ec2:spot-fleet-request:TargetCapacity - The + // target capacity of a Spot Fleet request. // // * appstream:fleet:DesiredCapacity - // The desired capacity of an AppStream 2.0 fleet. @@ -185,6 +191,13 @@ type DescribeScheduledActionsInput struct { // * // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. + // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. ScalableDimension types.ScalableDimension // The names of the scheduled actions to describe. diff --git a/service/applicationautoscaling/api_op_PutScalingPolicy.go b/service/applicationautoscaling/api_op_PutScalingPolicy.go index 2a6f9fd63d7..904b97ad09d 100644 --- a/service/applicationautoscaling/api_op_PutScalingPolicy.go +++ b/service/applicationautoscaling/api_op_PutScalingPolicy.go @@ -128,6 +128,11 @@ type PutScalingPolicyInput struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -137,12 +142,12 @@ type PutScalingPolicyInput struct { // * ecs:service:DesiredCount - The desired task count // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. 
+ // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet + // request. // // * appstream:fleet:DesiredCapacity - The desired capacity of an // AppStream 2.0 fleet. @@ -194,11 +199,19 @@ type PutScalingPolicyInput struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension types.ScalableDimension - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace @@ -206,8 +219,9 @@ type PutScalingPolicyInput struct { // The policy type. This parameter is required if you are creating a scaling // policy. The following policy types are supported: TargetTrackingScaling—Not // supported for Amazon EMR StepScaling—Not supported for DynamoDB, Amazon - // Comprehend, Lambda, Amazon Keyspaces (for Apache Cassandra), or Amazon MSK. For - // more information, see Target tracking scaling policies + // Comprehend, Lambda, Amazon Keyspaces (for Apache Cassandra), Amazon MSK, or + // Amazon ElastiCache for Redis. 
For more information, see Target tracking scaling + // policies // (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) // and Step scaling policies // (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) diff --git a/service/applicationautoscaling/api_op_PutScheduledAction.go b/service/applicationautoscaling/api_op_PutScheduledAction.go index 10d56876cf1..8958e99880b 100644 --- a/service/applicationautoscaling/api_op_PutScheduledAction.go +++ b/service/applicationautoscaling/api_op_PutScheduledAction.go @@ -111,6 +111,11 @@ type PutScheduledActionInput struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -120,12 +125,12 @@ type PutScheduledActionInput struct { // * ecs:service:DesiredCount - The desired task count // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. + // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet + // request. // // * appstream:fleet:DesiredCapacity - The desired capacity of an // AppStream 2.0 fleet. @@ -177,6 +182,13 @@ type PutScheduledActionInput struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. 
// + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension types.ScalableDimension @@ -186,8 +198,9 @@ type PutScheduledActionInput struct { // This member is required. ScheduledActionName *string - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. ServiceNamespace types.ServiceNamespace diff --git a/service/applicationautoscaling/api_op_RegisterScalableTarget.go b/service/applicationautoscaling/api_op_RegisterScalableTarget.go index 66c523a2d1c..a844efe05d2 100644 --- a/service/applicationautoscaling/api_op_RegisterScalableTarget.go +++ b/service/applicationautoscaling/api_op_RegisterScalableTarget.go @@ -116,6 +116,11 @@ type RegisterScalableTargetInput struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -126,35 +131,36 @@ type RegisterScalableTargetInput struct { // ecs:service:DesiredCount - The desired task count of an ECS service. // // * - // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet - // request. 
+ // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + // Instance Group. // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity + // of a Spot Fleet request. // - // * appstream:fleet:DesiredCapacity - The desired capacity - // of an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired + // capacity of an AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The provisioned - // read capacity for a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The - // provisioned write capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The + // provisioned read capacity for a DynamoDB table. // // * - // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB - // global secondary index. + // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + // DynamoDB table. // - // * dynamodb:index:WriteCapacityUnits - The provisioned - // write capacity for a DynamoDB global secondary index. + // * dynamodb:index:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB global secondary index. // // * - // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB - // cluster. Available for Aurora MySQL-compatible edition and Aurora - // PostgreSQL-compatible edition. + // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a + // DynamoDB global secondary index. + // + // * rds:cluster:ReadReplicaCount - The count of + // Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + // edition and Aurora PostgreSQL-compatible edition. // - // * sagemaker:variant:DesiredInstanceCount - The - // number of EC2 instances for an Amazon SageMaker model endpoint variant. 
+ // * + // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an + // Amazon SageMaker model endpoint variant. // // * // custom-resource:ResourceType:Property - The scalable dimension for a custom @@ -182,11 +188,19 @@ type RegisterScalableTargetInput struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension types.ScalableDimension - // The namespace of the AWS service that provides the resource. For a resource - // provided by your own application or service, use custom-resource instead. + // The namespace of the Amazon Web Services service that provides the resource. For + // a resource provided by your own application or service, use custom-resource + // instead. // // This member is required. 
ServiceNamespace types.ServiceNamespace diff --git a/service/applicationautoscaling/doc.go b/service/applicationautoscaling/doc.go index 0c5b2e27656..42dc35eee40 100644 --- a/service/applicationautoscaling/doc.go +++ b/service/applicationautoscaling/doc.go @@ -6,52 +6,55 @@ // With Application Auto Scaling, you can configure automatic scaling for the // following resources: // -// * Amazon ECS services +// * Amazon AppStream 2.0 fleets // -// * Amazon EC2 Spot Fleet requests +// * Amazon Aurora Replicas // // * -// Amazon EMR clusters +// Amazon Comprehend document classification and entity recognizer endpoints // -// * Amazon AppStream 2.0 fleets +// * +// Amazon DynamoDB tables and global secondary indexes throughput capacity // -// * Amazon DynamoDB tables and -// global secondary indexes throughput capacity +// * +// Amazon ECS services // -// * Amazon Aurora Replicas +// * Amazon ElastiCache for Redis clusters (replication +// groups) // -// * Amazon -// SageMaker endpoint variants +// * Amazon EMR clusters // -// * Custom resources provided by your own -// applications or services +// * Amazon Keyspaces (for Apache Cassandra) +// tables +// +// * Lambda function provisioned concurrency // -// * Amazon Comprehend document classification and entity -// recognizer endpoints +// * Amazon Managed Streaming +// for Apache Kafka broker storage // -// * AWS Lambda function provisioned concurrency +// * Amazon SageMaker endpoint variants // -// * Amazon -// Keyspaces (for Apache Cassandra) tables +// * Spot +// Fleet (Amazon EC2) requests // -// * Amazon Managed Streaming for Apache -// Kafka broker storage +// * Custom resources provided by your own +// applications or services // // API Summary The Application Auto Scaling service API // includes three key sets of actions: // // * Register and manage scalable targets - -// Register AWS or custom resources as scalable targets (a resource that -// Application Auto Scaling can scale), set minimum and maximum capacity 
limits, -// and retrieve information on existing scalable targets. +// Register Amazon Web Services or custom resources as scalable targets (a resource +// that Application Auto Scaling can scale), set minimum and maximum capacity +// limits, and retrieve information on existing scalable targets. // -// * Configure and manage -// automatic scaling - Define scaling policies to dynamically scale your resources -// in response to CloudWatch alarms, schedule one-time or recurring scaling -// actions, and retrieve your recent scaling activity history. +// * Configure and +// manage automatic scaling - Define scaling policies to dynamically scale your +// resources in response to CloudWatch alarms, schedule one-time or recurring +// scaling actions, and retrieve your recent scaling activity history. // -// * Suspend and -// resume scaling - Temporarily suspend and later resume automatic scaling by +// * Suspend +// and resume scaling - Temporarily suspend and later resume automatic scaling by // calling the RegisterScalableTarget // (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) // API action for any Application Auto Scaling scalable target. 
You can suspend and diff --git a/service/applicationautoscaling/types/enums.go b/service/applicationautoscaling/types/enums.go index 3d1d9244677..99173cf61a9 100644 --- a/service/applicationautoscaling/types/enums.go +++ b/service/applicationautoscaling/types/enums.go @@ -70,23 +70,26 @@ type MetricType string // Enum values for MetricType const ( - MetricTypeDynamoDBReadCapacityUtilization MetricType = "DynamoDBReadCapacityUtilization" - MetricTypeDynamoDBWriteCapacityUtilization MetricType = "DynamoDBWriteCapacityUtilization" - MetricTypeALBRequestCountPerTarget MetricType = "ALBRequestCountPerTarget" - MetricTypeRDSReaderAverageCPUUtilization MetricType = "RDSReaderAverageCPUUtilization" - MetricTypeRDSReaderAverageDatabaseConnections MetricType = "RDSReaderAverageDatabaseConnections" - MetricTypeEC2SpotFleetRequestAverageCPUUtilization MetricType = "EC2SpotFleetRequestAverageCPUUtilization" - MetricTypeEC2SpotFleetRequestAverageNetworkIn MetricType = "EC2SpotFleetRequestAverageNetworkIn" - MetricTypeEC2SpotFleetRequestAverageNetworkOut MetricType = "EC2SpotFleetRequestAverageNetworkOut" - MetricTypeSageMakerVariantInvocationsPerInstance MetricType = "SageMakerVariantInvocationsPerInstance" - MetricTypeECSServiceAverageCPUUtilization MetricType = "ECSServiceAverageCPUUtilization" - MetricTypeECSServiceAverageMemoryUtilization MetricType = "ECSServiceAverageMemoryUtilization" - MetricTypeAppStreamAverageCapacityUtilization MetricType = "AppStreamAverageCapacityUtilization" - MetricTypeComprehendInferenceUtilization MetricType = "ComprehendInferenceUtilization" - MetricTypeLambdaProvisionedConcurrencyUtilization MetricType = "LambdaProvisionedConcurrencyUtilization" - MetricTypeCassandraReadCapacityUtilization MetricType = "CassandraReadCapacityUtilization" - MetricTypeCassandraWriteCapacityUtilization MetricType = "CassandraWriteCapacityUtilization" - MetricTypeKafkaBrokerStorageUtilization MetricType = "KafkaBrokerStorageUtilization" + 
MetricTypeDynamoDBReadCapacityUtilization MetricType = "DynamoDBReadCapacityUtilization" + MetricTypeDynamoDBWriteCapacityUtilization MetricType = "DynamoDBWriteCapacityUtilization" + MetricTypeALBRequestCountPerTarget MetricType = "ALBRequestCountPerTarget" + MetricTypeRDSReaderAverageCPUUtilization MetricType = "RDSReaderAverageCPUUtilization" + MetricTypeRDSReaderAverageDatabaseConnections MetricType = "RDSReaderAverageDatabaseConnections" + MetricTypeEC2SpotFleetRequestAverageCPUUtilization MetricType = "EC2SpotFleetRequestAverageCPUUtilization" + MetricTypeEC2SpotFleetRequestAverageNetworkIn MetricType = "EC2SpotFleetRequestAverageNetworkIn" + MetricTypeEC2SpotFleetRequestAverageNetworkOut MetricType = "EC2SpotFleetRequestAverageNetworkOut" + MetricTypeSageMakerVariantInvocationsPerInstance MetricType = "SageMakerVariantInvocationsPerInstance" + MetricTypeECSServiceAverageCPUUtilization MetricType = "ECSServiceAverageCPUUtilization" + MetricTypeECSServiceAverageMemoryUtilization MetricType = "ECSServiceAverageMemoryUtilization" + MetricTypeAppStreamAverageCapacityUtilization MetricType = "AppStreamAverageCapacityUtilization" + MetricTypeComprehendInferenceUtilization MetricType = "ComprehendInferenceUtilization" + MetricTypeLambdaProvisionedConcurrencyUtilization MetricType = "LambdaProvisionedConcurrencyUtilization" + MetricTypeCassandraReadCapacityUtilization MetricType = "CassandraReadCapacityUtilization" + MetricTypeCassandraWriteCapacityUtilization MetricType = "CassandraWriteCapacityUtilization" + MetricTypeKafkaBrokerStorageUtilization MetricType = "KafkaBrokerStorageUtilization" + MetricTypeElastiCachePrimaryEngineCPUUtilization MetricType = "ElastiCachePrimaryEngineCPUUtilization" + MetricTypeElastiCacheReplicaEngineCPUUtilization MetricType = "ElastiCacheReplicaEngineCPUUtilization" + MetricTypeElastiCacheDatabaseMemoryUsageCountedForEvictPercentage MetricType = "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage" ) // Values returns all known 
values for MetricType. Note that this can be expanded @@ -111,6 +114,9 @@ func (MetricType) Values() []MetricType { "CassandraReadCapacityUtilization", "CassandraWriteCapacityUtilization", "KafkaBrokerStorageUtilization", + "ElastiCachePrimaryEngineCPUUtilization", + "ElastiCacheReplicaEngineCPUUtilization", + "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage", } } @@ -153,6 +159,8 @@ const ( ScalableDimensionCassandraTableReadCapacityUnits ScalableDimension = "cassandra:table:ReadCapacityUnits" ScalableDimensionCassandraTableWriteCapacityUnits ScalableDimension = "cassandra:table:WriteCapacityUnits" ScalableDimensionKafkaBrokerStorageVolumeSize ScalableDimension = "kafka:broker-storage:VolumeSize" + ScalableDimensionElastiCacheReplicationGroupNodeGroups ScalableDimension = "elasticache:replication-group:NodeGroups" + ScalableDimensionElastiCacheReplicationGroupReplicas ScalableDimension = "elasticache:replication-group:Replicas" ) // Values returns all known values for ScalableDimension. Note that this can be @@ -177,6 +185,8 @@ func (ScalableDimension) Values() []ScalableDimension { "cassandra:table:ReadCapacityUnits", "cassandra:table:WriteCapacityUnits", "kafka:broker-storage:VolumeSize", + "elasticache:replication-group:NodeGroups", + "elasticache:replication-group:Replicas", } } @@ -222,6 +232,7 @@ const ( ServiceNamespaceLambda ServiceNamespace = "lambda" ServiceNamespaceCassandra ServiceNamespace = "cassandra" ServiceNamespaceKafka ServiceNamespace = "kafka" + ServiceNamespaceElasticache ServiceNamespace = "elasticache" ) // Values returns all known values for ServiceNamespace. 
Note that this can be @@ -241,5 +252,6 @@ func (ServiceNamespace) Values() []ServiceNamespace { "lambda", "cassandra", "kafka", + "elasticache", } } diff --git a/service/applicationautoscaling/types/types.go b/service/applicationautoscaling/types/types.go index 379b8f90e86..f685b4867b1 100644 --- a/service/applicationautoscaling/types/types.go +++ b/service/applicationautoscaling/types/types.go @@ -25,7 +25,8 @@ type Alarm struct { // Represents a CloudWatch metric of your choosing for a target tracking scaling // policy to use with Application Auto Scaling. For information about the available -// metrics for a service, see AWS Services That Publish CloudWatch Metrics +// metrics for a service, see Amazon Web Services Services That Publish CloudWatch +// Metrics // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html) // in the Amazon CloudWatch User Guide. To create your customized metric // specification: @@ -90,10 +91,11 @@ type MetricDimension struct { } // Represents a predefined metric for a target tracking scaling policy to use with -// Application Auto Scaling. Only the AWS services that you're using send metrics -// to Amazon CloudWatch. To determine whether a desired metric already exists by -// looking up its namespace and dimension using the CloudWatch metrics dashboard in -// the console, follow the procedure in Building dashboards with CloudWatch +// Application Auto Scaling. Only the Amazon Web Services that you're using send +// metrics to Amazon CloudWatch. To determine whether a desired metric already +// exists by looking up its namespace and dimension using the CloudWatch metrics +// dashboard in the console, follow the procedure in Building dashboards with +// CloudWatch // (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html) // in the Application Auto Scaling User Guide. 
type PredefinedMetricSpecification struct { @@ -109,17 +111,17 @@ type PredefinedMetricSpecification struct { // target group attached to the Spot Fleet request or ECS service. You create the // resource label by appending the final portion of the load balancer ARN and the // final portion of the target group ARN into a single value, separated by a - // forward slash (/). The format is app///targetgroup//, where: + // forward slash (/). The format of the resource label is: + // app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff. + // Where: // - // * app// is the - // final portion of the load balancer ARN + // * app// is the final portion of the load balancer ARN // - // * targetgroup// is the final portion of - // the target group ARN. + // * targetgroup// + // is the final portion of the target group ARN. // - // This is an example: - // app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d. - // To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers + // To find the ARN for an + // Application Load Balancer, use the DescribeLoadBalancers // (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) // API operation. To find the ARN for the target group, use the // DescribeTargetGroups @@ -215,6 +217,11 @@ type ScalableTarget struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -231,35 +238,36 @@ type ScalableTarget struct { // ecs:service:DesiredCount - The desired task count of an ECS service. 
// // * - // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet - // request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. + // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + // Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity - // of an AppStream 2.0 fleet. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity + // of a Spot Fleet request. // - // * dynamodb:table:ReadCapacityUnits - The provisioned - // read capacity for a DynamoDB table. + // * appstream:fleet:DesiredCapacity - The desired + // capacity of an AppStream 2.0 fleet. // - // * dynamodb:table:WriteCapacityUnits - The - // provisioned write capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The + // provisioned read capacity for a DynamoDB table. // // * - // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB - // global secondary index. + // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + // DynamoDB table. // - // * dynamodb:index:WriteCapacityUnits - The provisioned - // write capacity for a DynamoDB global secondary index. + // * dynamodb:index:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB global secondary index. // // * - // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB - // cluster. Available for Aurora MySQL-compatible edition and Aurora - // PostgreSQL-compatible edition. + // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a + // DynamoDB global secondary index. // - // * sagemaker:variant:DesiredInstanceCount - The - // number of EC2 instances for an Amazon SageMaker model endpoint variant. + // * rds:cluster:ReadReplicaCount - The count of + // Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + // edition and Aurora PostgreSQL-compatible edition. 
+ // + // * + // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an + // Amazon SageMaker model endpoint variant. // // * // custom-resource:ResourceType:Property - The scalable dimension for a custom @@ -287,11 +295,18 @@ type ScalableTarget struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension ScalableDimension - // The namespace of the AWS service that provides the resource, or a - // custom-resource. + // The namespace of the Amazon Web Services service that provides the resource, or + // a custom-resource. // // This member is required. ServiceNamespace ServiceNamespace @@ -410,6 +425,11 @@ type ScalingActivity struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -419,12 +439,12 @@ type ScalingActivity struct { // * ecs:service:DesiredCount - The desired task count // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. 
+ // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet + // request. // // * appstream:fleet:DesiredCapacity - The desired capacity of an // AppStream 2.0 fleet. @@ -476,11 +496,18 @@ type ScalingActivity struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension ScalableDimension - // The namespace of the AWS service that provides the resource, or a - // custom-resource. + // The namespace of the Amazon Web Services service that provides the resource, or + // a custom-resource. // // This member is required. ServiceNamespace ServiceNamespace @@ -601,6 +628,11 @@ type ScalingPolicy struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -610,12 +642,12 @@ type ScalingPolicy struct { // * ecs:service:DesiredCount - The desired task count // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. + // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet + // request. 
// // * appstream:fleet:DesiredCapacity - The desired capacity of an // AppStream 2.0 fleet. @@ -667,11 +699,18 @@ type ScalingPolicy struct { // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. + // // This member is required. ScalableDimension ScalableDimension - // The namespace of the AWS service that provides the resource, or a - // custom-resource. + // The namespace of the Amazon Web Services service that provides the resource, or + // a custom-resource. // // This member is required. ServiceNamespace ServiceNamespace @@ -763,6 +802,11 @@ type ScheduledAction struct { // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // + // * + // Amazon ElastiCache replication group - The resource type is replication-group + // and the unique identifier is the replication group name. Example: + // replication-group/mycluster. + // // This member is required. ResourceId *string @@ -801,8 +845,8 @@ type ScheduledAction struct { // This member is required. ScheduledActionName *string - // The namespace of the AWS service that provides the resource, or a - // custom-resource. + // The namespace of the Amazon Web Services service that provides the resource, or + // a custom-resource. // // This member is required. ServiceNamespace ServiceNamespace @@ -816,12 +860,12 @@ type ScheduledAction struct { // * ecs:service:DesiredCount - The desired task count // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. 
+ // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. + // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet + // request. // // * appstream:fleet:DesiredCapacity - The desired capacity of an // AppStream 2.0 fleet. @@ -872,6 +916,13 @@ type ScheduledAction struct { // * // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for // brokers in an Amazon MSK cluster. + // + // * elasticache:replication-group:NodeGroups - + // The number of node groups for an Amazon ElastiCache replication group. + // + // * + // elasticache:replication-group:Replicas - The number of replicas per node group + // for an Amazon ElastiCache replication group. ScalableDimension ScalableDimension // The new minimum and maximum capacity. You can set both values or just one. At @@ -980,7 +1031,8 @@ type StepScalingPolicyConfiguration struct { // cooldown period after a scale-in activity, Application Auto Scaling scales out // the target immediately. In this case, the cooldown period for the scale-in // activity stops and doesn't complete. Application Auto Scaling provides a default - // value of 300 for the following scalable targets: + // value of 600 for Amazon ElastiCache replication groups and a default value of + // 300 for the following scalable targets: // // * ECS services // @@ -1097,42 +1149,42 @@ type TargetTrackingScalingPolicyConfiguration struct { // expired. However, if another alarm triggers a scale-out activity during the // scale-in cooldown period, Application Auto Scaling scales out the target // immediately. In this case, the scale-in cooldown period stops and doesn't - // complete. Application Auto Scaling provides a default value of 300 for the - // following scalable targets: + // complete. 
Application Auto Scaling provides a default value of 600 for Amazon + // ElastiCache replication groups and a default value of 300 for the following + // scalable targets: // // * ECS services // // * Spot Fleet requests // - // * EMR - // clusters + // * EMR clusters // - // * AppStream 2.0 fleets + // * + // AppStream 2.0 fleets // // * Aurora DB clusters // - // * Amazon SageMaker - // endpoint variants + // * Amazon SageMaker endpoint + // variants // // * Custom resources // - // For all other scalable targets, the - // default value is 0: + // For all other scalable targets, the default value + // is 0: // // * DynamoDB tables // // * DynamoDB global secondary indexes // - // * - // Amazon Comprehend document classification and entity recognizer endpoints + // * Amazon + // Comprehend document classification and entity recognizer endpoints // - // * - // Lambda provisioned concurrency + // * Lambda + // provisioned concurrency // // * Amazon Keyspaces tables // - // * Amazon MSK broker - // storage + // * Amazon MSK broker storage ScaleInCooldown *int32 // The amount of time, in seconds, to wait for a previous scale-out activity to @@ -1144,41 +1196,42 @@ type TargetTrackingScalingPolicyConfiguration struct { // period ends. While the cooldown period is in effect, the capacity added by the // initiating scale-out activity is calculated as part of the desired capacity for // the next scale-out activity. 
Application Auto Scaling provides a default value - // of 300 for the following scalable targets: + // of 600 for Amazon ElastiCache replication groups and a default value of 300 for + // the following scalable targets: // // * ECS services // - // * Spot Fleet - // requests + // * Spot Fleet requests // - // * EMR clusters + // * EMR + // clusters // // * AppStream 2.0 fleets // // * Aurora DB clusters // - // * Amazon - // SageMaker endpoint variants + // * Amazon SageMaker + // endpoint variants // // * Custom resources // - // For all other scalable targets, - // the default value is 0: + // For all other scalable targets, the + // default value is 0: // // * DynamoDB tables // - // * DynamoDB global secondary - // indexes + // * DynamoDB global secondary indexes // - // * Amazon Comprehend document classification and entity recognizer - // endpoints + // * + // Amazon Comprehend document classification and entity recognizer endpoints // - // * Lambda provisioned concurrency + // * + // Lambda provisioned concurrency // // * Amazon Keyspaces tables // - // * Amazon - // MSK broker storage + // * Amazon MSK broker + // storage ScaleOutCooldown *int32 noSmithyDocumentSerde diff --git a/service/cloud9/api_op_CreateEnvironmentEC2.go b/service/cloud9/api_op_CreateEnvironmentEC2.go index 42b7d5fbe11..2ada9e051fb 100644 --- a/service/cloud9/api_op_CreateEnvironmentEC2.go +++ b/service/cloud9/api_op_CreateEnvironmentEC2.go @@ -63,6 +63,12 @@ type CreateEnvironmentEC2Input struct { // The description of the environment to create. Description *string + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation. Otherwise, it is + // UnauthorizedOperation. + DryRun *bool + // The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 // instance. 
To choose an AMI for the instance, you must specify a valid AMI alias // or a valid Amazon EC2 Systems Manager (SSM) path. The default AMI is used if the diff --git a/service/cloud9/api_op_UpdateEnvironment.go b/service/cloud9/api_op_UpdateEnvironment.go index 2383dd8e75e..dd28d892379 100644 --- a/service/cloud9/api_op_UpdateEnvironment.go +++ b/service/cloud9/api_op_UpdateEnvironment.go @@ -6,6 +6,7 @@ import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/cloud9/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -36,6 +37,20 @@ type UpdateEnvironmentInput struct { // Any new or replacement description for the environment. Description *string + // Allows the environment owner to turn on or turn off the Amazon Web Services + // managed temporary credentials for an Cloud9 environment by using one of the + // following values: + // + // * ENABLE + // + // * DISABLE + // + // Only the environment owner can change + // the status of managed temporary credentials. An AccessDeniedException is thrown + // if an attempt to turn on or turn off managed temporary credentials is made by an + // account that's not the environment owner. + ManagedCredentialsAction types.ManagedCredentialsAction + // A replacement name for the environment. 
Name *string diff --git a/service/cloud9/serializers.go b/service/cloud9/serializers.go index 18de6cefa8c..28b52836868 100644 --- a/service/cloud9/serializers.go +++ b/service/cloud9/serializers.go @@ -711,6 +711,11 @@ func awsAwsjson11_serializeOpDocumentCreateEnvironmentEC2Input(v *CreateEnvironm ok.String(*v.Description) } + if v.DryRun != nil { + ok := object.Key("dryRun") + ok.Boolean(*v.DryRun) + } + if v.ImageId != nil { ok := object.Key("imageId") ok.String(*v.ImageId) @@ -938,6 +943,11 @@ func awsAwsjson11_serializeOpDocumentUpdateEnvironmentInput(v *UpdateEnvironment ok.String(*v.EnvironmentId) } + if len(v.ManagedCredentialsAction) > 0 { + ok := object.Key("managedCredentialsAction") + ok.String(string(v.ManagedCredentialsAction)) + } + if v.Name != nil { ok := object.Key("name") ok.String(*v.Name) diff --git a/service/cloud9/types/enums.go b/service/cloud9/types/enums.go index a343067208b..afd01119e0d 100644 --- a/service/cloud9/types/enums.go +++ b/service/cloud9/types/enums.go @@ -90,6 +90,24 @@ func (EnvironmentType) Values() []EnvironmentType { } } +type ManagedCredentialsAction string + +// Enum values for ManagedCredentialsAction +const ( + ManagedCredentialsActionEnable ManagedCredentialsAction = "ENABLE" + ManagedCredentialsActionDisable ManagedCredentialsAction = "DISABLE" +) + +// Values returns all known values for ManagedCredentialsAction. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (ManagedCredentialsAction) Values() []ManagedCredentialsAction { + return []ManagedCredentialsAction{ + "ENABLE", + "DISABLE", + } +} + type ManagedCredentialsStatus string // Enum values for ManagedCredentialsStatus diff --git a/service/clouddirectory/types/errors.go b/service/clouddirectory/types/errors.go index 150ebdb3131..fb0c18f1365 100644 --- a/service/clouddirectory/types/errors.go +++ b/service/clouddirectory/types/errors.go @@ -7,7 +7,9 @@ import ( smithy "github.com/aws/smithy-go" ) -// Access denied. Check your permissions. +// Access denied or directory not found. Either you don't have permissions for this +// directory or the directory does not exist. Try calling ListDirectories and check +// your permissions. type AccessDeniedException struct { Message *string diff --git a/service/clouddirectory/types/types.go b/service/clouddirectory/types/types.go index 278bc7f1855..bc900d8e163 100644 --- a/service/clouddirectory/types/types.go +++ b/service/clouddirectory/types/types.go @@ -678,6 +678,8 @@ type BatchListObjectParentPathsResponse struct { noSmithyDocumentSerde } +// Lists parent objects that are associated with a given object in pagination +// fashion. type BatchListObjectParents struct { // The reference that identifies an object. @@ -685,16 +687,23 @@ type BatchListObjectParents struct { // This member is required. ObjectReference *ObjectReference + // The maximum number of items to be retrieved in a single call. This is an + // approximate number. MaxResults *int32 + // The pagination token. NextToken *string noSmithyDocumentSerde } +// Represents the output of a ListObjectParents response operation. type BatchListObjectParentsResponse struct { + + // The pagination token. NextToken *string + // Returns a list of parent reference and LinkName Tuples. 
ParentLinks []ObjectIdentifierAndLinkNameTuple noSmithyDocumentSerde @@ -885,6 +894,8 @@ type BatchReadOperation struct { // (https://docs.aws.amazon.com/clouddirectory/latest/developerguide/key_concepts_directorystructure.html). ListObjectParentPaths *BatchListObjectParentPaths + // Lists parent objects that are associated with a given object in pagination + // fashion. ListObjectParents *BatchListObjectParents // Returns policies attached to an object in pagination fashion. @@ -960,6 +971,7 @@ type BatchReadSuccessfulResponse struct { // (https://docs.aws.amazon.com/clouddirectory/latest/developerguide/key_concepts_directorystructure.html). ListObjectParentPaths *BatchListObjectParentPathsResponse + // The list of parent objects to retrieve. ListObjectParents *BatchListObjectParentsResponse // Returns policies attached to an object in pagination fashion. @@ -1397,12 +1409,14 @@ type ObjectReference struct { // An object identifier is an opaque string provided by Amazon Cloud Directory. // When creating objects, the system will provide you with the identifier of the // created object. An object’s identifier is immutable and no two objects will ever - // share the same object identifier + // share the same object identifier. To identify an object with ObjectIdentifier, + // the ObjectIdentifier must be wrapped in double quotes. // - // * /some/path - Identifies the object based on - // path + // * /some/path - + // Identifies the object based on path // - // * #SomeBatchReference - Identifies the object in a batch call + // * #SomeBatchReference - Identifies the + // object in a batch call Selector *string noSmithyDocumentSerde @@ -1467,13 +1481,14 @@ type Rule struct { // A facet. type SchemaFacet struct { - // The name of the facet. + // The name of the facet. If this value is set, SchemaArn must also be set. FacetName *string // The ARN of the schema that contains the facet with no minor component. 
See arns // and In-Place Schema Upgrade // (https://docs.aws.amazon.com/clouddirectory/latest/developerguide/schemas_inplaceschemaupgrade.html) - // for a description of when to provide minor versions. + // for a description of when to provide minor versions. If this value is set, + // FacetName must also be set. SchemaArn *string noSmithyDocumentSerde diff --git a/service/cloudwatchlogs/api_op_AssociateKmsKey.go b/service/cloudwatchlogs/api_op_AssociateKmsKey.go index 1c183653863..48319dc764d 100644 --- a/service/cloudwatchlogs/api_op_AssociateKmsKey.go +++ b/service/cloudwatchlogs/api_op_AssociateKmsKey.go @@ -10,16 +10,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Associates the specified AWS Key Management Service (AWS KMS) customer master -// key (CMK) with the specified log group. Associating an AWS KMS CMK with a log -// group overrides any existing associations between the log group and a CMK. After -// a CMK is associated with a log group, all newly ingested data for the log group -// is encrypted using the CMK. This association is stored as long as the data -// encrypted with the CMK is still within Amazon CloudWatch Logs. This enables -// Amazon CloudWatch Logs to decrypt this data whenever it is requested. CloudWatch -// Logs supports only symmetric CMKs. Do not use an associate an asymmetric CMK -// with your log group. For more information, see Using Symmetric and Asymmetric -// Keys +// Associates the specified Key Management Service customer master key (CMK) with +// the specified log group. Associating an KMS CMK with a log group overrides any +// existing associations between the log group and a CMK. After a CMK is associated +// with a log group, all newly ingested data for the log group is encrypted using +// the CMK. This association is stored as long as the data encrypted with the CMK +// is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this +// data whenever it is requested. 
CloudWatch Logs supports only symmetric CMKs. Do +// not use an associate an asymmetric CMK with your log group. For more +// information, see Using Symmetric and Asymmetric Keys // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). // It can take up to 5 minutes for this operation to take effect. If you attempt to // associate a CMK with a log group but the CMK does not exist or the CMK is @@ -42,8 +41,8 @@ func (c *Client) AssociateKmsKey(ctx context.Context, params *AssociateKmsKeyInp type AssociateKmsKeyInput struct { // The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. This - // must be a symmetric CMK. For more information, see Amazon Resource Names - AWS - // Key Management Service (AWS KMS) + // must be a symmetric CMK. For more information, see Amazon Resource Names - Key + // Management Service // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) // and Using Symmetric and Asymmetric Keys // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). diff --git a/service/cloudwatchlogs/api_op_CreateExportTask.go b/service/cloudwatchlogs/api_op_CreateExportTask.go index ab578387a84..80346f760fa 100644 --- a/service/cloudwatchlogs/api_op_CreateExportTask.go +++ b/service/cloudwatchlogs/api_op_CreateExportTask.go @@ -45,7 +45,7 @@ func (c *Client) CreateExportTask(ctx context.Context, params *CreateExportTaskI type CreateExportTaskInput struct { // The name of S3 bucket for the exported log data. The bucket must be in the same - // AWS region. + // Amazon Web Services region. // // This member is required. Destination *string diff --git a/service/cloudwatchlogs/api_op_CreateLogGroup.go b/service/cloudwatchlogs/api_op_CreateLogGroup.go index eba91a2691f..dbc35743d46 100644 --- a/service/cloudwatchlogs/api_op_CreateLogGroup.go +++ b/service/cloudwatchlogs/api_op_CreateLogGroup.go @@ -14,29 +14,28 @@ import ( // groups per account. 
You must use the following guidelines when naming a log // group: // -// * Log group names must be unique within a region for an AWS account. +// * Log group names must be unique within a region for an Amazon Web +// Services account. // -// * -// Log group names can be between 1 and 512 characters long. +// * Log group names can be between 1 and 512 characters +// long. // -// * Log group names -// consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' -// (hyphen), '/' (forward slash), '.' (period), and '#' (number sign) +// * Log group names consist of the following characters: a-z, A-Z, 0-9, '_' +// (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number +// sign) // -// When you -// create a log group, by default the log events in the log group never expire. To -// set a retention policy so that events expire and are deleted after a specified -// time, use PutRetentionPolicy +// When you create a log group, by default the log events in the log group +// never expire. To set a retention policy so that events expire and are deleted +// after a specified time, use PutRetentionPolicy // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html). -// If you associate a AWS Key Management Service (AWS KMS) customer master key -// (CMK) with the log group, ingested data is encrypted using the CMK. This -// association is stored as long as the data encrypted with the CMK is still within -// Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data -// whenever it is requested. If you attempt to associate a CMK with the log group -// but the CMK does not exist or the CMK is disabled, you receive an -// InvalidParameterException error. CloudWatch Logs supports only symmetric CMKs. -// Do not associate an asymmetric CMK with your log group. 
For more information, -// see Using Symmetric and Asymmetric Keys +// If you associate a Key Management Service customer master key (CMK) with the log +// group, ingested data is encrypted using the CMK. This association is stored as +// long as the data encrypted with the CMK is still within CloudWatch Logs. This +// enables CloudWatch Logs to decrypt this data whenever it is requested. If you +// attempt to associate a CMK with the log group but the CMK does not exist or the +// CMK is disabled, you receive an InvalidParameterException error. CloudWatch Logs +// supports only symmetric CMKs. Do not associate an asymmetric CMK with your log +// group. For more information, see Using Symmetric and Asymmetric Keys // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). func (c *Client) CreateLogGroup(ctx context.Context, params *CreateLogGroupInput, optFns ...func(*Options)) (*CreateLogGroupOutput, error) { if params == nil { @@ -61,12 +60,16 @@ type CreateLogGroupInput struct { LogGroupName *string // The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. For - // more information, see Amazon Resource Names - AWS Key Management Service (AWS - // KMS) + // more information, see Amazon Resource Names - Key Management Service // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms). KmsKeyId *string - // The key-value pairs to use for the tags. + // The key-value pairs to use for the tags. CloudWatch Logs doesn’t support IAM + // policies that prevent users from assigning specified tags to log groups using + // the aws:Resource/key-name or aws:TagKeys condition keys. For more information + // about using tags to control access, see Controlling access to Amazon Web + // Services resources using tags + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html). 
Tags map[string]string noSmithyDocumentSerde diff --git a/service/cloudwatchlogs/api_op_DescribeLogGroups.go b/service/cloudwatchlogs/api_op_DescribeLogGroups.go index df25c5f7340..4aa0651ea3d 100644 --- a/service/cloudwatchlogs/api_op_DescribeLogGroups.go +++ b/service/cloudwatchlogs/api_op_DescribeLogGroups.go @@ -13,7 +13,13 @@ import ( ) // Lists the specified log groups. You can list all your log groups or filter the -// results by prefix. The results are ASCII-sorted by log group name. +// results by prefix. The results are ASCII-sorted by log group name. CloudWatch +// Logs doesn’t support IAM policies that control access to the DescribeLogGroups +// action by using the aws:ResourceTag/key-name condition key. Other CloudWatch +// Logs actions do support the use of the aws:ResourceTag/key-name condition key +// to control access. For more information about using tags to control access, see +// Controlling access to Amazon Web Services resources using tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html). func (c *Client) DescribeLogGroups(ctx context.Context, params *DescribeLogGroupsInput, optFns ...func(*Options)) (*DescribeLogGroupsOutput, error) { if params == nil { params = &DescribeLogGroupsInput{} diff --git a/service/cloudwatchlogs/api_op_DisassociateKmsKey.go b/service/cloudwatchlogs/api_op_DisassociateKmsKey.go index 96f41276804..5ffe6875736 100644 --- a/service/cloudwatchlogs/api_op_DisassociateKmsKey.go +++ b/service/cloudwatchlogs/api_op_DisassociateKmsKey.go @@ -10,13 +10,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Disassociates the associated AWS Key Management Service (AWS KMS) customer -// master key (CMK) from the specified log group. After the AWS KMS CMK is -// disassociated from the log group, AWS CloudWatch Logs stops encrypting newly -// ingested data for the log group. 
All previously ingested data remains encrypted, -// and AWS CloudWatch Logs requires permissions for the CMK whenever the encrypted -// data is requested. Note that it can take up to 5 minutes for this operation to -// take effect. +// Disassociates the associated Key Management Service customer master key (CMK) +// from the specified log group. After the KMS CMK is disassociated from the log +// group, CloudWatch Logs stops encrypting newly ingested data for the log group. +// All previously ingested data remains encrypted, and CloudWatch Logs requires +// permissions for the CMK whenever the encrypted data is requested. Note that it +// can take up to 5 minutes for this operation to take effect. func (c *Client) DisassociateKmsKey(ctx context.Context, params *DisassociateKmsKeyInput, optFns ...func(*Options)) (*DisassociateKmsKeyOutput, error) { if params == nil { params = &DisassociateKmsKeyInput{} diff --git a/service/cloudwatchlogs/api_op_GetLogEvents.go b/service/cloudwatchlogs/api_op_GetLogEvents.go index d32f7023490..b9dc0e39dbf 100644 --- a/service/cloudwatchlogs/api_op_GetLogEvents.go +++ b/service/cloudwatchlogs/api_op_GetLogEvents.go @@ -56,14 +56,13 @@ type GetLogEventsInput struct { Limit *int32 // The token for the next set of items to return. (You received this token from a - // previous call.) Using this token works only when you specify true for - // startFromHead. + // previous call.) NextToken *string // If the value is true, the earliest log events are returned first. If the value // is false, the latest log events are returned first. The default value is false. - // If you are using nextToken in this operation, you must specify true for - // startFromHead. + // If you are using a previous nextForwardToken value as the nextToken in this + // operation, you must specify true for startFromHead. 
StartFromHead *bool // The start of the time range, expressed as the number of milliseconds after Jan diff --git a/service/cloudwatchlogs/api_op_PutDestinationPolicy.go b/service/cloudwatchlogs/api_op_PutDestinationPolicy.go index 2b14dbcb1d0..6d8ab1b7f99 100644 --- a/service/cloudwatchlogs/api_op_PutDestinationPolicy.go +++ b/service/cloudwatchlogs/api_op_PutDestinationPolicy.go @@ -14,10 +14,10 @@ import ( // access policy is an IAM policy document // (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) that // is used to authorize claims to register a subscription filter against a given -// destination. If multiple AWS accounts are sending logs to this destination, each -// sender account must be listed separately in the policy. The policy does not -// support specifying * as the Principal or the use of the aws:PrincipalOrgId -// global key. +// destination. If multiple Amazon Web Services accounts are sending logs to this +// destination, each sender account must be listed separately in the policy. The +// policy does not support specifying * as the Principal or the use of the +// aws:PrincipalOrgId global key. func (c *Client) PutDestinationPolicy(ctx context.Context, params *PutDestinationPolicyInput, optFns ...func(*Options)) (*PutDestinationPolicyOutput, error) { if params == nil { params = &PutDestinationPolicyInput{} diff --git a/service/cloudwatchlogs/api_op_PutLogEvents.go b/service/cloudwatchlogs/api_op_PutLogEvents.go index 7288c471b87..463d010d2c6 100644 --- a/service/cloudwatchlogs/api_op_PutLogEvents.go +++ b/service/cloudwatchlogs/api_op_PutLogEvents.go @@ -33,22 +33,23 @@ import ( // * The log events in the batch // must be in chronological order by their timestamp. The timestamp is the time the // event occurred, expressed as the number of milliseconds after Jan 1, 1970 -// 00:00:00 UTC. (In AWS Tools for PowerShell and the AWS SDK for .NET, the -// timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. 
For example, -// 2017-09-15T13:45:30.) +// 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web +// Services SDK for .NET, the timestamp is specified in .NET format: +// yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.) // -// * A batch of log events in a single request cannot span -// more than 24 hours. Otherwise, the operation fails. +// * A batch of log events +// in a single request cannot span more than 24 hours. Otherwise, the operation +// fails. // -// * The maximum number of log -// events in a batch is 10,000. +// * The maximum number of log events in a batch is 10,000. // -// * There is a quota of 5 requests per second per -// log stream. Additional requests are throttled. This quota can't be changed. +// * There is a +// quota of 5 requests per second per log stream. Additional requests are +// throttled. This quota can't be changed. // -// If -// a call to PutLogEvents returns "UnrecognizedClientException" the most likely -// cause is an invalid AWS access key ID or secret key. +// If a call to PutLogEvents returns +// "UnrecognizedClientException" the most likely cause is an invalid Amazon Web +// Services access key ID or secret key. func (c *Client) PutLogEvents(ctx context.Context, params *PutLogEventsInput, optFns ...func(*Options)) (*PutLogEventsOutput, error) { if params == nil { params = &PutLogEventsInput{} diff --git a/service/cloudwatchlogs/api_op_PutMetricFilter.go b/service/cloudwatchlogs/api_op_PutMetricFilter.go index 9fe303a2738..45a0463a6d2 100644 --- a/service/cloudwatchlogs/api_op_PutMetricFilter.go +++ b/service/cloudwatchlogs/api_op_PutMetricFilter.go @@ -26,7 +26,7 @@ import ( // name/value pairs for the dimensions that you have specified within a certain // amount of time. You can also set up a billing alarm to alert you if your charges // are higher than expected. 
For more information, see Creating a Billing Alarm to -// Monitor Your Estimated AWS Charges +// Monitor Your Estimated Amazon Web Services Charges // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/monitor_estimated_charges_with_cloudwatch.html). func (c *Client) PutMetricFilter(ctx context.Context, params *PutMetricFilterInput, optFns ...func(*Options)) (*PutMetricFilterOutput, error) { if params == nil { diff --git a/service/cloudwatchlogs/api_op_PutResourcePolicy.go b/service/cloudwatchlogs/api_op_PutResourcePolicy.go index d97e52f4c99..0c515f438bf 100644 --- a/service/cloudwatchlogs/api_op_PutResourcePolicy.go +++ b/service/cloudwatchlogs/api_op_PutResourcePolicy.go @@ -11,9 +11,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates or updates a resource policy allowing other AWS services to put log -// events to this account, such as Amazon Route 53. An account can have up to 10 -// resource policies per AWS Region. +// Creates or updates a resource policy allowing other Amazon Web Services services +// to put log events to this account, such as Amazon Route 53. An account can have +// up to 10 resource policies per Amazon Web Services Region. func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) { if params == nil { params = &PutResourcePolicyInput{} @@ -36,10 +36,18 @@ type PutResourcePolicyInput struct { // parameter is required. The following example creates a resource policy enabling // the Route 53 service to put DNS query logs in to the specified log group. // Replace "logArn" with the ARN of your CloudWatch Logs resource, such as a log - // group or log stream. { "Version": "2012-10-17", "Statement": [ { "Sid": + // group or log stream. 
CloudWatch Logs also supports aws:SourceArn + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourcearn) + // and aws:SourceAccount + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceaccount) + // condition context keys. In the example resource policy, you would replace the + // value of SourceArn with the resource making the call from Route 53 to CloudWatch + // Logs and replace the value of SourceAccount with the Amazon Web Services account + // ID making that call. { "Version": "2012-10-17", "Statement": [ { "Sid": // "Route53LogsToCloudWatchLogs", "Effect": "Allow", "Principal": { "Service": [ - // "route53.amazonaws.com" ] }, "Action":"logs:PutLogEvents", "Resource": "logArn" - // } ] } + // "route53.amazonaws.com" ] }, "Action": "logs:PutLogEvents", "Resource": + // "logArn", "Condition": { "ArnLike": { "aws:SourceArn": "myRoute53ResourceArn" }, + // "StringEquals": { "aws:SourceAccount": "myAwsAccountId" } } } ] } PolicyDocument *string // Name of the new policy. This parameter is required. diff --git a/service/cloudwatchlogs/api_op_PutRetentionPolicy.go b/service/cloudwatchlogs/api_op_PutRetentionPolicy.go index 2326dbf0d92..619d1304be6 100644 --- a/service/cloudwatchlogs/api_op_PutRetentionPolicy.go +++ b/service/cloudwatchlogs/api_op_PutRetentionPolicy.go @@ -37,8 +37,9 @@ type PutRetentionPolicyInput struct { // The number of days to retain the log events in the specified log group. Possible // values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, - // and 3653. If you omit retentionInDays in a PutRetentionPolicy operation, the - // events in the log group are always retained and never expire. + // and 3653. To set a log group to never have log events expire, use + // DeleteRetentionPolicy + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteRetentionPolicy.html). 
// // This member is required. RetentionInDays *int32 diff --git a/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go b/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go index ae86ad531b3..5b6d5991544 100644 --- a/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go +++ b/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go @@ -30,14 +30,13 @@ import ( // stream that belongs to the same account as the subscription filter, for // same-account delivery. // -// * An AWS Lambda function that belongs to the same -// account as the subscription filter, for same-account delivery. +// * An Lambda function that belongs to the same account as +// the subscription filter, for same-account delivery. // -// Each log group -// can have up to two subscription filters associated with it. If you are updating -// an existing filter, you must specify the correct name in filterName. To perform -// a PutSubscriptionFilter operation, you must also have the iam:PassRole -// permission. +// Each log group can have up +// to two subscription filters associated with it. If you are updating an existing +// filter, you must specify the correct name in filterName. To perform a +// PutSubscriptionFilter operation, you must also have the iam:PassRole permission. func (c *Client) PutSubscriptionFilter(ctx context.Context, params *PutSubscriptionFilterInput, optFns ...func(*Options)) (*PutSubscriptionFilterOutput, error) { if params == nil { params = &PutSubscriptionFilterInput{} @@ -72,9 +71,8 @@ type PutSubscriptionFilterInput struct { // An Amazon Kinesis Firehose delivery stream belonging to the same account as the // subscription filter, for same-account delivery. // - // * An AWS Lambda function - // belonging to the same account as the subscription filter, for same-account - // delivery. + // * A Lambda function belonging + // to the same account as the subscription filter, for same-account delivery. // // This member is required. 
DestinationArn *string diff --git a/service/cloudwatchlogs/api_op_TagLogGroup.go b/service/cloudwatchlogs/api_op_TagLogGroup.go index 9c716442ca9..2d3eb27d440 100644 --- a/service/cloudwatchlogs/api_op_TagLogGroup.go +++ b/service/cloudwatchlogs/api_op_TagLogGroup.go @@ -17,7 +17,12 @@ import ( // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagLogGroup.html). // For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html#log-group-tagging) -// in the Amazon CloudWatch Logs User Guide. +// in the Amazon CloudWatch Logs User Guide. CloudWatch Logs doesn’t support IAM +// policies that prevent users from assigning specified tags to log groups using +// the aws:Resource/key-name or aws:TagKeys condition keys. For more information +// about using tags to control access, see Controlling access to Amazon Web +// Services resources using tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html). func (c *Client) TagLogGroup(ctx context.Context, params *TagLogGroupInput, optFns ...func(*Options)) (*TagLogGroupOutput, error) { if params == nil { params = &TagLogGroupInput{} diff --git a/service/cloudwatchlogs/api_op_UntagLogGroup.go b/service/cloudwatchlogs/api_op_UntagLogGroup.go index 1c236ecbfa9..be47e7cb35c 100644 --- a/service/cloudwatchlogs/api_op_UntagLogGroup.go +++ b/service/cloudwatchlogs/api_op_UntagLogGroup.go @@ -15,6 +15,9 @@ import ( // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html). // To add tags, use TagLogGroup // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagLogGroup.html). +// CloudWatch Logs doesn’t support IAM policies that prevent users from assigning +// specified tags to log groups using the aws:Resource/key-name or aws:TagKeys +// condition keys. 
func (c *Client) UntagLogGroup(ctx context.Context, params *UntagLogGroupInput, optFns ...func(*Options)) (*UntagLogGroupOutput, error) { if params == nil { params = &UntagLogGroupInput{} diff --git a/service/cloudwatchlogs/doc.go b/service/cloudwatchlogs/doc.go index 17c975bdbc3..dcb84948208 100644 --- a/service/cloudwatchlogs/doc.go +++ b/service/cloudwatchlogs/doc.go @@ -4,32 +4,32 @@ // for Amazon CloudWatch Logs. // // You can use Amazon CloudWatch Logs to monitor, store, and access your log files -// from EC2 instances, AWS CloudTrail, and other sources. You can then retrieve the +// from EC2 instances, CloudTrail, and other sources. You can then retrieve the // associated log data from CloudWatch Logs using the CloudWatch console, -// CloudWatch Logs commands in the AWS CLI, CloudWatch Logs API, or CloudWatch Logs -// SDK. You can use CloudWatch Logs to: +// CloudWatch Logs commands in the Amazon Web Services CLI, CloudWatch Logs API, or +// CloudWatch Logs SDK. You can use CloudWatch Logs to: // -// * Monitor logs from EC2 instances in -// real-time: You can use CloudWatch Logs to monitor applications and systems using -// log data. For example, CloudWatch Logs can track the number of errors that occur -// in your application logs and send you a notification whenever the rate of errors -// exceeds a threshold that you specify. CloudWatch Logs uses your log data for -// monitoring so no code changes are required. For example, you can monitor -// application logs for specific literal terms (such as "NullReferenceException") -// or count the number of occurrences of a literal term at a particular position in -// log data (such as "404" status codes in an Apache access log). When the term you -// are searching for is found, CloudWatch Logs reports the data to a CloudWatch -// metric that you specify. +// * Monitor logs from EC2 +// instances in real-time: You can use CloudWatch Logs to monitor applications and +// systems using log data. 
For example, CloudWatch Logs can track the number of +// errors that occur in your application logs and send you a notification whenever +// the rate of errors exceeds a threshold that you specify. CloudWatch Logs uses +// your log data for monitoring so no code changes are required. For example, you +// can monitor application logs for specific literal terms (such as +// "NullReferenceException") or count the number of occurrences of a literal term +// at a particular position in log data (such as "404" status codes in an Apache +// access log). When the term you are searching for is found, CloudWatch Logs +// reports the data to a CloudWatch metric that you specify. // -// * Monitor AWS CloudTrail logged events: You can create -// alarms in CloudWatch and receive notifications of particular API activity as -// captured by CloudTrail. You can use the notification to perform -// troubleshooting. +// * Monitor CloudTrail +// logged events: You can create alarms in CloudWatch and receive notifications of +// particular API activity as captured by CloudTrail. You can use the notification +// to perform troubleshooting. // -// * Archive log data: You can use CloudWatch Logs to store your -// log data in highly durable storage. You can change the log retention setting so -// that any log events older than this setting are automatically deleted. The -// CloudWatch Logs agent makes it easy to quickly send both rotated and non-rotated -// log data off of a host and into the log service. You can then access the raw log -// data when you need it. +// * Archive log data: You can use CloudWatch Logs to +// store your log data in highly durable storage. You can change the log retention +// setting so that any log events older than this setting are automatically +// deleted. The CloudWatch Logs agent makes it easy to quickly send both rotated +// and non-rotated log data off of a host and into the log service. You can then +// access the raw log data when you need it. 
package cloudwatchlogs diff --git a/service/cloudwatchlogs/types/errors.go b/service/cloudwatchlogs/types/errors.go index 640e9103c17..518d344859c 100644 --- a/service/cloudwatchlogs/types/errors.go +++ b/service/cloudwatchlogs/types/errors.go @@ -209,7 +209,8 @@ func (e *ServiceUnavailableException) ErrorMessage() string { func (e *ServiceUnavailableException) ErrorCode() string { return "ServiceUnavailableException" } func (e *ServiceUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } -// The most likely cause is an invalid AWS access key ID or secret key. +// The most likely cause is an invalid Amazon Web Services access key ID or secret +// key. type UnrecognizedClientException struct { Message *string diff --git a/service/cloudwatchlogs/types/types.go b/service/cloudwatchlogs/types/types.go index e0e573e72ad..08fedec360c 100644 --- a/service/cloudwatchlogs/types/types.go +++ b/service/cloudwatchlogs/types/types.go @@ -9,8 +9,8 @@ import ( // Represents a cross-account destination that receives subscription log events. type Destination struct { - // An IAM policy document that governs which AWS accounts can create subscription - // filters against this destination. + // An IAM policy document that governs which Amazon Web Services accounts can + // create subscription filters against this destination. AccessPolicy *string // The ARN of this destination. @@ -157,8 +157,9 @@ type LogGroup struct { // The number of days to retain the log events in the specified log group. Possible // values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, - // and 3653. If you omit retentionInDays in a PutRetentionPolicy operation, the - // events in the log group are always retained and never expire. + // and 3653. To set a log group to never have log events expire, use + // DeleteRetentionPolicy + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteRetentionPolicy.html). 
RetentionInDays *int32 // The number of bytes stored. @@ -302,7 +303,7 @@ type MetricTransformation struct { // name/value pairs for the dimensions that you have specified within a certain // amount of time. You can also set up a billing alarm to alert you if your charges // are higher than expected. For more information, see Creating a Billing Alarm to - // Monitor Your Estimated AWS Charges + // Monitor Your Estimated Amazon Web Services Charges // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/monitor_estimated_charges_with_cloudwatch.html). Dimensions map[string]string diff --git a/service/codebuild/types/types.go b/service/codebuild/types/types.go index b6a3883a306..bdbebe7f28d 100644 --- a/service/codebuild/types/types.go +++ b/service/codebuild/types/types.go @@ -1616,7 +1616,9 @@ type ProjectSource struct { // status cannot be updated. For more information, see Source provider access // (https://docs.aws.amazon.com/codebuild/latest/userguide/access-tokens.html) in // the CodeBuild User Guide. The status of a build triggered by a webhook is always - // reported to your source provider. + // reported to your source provider. If your project's builds are triggered by a + // webhook, you must push a new commit to the repo for a change to this property to + // take effect. ReportBuildStatus *bool // An identifier for this project source. 
The identifier can only contain diff --git a/service/configservice/types/enums.go b/service/configservice/types/enums.go index 8819d217f0f..7154a7c69cb 100644 --- a/service/configservice/types/enums.go +++ b/service/configservice/types/enums.go @@ -659,6 +659,17 @@ const ( ResourceTypeSecret ResourceType = "AWS::SecretsManager::Secret" ResourceTypeTopic ResourceType = "AWS::SNS::Topic" ResourceTypeFileData ResourceType = "AWS::SSM::FileData" + ResourceTypeBackupPlan ResourceType = "AWS::Backup::BackupPlan" + ResourceTypeBackupSelection ResourceType = "AWS::Backup::BackupSelection" + ResourceTypeBackupVault ResourceType = "AWS::Backup::BackupVault" + ResourceTypeBackupRecoveryPoint ResourceType = "AWS::Backup::RecoveryPoint" + ResourceTypeECRRepository ResourceType = "AWS::ECR::Repository" + ResourceTypeECSCluster ResourceType = "AWS::ECS::Cluster" + ResourceTypeECSService ResourceType = "AWS::ECS::Service" + ResourceTypeECSTaskDefinition ResourceType = "AWS::ECS::TaskDefinition" + ResourceTypeEFSAccessPoint ResourceType = "AWS::EFS::AccessPoint" + ResourceTypeEFSFileSystem ResourceType = "AWS::EFS::FileSystem" + ResourceTypeEKSCluster ResourceType = "AWS::EKS::Cluster" ) // Values returns all known values for ResourceType. 
Note that this can be expanded @@ -763,6 +774,17 @@ func (ResourceType) Values() []ResourceType { "AWS::SecretsManager::Secret", "AWS::SNS::Topic", "AWS::SSM::FileData", + "AWS::Backup::BackupPlan", + "AWS::Backup::BackupSelection", + "AWS::Backup::BackupVault", + "AWS::Backup::RecoveryPoint", + "AWS::ECR::Repository", + "AWS::ECS::Cluster", + "AWS::ECS::Service", + "AWS::ECS::TaskDefinition", + "AWS::EFS::AccessPoint", + "AWS::EFS::FileSystem", + "AWS::EKS::Cluster", } } diff --git a/service/costexplorer/api_op_CreateCostCategoryDefinition.go b/service/costexplorer/api_op_CreateCostCategoryDefinition.go index 7bca5c92ffb..8337cc42884 100644 --- a/service/costexplorer/api_op_CreateCostCategoryDefinition.go +++ b/service/costexplorer/api_op_CreateCostCategoryDefinition.go @@ -49,6 +49,10 @@ type CreateCostCategoryDefinitionInput struct { // The default value for the cost category. DefaultValue *string + // The split charge rules used to allocate your charges between your Cost Category + // values. + SplitChargeRules []types.CostCategorySplitChargeRule + noSmithyDocumentSerde } diff --git a/service/costexplorer/api_op_GetAnomalies.go b/service/costexplorer/api_op_GetAnomalies.go index e2610ec25d5..218b92d129a 100644 --- a/service/costexplorer/api_op_GetAnomalies.go +++ b/service/costexplorer/api_op_GetAnomalies.go @@ -11,8 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves all of the cost anomalies detected on your account, during the time -// period specified by the DateInterval object. +// Retrieves all of the cost anomalies detected on your account during the time +// period that's specified by the DateInterval object. func (c *Client) GetAnomalies(ctx context.Context, params *GetAnomaliesInput, optFns ...func(*Options)) (*GetAnomaliesOutput, error) { if params == nil { params = &GetAnomaliesInput{} @@ -46,8 +46,9 @@ type GetAnomaliesInput struct { // Amazon Resource Name (ARN). 
MonitorArn *string - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // Filters anomaly results by the total impact field on the anomaly object. For @@ -65,8 +66,9 @@ type GetAnomaliesOutput struct { // This member is required. Anomalies []types.Anomaly - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // Metadata pertaining to the operation's result. diff --git a/service/costexplorer/api_op_GetAnomalyMonitors.go b/service/costexplorer/api_op_GetAnomalyMonitors.go index 965569feebd..26110816d63 100644 --- a/service/costexplorer/api_op_GetAnomalyMonitors.go +++ b/service/costexplorer/api_op_GetAnomalyMonitors.go @@ -30,14 +30,15 @@ func (c *Client) GetAnomalyMonitors(ctx context.Context, params *GetAnomalyMonit type GetAnomalyMonitorsInput struct { - // The number of entries a paginated response contains. + // The number of entries that a paginated response contains. MaxResults *int32 // A list of cost anomaly monitor ARNs. MonitorArnList []string - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. 
NextPageToken *string noSmithyDocumentSerde @@ -51,8 +52,9 @@ type GetAnomalyMonitorsOutput struct { // This member is required. AnomalyMonitors []types.AnomalyMonitor - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // Metadata pertaining to the operation's result. diff --git a/service/costexplorer/api_op_GetAnomalySubscriptions.go b/service/costexplorer/api_op_GetAnomalySubscriptions.go index c08a7c1d728..094b8527836 100644 --- a/service/costexplorer/api_op_GetAnomalySubscriptions.go +++ b/service/costexplorer/api_op_GetAnomalySubscriptions.go @@ -36,8 +36,9 @@ type GetAnomalySubscriptionsInput struct { // Cost anomaly monitor ARNs. MonitorArn *string - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // A list of cost anomaly subscription ARNs. @@ -54,8 +55,9 @@ type GetAnomalySubscriptionsOutput struct { // This member is required. AnomalySubscriptions []types.AnomalySubscription - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // Metadata pertaining to the operation's result. 
diff --git a/service/costexplorer/api_op_GetCostAndUsage.go b/service/costexplorer/api_op_GetCostAndUsage.go index 7b560a9a7b2..f10f370ed47 100644 --- a/service/costexplorer/api_op_GetCostAndUsage.go +++ b/service/costexplorer/api_op_GetCostAndUsage.go @@ -12,14 +12,14 @@ import ( ) // Retrieves cost and usage metrics for your account. You can specify which cost -// and usage-related metric, such as BlendedCosts or UsageQuantity, that you want -// the request to return. You can also filter and group your data by various -// dimensions, such as SERVICE or AZ, in a specific time range. For a complete list -// of valid dimensions, see the GetDimensionValues +// and usage-related metric that you want the request to return. For example, you +// can specify BlendedCosts or UsageQuantity. You can also filter and group your +// data by various dimensions, such as SERVICE or AZ, in a specific time range. For +// a complete list of valid dimensions, see the GetDimensionValues // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetDimensionValues.html) -// operation. Management account in an organization in AWS Organizations have -// access to all member accounts. For information about filter limitations, see -// Quotas and restrictions +// operation. Management account in an organization in Organizations have access to +// all member accounts. For information about filter limitations, see Quotas and +// restrictions // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-limits.html) // in the Billing and Cost Management User Guide. func (c *Client) GetCostAndUsage(ctx context.Context, params *GetCostAndUsageInput, optFns ...func(*Options)) (*GetCostAndUsageOutput, error) { @@ -39,9 +39,9 @@ func (c *Client) GetCostAndUsage(ctx context.Context, params *GetCostAndUsageInp type GetCostAndUsageInput struct { - // Sets the AWS cost granularity to MONTHLY or DAILY, or HOURLY. 
If Granularity - // isn't set, the response object doesn't include the Granularity, either MONTHLY - // or DAILY, or HOURLY. + // Sets the Amazon Web Services cost granularity to MONTHLY or DAILY, or HOURLY. If + // Granularity isn't set, the response object doesn't include the Granularity, + // either MONTHLY or DAILY, or HOURLY. // // This member is required. Granularity types.Granularity @@ -55,37 +55,39 @@ type GetCostAndUsageInput struct { // UsageQuantity metric, the service aggregates all usage numbers without taking // into account the units. For example, if you aggregate usageQuantity across all // of Amazon EC2, the results aren't meaningful because Amazon EC2 compute hours - // and data transfer are measured in different units (for example, hours vs. GB). + // and data transfer are measured in different units (for example, hours and GB). // To get more meaningful UsageQuantity metrics, filter by UsageType or // UsageTypeGroups. Metrics is required for GetCostAndUsage requests. // // This member is required. Metrics []string - // Sets the start and end dates for retrieving AWS costs. The start date is - // inclusive, but the end date is exclusive. For example, if start is 2017-01-01 - // and end is 2017-05-01, then the cost and usage data is retrieved from 2017-01-01 - // up to and including 2017-04-30 but not including 2017-05-01. + // Sets the start date and end date for retrieving Amazon Web Services costs. The + // start date is inclusive, but the end date is exclusive. For example, if start is + // 2017-01-01 and end is 2017-05-01, then the cost and usage data is retrieved from + // 2017-01-01 up to and including 2017-04-30 but not including 2017-05-01. // // This member is required. TimePeriod *types.DateInterval - // Filters AWS costs by different dimensions. For example, you can specify SERVICE - // and LINKED_ACCOUNT and get the costs that are associated with that account's - // usage of that service. 
You can nest Expression objects to define any combination - // of dimension filters. For more information, see Expression + // Filters Amazon Web Services costs by different dimensions. For example, you can + // specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with + // that account's usage of that service. You can nest Expression objects to define + // any combination of dimension filters. For more information, see Expression // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html). Filter *types.Expression - // You can group AWS costs using up to two different groups, either dimensions, tag - // keys, cost categories, or any two group by types. When you group by tag key, you - // get all tag values, including empty strings. Valid values are AZ, INSTANCE_TYPE, - // LEGAL_ENTITY_NAME, LINKED_ACCOUNT, OPERATION, PLATFORM, PURCHASE_TYPE, SERVICE, - // TAGS, TENANCY, RECORD_TYPE, and USAGE_TYPE. + // You can group Amazon Web Services costs using up to two different groups, either + // dimensions, tag keys, cost categories, or any two group by types. Valid values + // for the DIMENSION type are AZ, INSTANCE_TYPE, LEGAL_ENTITY_NAME, LINKED_ACCOUNT, + // OPERATION, PLATFORM, PURCHASE_TYPE, SERVICE, TENANCY, RECORD_TYPE, and + // USAGE_TYPE. When you group by the TAG type and include a valid tag key, you get + // all tag values, including empty strings. GroupBy []types.GroupDefinition - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string noSmithyDocumentSerde @@ -101,11 +103,12 @@ type GetCostAndUsageOutput struct { // request. 
GroupDefinitions []types.GroupDefinition - // The token for the next set of retrievable results. AWS provides the token when - // the response from a previous call has more results than the maximum page size. + // The token for the next set of retrievable results. Amazon Web Services provides + // the token when the response from a previous call has more results than the + // maximum page size. NextPageToken *string - // The time period that is covered by the results in the response. + // The time period that's covered by the results in the response. ResultsByTime []types.ResultByTime // Metadata pertaining to the operation's result. diff --git a/service/costexplorer/api_op_GetCostAndUsageWithResources.go b/service/costexplorer/api_op_GetCostAndUsageWithResources.go index 41e4f345407..f10468bbbae 100644 --- a/service/costexplorer/api_op_GetCostAndUsageWithResources.go +++ b/service/costexplorer/api_op_GetCostAndUsageWithResources.go @@ -17,14 +17,13 @@ import ( // group your data by various dimensions, such as SERVICE or AZ, in a specific time // range. For a complete list of valid dimensions, see the GetDimensionValues // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetDimensionValues.html) -// operation. Management account in an organization in AWS Organizations have -// access to all member accounts. This API is currently available for the Amazon -// Elastic Compute Cloud – Compute service only. This is an opt-in only feature. -// You can enable this feature from the Cost Explorer Settings page. For -// information on how to access the Settings page, see Controlling Access for Cost -// Explorer +// operation. Management account in an organization in Organizations have access to +// all member accounts. This API is currently available for the Amazon Elastic +// Compute Cloud – Compute service only. This is an opt-in only feature. You can +// enable this feature from the Cost Explorer Settings page. 
For information on how +// to access the Settings page, see Controlling Access for Cost Explorer // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/ce-access.html) in -// the AWS Billing and Cost Management User Guide. +// the Billing and Cost Management User Guide. func (c *Client) GetCostAndUsageWithResources(ctx context.Context, params *GetCostAndUsageWithResourcesInput, optFns ...func(*Options)) (*GetCostAndUsageWithResourcesOutput, error) { if params == nil { params = &GetCostAndUsageWithResourcesInput{} @@ -55,9 +54,9 @@ type GetCostAndUsageWithResourcesInput struct { // This member is required. Filter *types.Expression - // Sets the AWS cost granularity to MONTHLY, DAILY, or HOURLY. If Granularity isn't - // set, the response object doesn't include the Granularity, MONTHLY, DAILY, or - // HOURLY. + // Sets the Amazon Web Services cost granularity to MONTHLY, DAILY, or HOURLY. If + // Granularity isn't set, the response object doesn't include the Granularity, + // MONTHLY, DAILY, or HOURLY. // // This member is required. Granularity types.Granularity @@ -90,8 +89,9 @@ type GetCostAndUsageWithResourcesInput struct { // UsageTypeGroups. Metrics is required for GetCostAndUsageWithResources requests. Metrics []string - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string noSmithyDocumentSerde @@ -107,8 +107,9 @@ type GetCostAndUsageWithResourcesOutput struct { // request. GroupDefinitions []types.GroupDefinition - // The token for the next set of retrievable results. AWS provides the token when - // the response from a previous call has more results than the maximum page size. + // The token for the next set of retrievable results. 
Amazon Web Services provides + // the token when the response from a previous call has more results than the + // maximum page size. NextPageToken *string // The time period that is covered by the results in the response. diff --git a/service/costexplorer/api_op_GetCostCategories.go b/service/costexplorer/api_op_GetCostCategories.go index 2b77ad9d9c1..cbbc24baa69 100644 --- a/service/costexplorer/api_op_GetCostCategories.go +++ b/service/costexplorer/api_op_GetCostCategories.go @@ -45,32 +45,33 @@ type GetCostCategoriesInput struct { // dimension values - You can set the dimension name and values for the filters // that you plan to use. For example, you can filter for REGION==us-east-1 OR // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name - // (for example, REGION==US East (N. Virginia). The Expression example looks like: - // { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } - // The list of dimension values are OR'd together to retrieve cost or usage data. - // You can create Expression and DimensionValues objects using either with* methods - // or set* methods in multiple lines. + // (for example, REGION==US East (N. Virginia). The Expression example is as + // follows: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” + // ] } } The list of dimension values are OR'd together to retrieve cost or usage + // data. You can create Expression and DimensionValues objects using either with* + // methods or set* methods in multiple lines. // - // * Compound dimension values with logical - // operations - You can use multiple Expression types and the logical operators - // AND/OR/NOT to create a list of one or more Expression objects. This allows you - // to filter on more advanced options. For example, you can filter on ((REGION == - // us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != - // DataTransfer). 
The Expression for that looks like this: { "And": [ {"Or": [ - // {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, - // {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if more than - // one is specified. The following example shows an Expression object that creates - // an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", - // "Values": [ "DataTransfer" ] } } + // * Compound dimension values with + // logical operations - You can use multiple Expression types and the logical + // operators AND/OR/NOT to create a list of one or more Expression objects. By + // doing this, you can filter on more advanced options. For example, you can filter + // on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND + // (USAGE_TYPE != DataTransfer). The Expression for that is as follows: { "And": [ + // {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" + // ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": + // {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } + // Because each Expression can have only one operator, the service returns an error + // if more than one is specified. The following example shows an Expression object + // that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": + // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } // - // For the GetRightsizingRecommendation action, a - // combination of OR and NOT is not supported. OR is not supported between - // different dimensions, or dimensions and tags. NOT operators aren't supported. - // Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For - // the GetReservationPurchaseRecommendation action, only NOT is supported. 
AND and - // OR are not supported. Dimensions are limited to LINKED_ACCOUNT. + // For the + // GetRightsizingRecommendation action, a combination of OR and NOT isn't + // supported. OR isn't supported between different dimensions, or dimensions and + // tags. NOT operators aren't supported. Dimensions are also limited to + // LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For the + // GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR + // aren't supported. Dimensions are limited to LINKED_ACCOUNT. Filter *types.Expression // This field is only used when SortBy is provided in the request. The maximum @@ -81,9 +82,9 @@ type GetCostCategoriesInput struct { MaxResults int32 // If the number of objects that are still available for retrieval exceeds the - // limit, AWS returns a NextPageToken value in the response. To retrieve the next - // batch of objects, provide the NextPageToken from the prior call in your next - // request. + // limit, Amazon Web Services returns a NextPageToken value in the response. To + // retrieve the next batch of objects, provide the NextPageToken from the prior + // call in your next request. NextPageToken *string // The value that you want to search the filter values for. If you do not specify a @@ -139,8 +140,9 @@ type GetCostCategoriesOutput struct { CostCategoryValues []string // If the number of objects that are still available for retrieval exceeds the - // limit, AWS returns a NextPageToken value in the response. To retrieve the next - // batch of objects, provide the marker from the prior call in your next request. + // limit, Amazon Web Services returns a NextPageToken value in the response. To + // retrieve the next batch of objects, provide the marker from the prior call in + // your next request. NextPageToken *string // Metadata pertaining to the operation's result. 
diff --git a/service/costexplorer/api_op_GetDimensionValues.go b/service/costexplorer/api_op_GetDimensionValues.go index a7afafabbc6..08da0332cac 100644 --- a/service/costexplorer/api_op_GetDimensionValues.go +++ b/service/costexplorer/api_op_GetDimensionValues.go @@ -36,8 +36,8 @@ type GetDimensionValuesInput struct { // This member is required. Dimension types.Dimension - // The start and end dates for retrieving the dimension values. The start date is - // inclusive, but the end date is exclusive. For example, if start is 2017-01-01 + // The start date and end date for retrieving the dimension values. The start date + // is inclusive, but the end date is exclusive. For example, if start is 2017-01-01 // and end is 2017-05-01, then the cost and usage data is retrieved from 2017-01-01 // up to and including 2017-04-30 but not including 2017-05-01. // @@ -62,44 +62,45 @@ type GetDimensionValuesInput struct { // is m4.xlarge. // // * LEGAL_ENTITY_NAME - The name of the organization that sells you - // AWS services, such as Amazon Web Services. + // Amazon Web Services services, such as Amazon Web Services. // - // * LINKED_ACCOUNT - The description - // in the attribute map that includes the full name of the member account. The - // value field contains the AWS ID of the member account. + // * LINKED_ACCOUNT - + // The description in the attribute map that includes the full name of the member + // account. The value field contains the Amazon Web Services ID of the member + // account. // - // * OPERATING_SYSTEM - The - // operating system. Examples are Windows or Linux. + // * OPERATING_SYSTEM - The operating system. Examples are Windows or + // Linux. // - // * OPERATION - The action - // performed. Examples include RunInstance and CreateBucket. + // * OPERATION - The action performed. Examples include RunInstance and + // CreateBucket. // - // * PLATFORM - The - // Amazon EC2 operating system. Examples are Windows or Linux. 
+ // * PLATFORM - The Amazon EC2 operating system. Examples are + // Windows or Linux. // - // * PURCHASE_TYPE - - // The reservation type of the purchase to which this usage is related. Examples - // include On-Demand Instances and Standard Reserved Instances. + // * PURCHASE_TYPE - The reservation type of the purchase to + // which this usage is related. Examples include On-Demand Instances and Standard + // Reserved Instances. // - // * SERVICE - The - // AWS service such as Amazon DynamoDB. + // * SERVICE - The Amazon Web Services service such as Amazon + // DynamoDB. // - // * USAGE_TYPE - The type of usage. An - // example is DataTransfer-In-Bytes. The response for the GetDimensionValues - // operation includes a unit attribute. Examples include GB and Hrs. + // * USAGE_TYPE - The type of usage. An example is + // DataTransfer-In-Bytes. The response for the GetDimensionValues operation + // includes a unit attribute. Examples include GB and Hrs. // - // * - // USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: - // CloudWatch – Alarms. The response for this operation includes a unit - // attribute. + // * USAGE_TYPE_GROUP - + // The grouping of common usage types. An example is Amazon EC2: CloudWatch – + // Alarms. The response for this operation includes a unit attribute. // - // * REGION - The AWS Region. + // * REGION - + // The Amazon Web Services Region. // - // * RECORD_TYPE - The different types of - // charges such as RI fees, usage costs, tax refunds, and credits. + // * RECORD_TYPE - The different types of charges + // such as RI fees, usage costs, tax refunds, and credits. // - // * RESOURCE_ID - - // The unique identifier of the resource. ResourceId is an opt-in feature only + // * RESOURCE_ID - The + // unique identifier of the resource. ResourceId is an opt-in feature only // available for last 14 days for EC2-Compute Service. 
// // If you set the context to @@ -120,44 +121,45 @@ type GetDimensionValuesInput struct { // // * LINKED_ACCOUNT - The description in the // attribute map that includes the full name of the member account. The value field - // contains the AWS ID of the member account. + // contains the Amazon Web Services ID of the member account. // - // * PLATFORM - The Amazon EC2 - // operating system. Examples are Windows or Linux. + // * PLATFORM - The + // Amazon EC2 operating system. Examples are Windows or Linux. // - // * REGION - The AWS Region. + // * REGION - The + // Amazon Web Services Region. // - // * - // SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are - // regional or a single Availability Zone. + // * SCOPE (Utilization only) - The scope of a + // Reserved Instance (RI). Values are regional or a single Availability Zone. // - // * TAG (Coverage only) - The tags that - // are associated with a Reserved Instance (RI). + // * + // TAG (Coverage only) - The tags that are associated with a Reserved Instance + // (RI). // - // * TENANCY - The tenancy of a - // resource. Examples are shared or dedicated. + // * TENANCY - The tenancy of a resource. Examples are shared or + // dedicated. // - // If you set the context to - // SAVINGS_PLANS, you can use the following dimensions for searching: + // If you set the context to SAVINGS_PLANS, you can use the following + // dimensions for searching: // - // * - // SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) + // * SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 + // Instance or Compute) // - // * - // PAYMENT_OPTION - Payment option for the given Savings Plans (for example, All - // Upfront) + // * PAYMENT_OPTION - Payment option for the given Savings + // Plans (for example, All Upfront) // - // * REGION - The AWS Region. + // * REGION - The Amazon Web Services Region. 
// - // * INSTANCE_TYPE_FAMILY - The family of - // instances (For example, m5) + // * + // INSTANCE_TYPE_FAMILY - The family of instances (For example, m5) // - // * LINKED_ACCOUNT - The description in the attribute - // map that includes the full name of the member account. The value field contains - // the AWS ID of the member account. + // * + // LINKED_ACCOUNT - The description in the attribute map that includes the full + // name of the member account. The value field contains the Amazon Web Services ID + // of the member account. // - // * SAVINGS_PLAN_ARN - The unique identifier - // for your Savings Plan + // * SAVINGS_PLAN_ARN - The unique identifier for your + // Savings Plan Context types.Context // Use Expression to filter by cost or by usage. There are two patterns: @@ -166,32 +168,33 @@ type GetDimensionValuesInput struct { // dimension values - You can set the dimension name and values for the filters // that you plan to use. For example, you can filter for REGION==us-east-1 OR // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name - // (for example, REGION==US East (N. Virginia). The Expression example looks like: - // { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } - // The list of dimension values are OR'd together to retrieve cost or usage data. - // You can create Expression and DimensionValues objects using either with* methods - // or set* methods in multiple lines. - // - // * Compound dimension values with logical - // operations - You can use multiple Expression types and the logical operators - // AND/OR/NOT to create a list of one or more Expression objects. This allows you - // to filter on more advanced options. For example, you can filter on ((REGION == - // us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != - // DataTransfer). 
The Expression for that looks like this: { "And": [ {"Or": [ - // {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, - // {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if more than - // one is specified. The following example shows an Expression object that creates - // an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", - // "Values": [ "DataTransfer" ] } } - // - // For the GetRightsizingRecommendation action, a - // combination of OR and NOT is not supported. OR is not supported between - // different dimensions, or dimensions and tags. NOT operators aren't supported. - // Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For - // the GetReservationPurchaseRecommendation action, only NOT is supported. AND and - // OR are not supported. Dimensions are limited to LINKED_ACCOUNT. + // (for example, REGION==US East (N. Virginia). The Expression example is as + // follows: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” + // ] } } The list of dimension values are OR'd together to retrieve cost or usage + // data. You can create Expression and DimensionValues objects using either with* + // methods or set* methods in multiple lines. + // + // * Compound dimension values with + // logical operations - You can use multiple Expression types and the logical + // operators AND/OR/NOT to create a list of one or more Expression objects. By + // doing this, you can filter on more advanced options. For example, you can filter + // on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND + // (USAGE_TYPE != DataTransfer). 
The Expression for that is as follows: { "And": [ + // {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" + // ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": + // {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } + // Because each Expression can have only one operator, the service returns an error + // if more than one is specified. The following example shows an Expression object + // that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": + // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // + // For the + // GetRightsizingRecommendation action, a combination of OR and NOT isn't + // supported. OR isn't supported between different dimensions, or dimensions and + // tags. NOT operators aren't supported. Dimensions are also limited to + // LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For the + // GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR + // aren't supported. Dimensions are limited to LINKED_ACCOUNT. Filter *types.Expression // This field is only used when SortBy is provided in the request. The maximum @@ -201,8 +204,9 @@ type GetDimensionValuesInput struct { // 1000. MaxResults int32 - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // The value that you want to search the filter values for. @@ -252,75 +256,76 @@ type GetDimensionValuesOutput struct { // Amazon EC2 instance. An example is m4.xlarge. // // * LEGAL_ENTITY_NAME - The name of - // the organization that sells you AWS services, such as Amazon Web Services. 
+ // the organization that sells you Amazon Web Services services, such as Amazon Web + // Services. // - // * - // LINKED_ACCOUNT - The description in the attribute map that includes the full - // name of the member account. The value field contains the AWS ID of the member - // account. + // * LINKED_ACCOUNT - The description in the attribute map that includes + // the full name of the member account. The value field contains the Amazon Web + // Services ID of the member account. // - // * OPERATING_SYSTEM - The operating system. Examples are Windows or - // Linux. + // * OPERATING_SYSTEM - The operating system. + // Examples are Windows or Linux. // - // * OPERATION - The action performed. Examples include RunInstance and - // CreateBucket. + // * OPERATION - The action performed. Examples + // include RunInstance and CreateBucket. // - // * PLATFORM - The Amazon EC2 operating system. Examples are - // Windows or Linux. + // * PLATFORM - The Amazon EC2 operating + // system. Examples are Windows or Linux. // - // * PURCHASE_TYPE - The reservation type of the purchase to - // which this usage is related. Examples include On-Demand Instances and Standard - // Reserved Instances. + // * PURCHASE_TYPE - The reservation type + // of the purchase to which this usage is related. Examples include On-Demand + // Instances and Standard Reserved Instances. // - // * SERVICE - The AWS service such as Amazon DynamoDB. + // * SERVICE - The Amazon Web Services + // service such as Amazon DynamoDB. // - // * - // USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The - // response for the GetDimensionValues operation includes a unit attribute. - // Examples include GB and Hrs. + // * USAGE_TYPE - The type of usage. An example + // is DataTransfer-In-Bytes. The response for the GetDimensionValues operation + // includes a unit attribute. Examples include GB and Hrs. // - // * USAGE_TYPE_GROUP - The grouping of common usage - // types. 
An example is Amazon EC2: CloudWatch – Alarms. The response for this - // operation includes a unit attribute. + // * USAGE_TYPE_GROUP - + // The grouping of common usage types. An example is Amazon EC2: CloudWatch – + // Alarms. The response for this operation includes a unit attribute. // - // * RECORD_TYPE - The different types of - // charges such as RI fees, usage costs, tax refunds, and credits. + // * + // RECORD_TYPE - The different types of charges such as RI fees, usage costs, tax + // refunds, and credits. // - // * RESOURCE_ID - - // The unique identifier of the resource. ResourceId is an opt-in feature only - // available for last 14 days for EC2-Compute Service. + // * RESOURCE_ID - The unique identifier of the resource. + // ResourceId is an opt-in feature only available for last 14 days for EC2-Compute + // Service. // - // If you set the context to - // RESERVATIONS, you can use the following dimensions for searching: + // If you set the context to RESERVATIONS, you can use the following + // dimensions for searching: // - // * AZ - The - // Availability Zone. An example is us-east-1a. + // * AZ - The Availability Zone. An example is + // us-east-1a. // - // * CACHE_ENGINE - The Amazon - // ElastiCache operating system. Examples are Windows or Linux. + // * CACHE_ENGINE - The Amazon ElastiCache operating system. Examples + // are Windows or Linux. // - // * - // DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. - // Valid values are SingleAZ and MultiAZ. + // * DEPLOYMENT_OPTION - The scope of Amazon Relational + // Database Service deployments. Valid values are SingleAZ and MultiAZ. // - // * INSTANCE_TYPE - The type of Amazon EC2 - // instance. An example is m4.xlarge. + // * + // INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. // - // * LINKED_ACCOUNT - The description in the - // attribute map that includes the full name of the member account. 
The value field - // contains the AWS ID of the member account. + // * + // LINKED_ACCOUNT - The description in the attribute map that includes the full + // name of the member account. The value field contains the Amazon Web Services ID + // of the member account. // - // * PLATFORM - The Amazon EC2 - // operating system. Examples are Windows or Linux. + // * PLATFORM - The Amazon EC2 operating system. Examples + // are Windows or Linux. // - // * REGION - The AWS Region. + // * REGION - The Amazon Web Services Region. // - // * - // SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are - // regional or a single Availability Zone. + // * SCOPE + // (Utilization only) - The scope of a Reserved Instance (RI). Values are regional + // or a single Availability Zone. // - // * TAG (Coverage only) - The tags that - // are associated with a Reserved Instance (RI). + // * TAG (Coverage only) - The tags that are + // associated with a Reserved Instance (RI). // // * TENANCY - The tenancy of a // resource. Examples are shared or dedicated. @@ -335,22 +340,22 @@ type GetDimensionValuesOutput struct { // PAYMENT_OPTION - Payment option for the given Savings Plans (for example, All // Upfront) // - // * REGION - The AWS Region. + // * REGION - The Amazon Web Services Region. // - // * INSTANCE_TYPE_FAMILY - The family of - // instances (For example, m5) + // * INSTANCE_TYPE_FAMILY - + // The family of instances (For example, m5) // - // * LINKED_ACCOUNT - The description in the attribute - // map that includes the full name of the member account. The value field contains - // the AWS ID of the member account. + // * LINKED_ACCOUNT - The description in + // the attribute map that includes the full name of the member account. The value + // field contains the Amazon Web Services ID of the member account. 
// - // * SAVINGS_PLAN_ARN - The unique identifier - // for your Savings Plan + // * + // SAVINGS_PLAN_ARN - The unique identifier for your Savings Plan // // This member is required. DimensionValues []types.DimensionValuesWithAttributes - // The number of results that AWS returned at one time. + // The number of results that Amazon Web Services returned at one time. // // This member is required. ReturnSize *int32 @@ -360,8 +365,9 @@ type GetDimensionValuesOutput struct { // This member is required. TotalSize *int32 - // The token for the next set of retrievable results. AWS provides the token when - // the response from a previous call has more results than the maximum page size. + // The token for the next set of retrievable results. Amazon Web Services provides + // the token when the response from a previous call has more results than the + // maximum page size. NextPageToken *string // Metadata pertaining to the operation's result. diff --git a/service/costexplorer/api_op_GetReservationCoverage.go b/service/costexplorer/api_op_GetReservationCoverage.go index 7cb3cb62c33..9c8bdbb96bf 100644 --- a/service/costexplorer/api_op_GetReservationCoverage.go +++ b/service/costexplorer/api_op_GetReservationCoverage.go @@ -113,11 +113,11 @@ type GetReservationCoverageInput struct { // defaults to EC2. Cost category is also supported. Filter *types.Expression - // The granularity of the AWS cost data for the reservation. Valid values are - // MONTHLY and DAILY. If GroupBy is set, Granularity can't be set. If Granularity - // isn't set, the response object doesn't include Granularity, either MONTHLY or - // DAILY. The GetReservationCoverage operation supports only DAILY and MONTHLY - // granularities. + // The granularity of the Amazon Web Services cost data for the reservation. Valid + // values are MONTHLY and DAILY. If GroupBy is set, Granularity can't be set. If + // Granularity isn't set, the response object doesn't include Granularity, either + // MONTHLY or DAILY. 
The GetReservationCoverage operation supports only DAILY and + // MONTHLY granularities. Granularity types.Granularity // You can group the data by the following attributes: @@ -146,16 +146,18 @@ type GetReservationCoverageInput struct { GroupBy []types.GroupDefinition // The maximum number of objects that you returned for this request. If more - // objects are available, in the response, AWS provides a NextPageToken value that - // you can use in a subsequent call to get the next batch of objects. + // objects are available, in the response, Amazon Web Services provides a + // NextPageToken value that you can use in a subsequent call to get the next batch + // of objects. MaxResults int32 // The measurement that you want your reservation coverage reported in. Valid // values are Hour, Unit, and Cost. You can use multiple values in a request. Metrics []string - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // The value by which you want to sort the data. The following values are supported @@ -198,8 +200,9 @@ type GetReservationCoverageOutput struct { // This member is required. CoveragesByTime []types.CoverageByTime - // The token for the next set of retrievable results. AWS provides the token when - // the response from a previous call has more results than the maximum page size. + // The token for the next set of retrievable results. Amazon Web Services provides + // the token when the response from a previous call has more results than the + // maximum page size. NextPageToken *string // The total amount of instance usage that a reservation covered. 
diff --git a/service/costexplorer/api_op_GetReservationPurchaseRecommendation.go b/service/costexplorer/api_op_GetReservationPurchaseRecommendation.go index 43db2c0f50c..cb64b42a79b 100644 --- a/service/costexplorer/api_op_GetReservationPurchaseRecommendation.go +++ b/service/costexplorer/api_op_GetReservationPurchaseRecommendation.go @@ -13,19 +13,21 @@ import ( // Gets recommendations for which reservations to purchase. These recommendations // could help you reduce your costs. Reservations provide a discounted hourly rate -// (up to 75%) compared to On-Demand pricing. AWS generates your recommendations by -// identifying your On-Demand usage during a specific time period and collecting -// your usage into categories that are eligible for a reservation. After AWS has -// these categories, it simulates every combination of reservations in each -// category of usage to identify the best number of each type of RI to purchase to -// maximize your estimated savings. For example, AWS automatically aggregates your -// Amazon EC2 Linux, shared tenancy, and c4 family usage in the US West (Oregon) -// Region and recommends that you buy size-flexible regional reservations to apply -// to the c4 family usage. AWS recommends the smallest size instance in an instance -// family. This makes it easier to purchase a size-flexible RI. AWS also shows the -// equal number of normalized units so that you can purchase any instance size that -// you want. For this example, your RI recommendation would be for c4.large because -// that is the smallest size instance in the c4 instance family. +// (up to 75%) compared to On-Demand pricing. Amazon Web Services generates your +// recommendations by identifying your On-Demand usage during a specific time +// period and collecting your usage into categories that are eligible for a +// reservation. 
After Amazon Web Services has these categories, it simulates every +// combination of reservations in each category of usage to identify the best +// number of each type of RI to purchase to maximize your estimated savings. For +// example, Amazon Web Services automatically aggregates your Amazon EC2 Linux, +// shared tenancy, and c4 family usage in the US West (Oregon) Region and +// recommends that you buy size-flexible regional reservations to apply to the c4 +// family usage. Amazon Web Services recommends the smallest size instance in an +// instance family. This makes it easier to purchase a size-flexible RI. Amazon Web +// Services also shows the equal number of normalized units so that you can +// purchase any instance size that you want. For this example, your RI +// recommendation would be for c4.large because that is the smallest size instance +// in the c4 instance family. func (c *Client) GetReservationPurchaseRecommendation(ctx context.Context, params *GetReservationPurchaseRecommendationInput, optFns ...func(*Options)) (*GetReservationPurchaseRecommendationOutput, error) { if params == nil { params = &GetReservationPurchaseRecommendationInput{} @@ -63,36 +65,37 @@ type GetReservationPurchaseRecommendationInput struct { // dimension values - You can set the dimension name and values for the filters // that you plan to use. For example, you can filter for REGION==us-east-1 OR // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name - // (for example, REGION==US East (N. Virginia). The Expression example looks like: - // { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } - // The list of dimension values are OR'd together to retrieve cost or usage data. - // You can create Expression and DimensionValues objects using either with* methods - // or set* methods in multiple lines. + // (for example, REGION==US East (N. Virginia). 
The Expression example is as + // follows: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” + // ] } } The list of dimension values are OR'd together to retrieve cost or usage + // data. You can create Expression and DimensionValues objects using either with* + // methods or set* methods in multiple lines. // - // * Compound dimension values with logical - // operations - You can use multiple Expression types and the logical operators - // AND/OR/NOT to create a list of one or more Expression objects. This allows you - // to filter on more advanced options. For example, you can filter on ((REGION == - // us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != - // DataTransfer). The Expression for that looks like this: { "And": [ {"Or": [ - // {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, - // {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if more than - // one is specified. The following example shows an Expression object that creates - // an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", - // "Values": [ "DataTransfer" ] } } + // * Compound dimension values with + // logical operations - You can use multiple Expression types and the logical + // operators AND/OR/NOT to create a list of one or more Expression objects. By + // doing this, you can filter on more advanced options. For example, you can filter + // on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND + // (USAGE_TYPE != DataTransfer). 
The Expression for that is as follows: { "And": [ + // {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" + // ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": + // {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } + // Because each Expression can have only one operator, the service returns an error + // if more than one is specified. The following example shows an Expression object + // that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": + // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } // - // For the GetRightsizingRecommendation action, a - // combination of OR and NOT is not supported. OR is not supported between - // different dimensions, or dimensions and tags. NOT operators aren't supported. - // Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For - // the GetReservationPurchaseRecommendation action, only NOT is supported. AND and - // OR are not supported. Dimensions are limited to LINKED_ACCOUNT. + // For the + // GetRightsizingRecommendation action, a combination of OR and NOT isn't + // supported. OR isn't supported between different dimensions, or dimensions and + // tags. NOT operators aren't supported. Dimensions are also limited to + // LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For the + // GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR + // aren't supported. Dimensions are limited to LINKED_ACCOUNT. Filter *types.Expression - // The number of previous days that you want AWS to consider when it calculates - // your recommendations. + // The number of previous days that you want Amazon Web Services to consider when + // it calculates your recommendations. 
LookbackPeriodInDays types.LookbackPeriodInDays // The pagination token that indicates the next set of results that you want to diff --git a/service/costexplorer/api_op_GetReservationUtilization.go b/service/costexplorer/api_op_GetReservationUtilization.go index b0c1eb3280f..bf34c99459b 100644 --- a/service/costexplorer/api_op_GetReservationUtilization.go +++ b/service/costexplorer/api_op_GetReservationUtilization.go @@ -85,12 +85,14 @@ type GetReservationUtilizationInput struct { GroupBy []types.GroupDefinition // The maximum number of objects that you returned for this request. If more - // objects are available, in the response, AWS provides a NextPageToken value that - // you can use in a subsequent call to get the next batch of objects. + // objects are available, in the response, Amazon Web Services provides a + // NextPageToken value that you can use in a subsequent call to get the next batch + // of objects. MaxResults int32 - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // The value by which you want to sort the data. The following values are supported @@ -148,8 +150,9 @@ type GetReservationUtilizationOutput struct { // This member is required. UtilizationsByTime []types.UtilizationByTime - // The token for the next set of retrievable results. AWS provides the token when - // the response from a previous call has more results than the maximum page size. + // The token for the next set of retrievable results. Amazon Web Services provides + // the token when the response from a previous call has more results than the + // maximum page size. NextPageToken *string // The total amount of time that you used your RIs. 
diff --git a/service/costexplorer/api_op_GetRightsizingRecommendation.go b/service/costexplorer/api_op_GetRightsizingRecommendation.go index 8f4b85604bc..779380623be 100644 --- a/service/costexplorer/api_op_GetRightsizingRecommendation.go +++ b/service/costexplorer/api_op_GetRightsizingRecommendation.go @@ -17,7 +17,7 @@ import ( // metrics. For details on calculation and function, see Optimizing Your Cost with // Rightsizing Recommendations // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/ce-rightsizing.html) -// in the AWS Billing and Cost Management User Guide. +// in the Billing and Cost Management User Guide. func (c *Client) GetRightsizingRecommendation(ctx context.Context, params *GetRightsizingRecommendationInput, optFns ...func(*Options)) (*GetRightsizingRecommendationOutput, error) { if params == nil { params = &GetRightsizingRecommendationInput{} @@ -54,32 +54,33 @@ type GetRightsizingRecommendationInput struct { // dimension values - You can set the dimension name and values for the filters // that you plan to use. For example, you can filter for REGION==us-east-1 OR // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name - // (for example, REGION==US East (N. Virginia). The Expression example looks like: - // { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } - // The list of dimension values are OR'd together to retrieve cost or usage data. - // You can create Expression and DimensionValues objects using either with* methods - // or set* methods in multiple lines. + // (for example, REGION==US East (N. Virginia). The Expression example is as + // follows: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” + // ] } } The list of dimension values are OR'd together to retrieve cost or usage + // data. You can create Expression and DimensionValues objects using either with* + // methods or set* methods in multiple lines. 
// - // * Compound dimension values with logical - // operations - You can use multiple Expression types and the logical operators - // AND/OR/NOT to create a list of one or more Expression objects. This allows you - // to filter on more advanced options. For example, you can filter on ((REGION == - // us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != - // DataTransfer). The Expression for that looks like this: { "And": [ {"Or": [ - // {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, - // {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if more than - // one is specified. The following example shows an Expression object that creates - // an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", - // "Values": [ "DataTransfer" ] } } + // * Compound dimension values with + // logical operations - You can use multiple Expression types and the logical + // operators AND/OR/NOT to create a list of one or more Expression objects. By + // doing this, you can filter on more advanced options. For example, you can filter + // on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND + // (USAGE_TYPE != DataTransfer). The Expression for that is as follows: { "And": [ + // {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" + // ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": + // {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } + // Because each Expression can have only one operator, the service returns an error + // if more than one is specified. The following example shows an Expression object + // that creates an error. { "And": [ ... 
], "DimensionValues": { "Dimension": + // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } // - // For the GetRightsizingRecommendation action, a - // combination of OR and NOT is not supported. OR is not supported between - // different dimensions, or dimensions and tags. NOT operators aren't supported. - // Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For - // the GetReservationPurchaseRecommendation action, only NOT is supported. AND and - // OR are not supported. Dimensions are limited to LINKED_ACCOUNT. + // For the + // GetRightsizingRecommendation action, a combination of OR and NOT isn't + // supported. OR isn't supported between different dimensions, or dimensions and + // tags. NOT operators aren't supported. Dimensions are also limited to + // LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For the + // GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR + // aren't supported. Dimensions are limited to LINKED_ACCOUNT. Filter *types.Expression // The pagination token that indicates the next set of results that you want to diff --git a/service/costexplorer/api_op_GetSavingsPlansPurchaseRecommendation.go b/service/costexplorer/api_op_GetSavingsPlansPurchaseRecommendation.go index 74f3168f008..964c79d53eb 100644 --- a/service/costexplorer/api_op_GetSavingsPlansPurchaseRecommendation.go +++ b/service/costexplorer/api_op_GetSavingsPlansPurchaseRecommendation.go @@ -84,8 +84,9 @@ type GetSavingsPlansPurchaseRecommendationOutput struct { // Information regarding this specific recommendation set. Metadata *types.SavingsPlansPurchaseRecommendationMetadata - // The token for the next set of retrievable results. AWS provides the token when - // the response from a previous call has more results than the maximum page size. + // The token for the next set of retrievable results. Amazon Web Services provides + // the token when the response from a previous call has more results than the + // maximum page size. 
NextPageToken *string // Contains your request parameters, Savings Plan Recommendations Summary, and diff --git a/service/costexplorer/api_op_GetTags.go b/service/costexplorer/api_op_GetTags.go index f09064ae437..74055a6af93 100644 --- a/service/costexplorer/api_op_GetTags.go +++ b/service/costexplorer/api_op_GetTags.go @@ -44,32 +44,33 @@ type GetTagsInput struct { // dimension values - You can set the dimension name and values for the filters // that you plan to use. For example, you can filter for REGION==us-east-1 OR // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name - // (for example, REGION==US East (N. Virginia). The Expression example looks like: - // { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } - // The list of dimension values are OR'd together to retrieve cost or usage data. - // You can create Expression and DimensionValues objects using either with* methods - // or set* methods in multiple lines. + // (for example, REGION==US East (N. Virginia). The Expression example is as + // follows: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” + // ] } } The list of dimension values are OR'd together to retrieve cost or usage + // data. You can create Expression and DimensionValues objects using either with* + // methods or set* methods in multiple lines. // - // * Compound dimension values with logical - // operations - You can use multiple Expression types and the logical operators - // AND/OR/NOT to create a list of one or more Expression objects. This allows you - // to filter on more advanced options. For example, you can filter on ((REGION == - // us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != - // DataTransfer). 
The Expression for that looks like this: { "And": [ {"Or": [ - // {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, - // {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if more than - // one is specified. The following example shows an Expression object that creates - // an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", - // "Values": [ "DataTransfer" ] } } + // * Compound dimension values with + // logical operations - You can use multiple Expression types and the logical + // operators AND/OR/NOT to create a list of one or more Expression objects. By + // doing this, you can filter on more advanced options. For example, you can filter + // on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND + // (USAGE_TYPE != DataTransfer). The Expression for that is as follows: { "And": [ + // {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" + // ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": + // {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } + // Because each Expression can have only one operator, the service returns an error + // if more than one is specified. The following example shows an Expression object + // that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": + // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } // - // For the GetRightsizingRecommendation action, a - // combination of OR and NOT is not supported. OR is not supported between - // different dimensions, or dimensions and tags. NOT operators aren't supported. - // Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For - // the GetReservationPurchaseRecommendation action, only NOT is supported. 
AND and - // OR are not supported. Dimensions are limited to LINKED_ACCOUNT. + // For the + // GetRightsizingRecommendation action, a combination of OR and NOT isn't + // supported. OR isn't supported between different dimensions, or dimensions and + // tags. NOT operators aren't supported. Dimensions are also limited to + // LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For the + // GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR + // aren't supported. Dimensions are limited to LINKED_ACCOUNT. Filter *types.Expression // This field is only used when SortBy is provided in the request. The maximum @@ -78,8 +79,9 @@ type GetTagsInput struct { // for this parameter. For GetTags, MaxResults has an upper limit of 1000. MaxResults int32 - // The token to retrieve the next set of results. AWS provides the token when the - // response from a previous call has more results than the maximum page size. + // The token to retrieve the next set of results. Amazon Web Services provides the + // token when the response from a previous call has more results than the maximum + // page size. NextPageToken *string // The value that you want to search for. @@ -116,7 +118,7 @@ type GetTagsInput struct { type GetTagsOutput struct { - // The number of query results that AWS returns at a time. + // The number of query results that Amazon Web Services returns at a time. // // This member is required. ReturnSize *int32 @@ -131,8 +133,9 @@ type GetTagsOutput struct { // This member is required. TotalSize *int32 - // The token for the next set of retrievable results. AWS provides the token when - // the response from a previous call has more results than the maximum page size. + // The token for the next set of retrievable results. Amazon Web Services provides + // the token when the response from a previous call has more results than the + // maximum page size. NextPageToken *string // Metadata pertaining to the operation's result. 
diff --git a/service/costexplorer/api_op_UpdateAnomalyMonitor.go b/service/costexplorer/api_op_UpdateAnomalyMonitor.go index 393dc34f3a9..5c5af3cfcb5 100644 --- a/service/costexplorer/api_op_UpdateAnomalyMonitor.go +++ b/service/costexplorer/api_op_UpdateAnomalyMonitor.go @@ -11,7 +11,7 @@ import ( ) // Updates an existing cost anomaly monitor. The changes made are applied going -// forward, and does not change anomalies detected in the past. +// forward, and doesn't change anomalies detected in the past. func (c *Client) UpdateAnomalyMonitor(ctx context.Context, params *UpdateAnomalyMonitorInput, optFns ...func(*Options)) (*UpdateAnomalyMonitorOutput, error) { if params == nil { params = &UpdateAnomalyMonitorInput{} } diff --git a/service/costexplorer/api_op_UpdateAnomalySubscription.go b/service/costexplorer/api_op_UpdateAnomalySubscription.go index 76ea5038719..f65633839cd 100644 --- a/service/costexplorer/api_op_UpdateAnomalySubscription.go +++ b/service/costexplorer/api_op_UpdateAnomalySubscription.go @@ -34,8 +34,7 @@ type UpdateAnomalySubscriptionInput struct { // This member is required. SubscriptionArn *string - // The update to the frequency value at which subscribers will receive - // notifications. + // The update to the frequency value at which subscribers receive notifications. Frequency types.AnomalySubscriptionFrequency // A list of cost anomaly monitor ARNs. @@ -44,7 +43,7 @@ type UpdateAnomalySubscriptionInput struct { // The update to the subscriber list. Subscribers []types.Subscriber - // The subscription's new name. + // The new name of the subscription. SubscriptionName *string // The update to the threshold value for receiving notifications.
diff --git a/service/costexplorer/api_op_UpdateCostCategoryDefinition.go b/service/costexplorer/api_op_UpdateCostCategoryDefinition.go index 4df0176a606..fa4bc8a8eb1 100644 --- a/service/costexplorer/api_op_UpdateCostCategoryDefinition.go +++ b/service/costexplorer/api_op_UpdateCostCategoryDefinition.go @@ -51,6 +51,10 @@ type UpdateCostCategoryDefinitionInput struct { // The default value for the cost category. DefaultValue *string + // The split charge rules used to allocate your charges between your Cost Category + // values. + SplitChargeRules []types.CostCategorySplitChargeRule + noSmithyDocumentSerde } diff --git a/service/costexplorer/deserializers.go b/service/costexplorer/deserializers.go index 3cda8b5b359..affe4f0c834 100644 --- a/service/costexplorer/deserializers.go +++ b/service/costexplorer/deserializers.go @@ -4541,6 +4541,11 @@ func awsAwsjson11_deserializeDocumentCostCategory(v **types.CostCategory, value sv.RuleVersion = types.CostCategoryRuleVersion(jtv) } + case "SplitChargeRules": + if err := awsAwsjson11_deserializeDocumentCostCategorySplitChargeRulesList(&sv.SplitChargeRules, value); err != nil { + return err + } + default: _, _ = key, value @@ -4944,6 +4949,250 @@ func awsAwsjson11_deserializeDocumentCostCategoryRulesList(v *[]types.CostCatego return nil } +func awsAwsjson11_deserializeDocumentCostCategorySplitChargeRule(v **types.CostCategorySplitChargeRule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CostCategorySplitChargeRule + if *v == nil { + sv = &types.CostCategorySplitChargeRule{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Method": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CostCategorySplitChargeMethod to be of type string, 
got %T instead", value) + } + sv.Method = types.CostCategorySplitChargeMethod(jtv) + } + + case "Parameters": + if err := awsAwsjson11_deserializeDocumentCostCategorySplitChargeRuleParametersList(&sv.Parameters, value); err != nil { + return err + } + + case "Source": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.Source = ptr.String(jtv) + } + + case "Targets": + if err := awsAwsjson11_deserializeDocumentCostCategorySplitChargeRuleTargetsList(&sv.Targets, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCostCategorySplitChargeRuleParameter(v **types.CostCategorySplitChargeRuleParameter, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CostCategorySplitChargeRuleParameter + if *v == nil { + sv = &types.CostCategorySplitChargeRuleParameter{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CostCategorySplitChargeRuleParameterType to be of type string, got %T instead", value) + } + sv.Type = types.CostCategorySplitChargeRuleParameterType(jtv) + } + + case "Values": + if err := awsAwsjson11_deserializeDocumentCostCategorySplitChargeRuleParameterValuesList(&sv.Values, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCostCategorySplitChargeRuleParametersList(v *[]types.CostCategorySplitChargeRuleParameter, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { 
+ return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CostCategorySplitChargeRuleParameter + if *v == nil { + cv = []types.CostCategorySplitChargeRuleParameter{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CostCategorySplitChargeRuleParameter + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCostCategorySplitChargeRuleParameter(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCostCategorySplitChargeRuleParameterValuesList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCostCategorySplitChargeRulesList(v *[]types.CostCategorySplitChargeRule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CostCategorySplitChargeRule + if *v == nil { + cv = []types.CostCategorySplitChargeRule{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CostCategorySplitChargeRule + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCostCategorySplitChargeRule(&destAddr, value); err != nil { + return err + } + col 
= *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCostCategorySplitChargeRuleTargetsList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentCostCategoryValues(v **types.CostCategoryValues, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/costexplorer/doc.go b/service/costexplorer/doc.go index 1eccef2eac2..a5daff3b619 100644 --- a/service/costexplorer/doc.go +++ b/service/costexplorer/doc.go @@ -3,16 +3,16 @@ // Package costexplorer provides the API client, operations, and parameter types // for AWS Cost Explorer Service. // -// The Cost Explorer API enables you to programmatically query your cost and usage +// You can use the Cost Explorer API to programmatically query your cost and usage // data. You can query for aggregated data such as total monthly costs or total -// daily usage. You can also query for granular data, such as the number of daily -// write operations for Amazon DynamoDB database tables in your production +// daily usage. You can also query for granular data. This might include the number +// of daily write operations for Amazon DynamoDB database tables in your production // environment. 
Service Endpoint The Cost Explorer API provides the following // endpoint: // // * https://ce.us-east-1.amazonaws.com // -// For information about costs -// associated with the Cost Explorer API, see AWS Cost Management Pricing -// (http://aws.amazon.com/aws-cost-management/pricing/). +// For information about the costs +// that are associated with the Cost Explorer API, see Amazon Web Services Cost +// Management Pricing (http://aws.amazon.com/aws-cost-management/pricing/). package costexplorer diff --git a/service/costexplorer/serializers.go b/service/costexplorer/serializers.go index 2e1fa85e8cc..a3a3dbdd6f4 100644 --- a/service/costexplorer/serializers.go +++ b/service/costexplorer/serializers.go @@ -1615,6 +1615,104 @@ func awsAwsjson11_serializeDocumentCostCategoryRulesList(v []types.CostCategoryR return nil } +func awsAwsjson11_serializeDocumentCostCategorySplitChargeRule(v *types.CostCategorySplitChargeRule, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Method) > 0 { + ok := object.Key("Method") + ok.String(string(v.Method)) + } + + if v.Parameters != nil { + ok := object.Key("Parameters") + if err := awsAwsjson11_serializeDocumentCostCategorySplitChargeRuleParametersList(v.Parameters, ok); err != nil { + return err + } + } + + if v.Source != nil { + ok := object.Key("Source") + ok.String(*v.Source) + } + + if v.Targets != nil { + ok := object.Key("Targets") + if err := awsAwsjson11_serializeDocumentCostCategorySplitChargeRuleTargetsList(v.Targets, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentCostCategorySplitChargeRuleParameter(v *types.CostCategorySplitChargeRuleParameter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Type) > 0 { + ok := object.Key("Type") + ok.String(string(v.Type)) + } + + if v.Values != nil { + ok := object.Key("Values") + if err := 
awsAwsjson11_serializeDocumentCostCategorySplitChargeRuleParameterValuesList(v.Values, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentCostCategorySplitChargeRuleParametersList(v []types.CostCategorySplitChargeRuleParameter, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentCostCategorySplitChargeRuleParameter(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentCostCategorySplitChargeRuleParameterValuesList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentCostCategorySplitChargeRulesList(v []types.CostCategorySplitChargeRule, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentCostCategorySplitChargeRule(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentCostCategorySplitChargeRuleTargetsList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsAwsjson11_serializeDocumentCostCategoryValues(v *types.CostCategoryValues, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -2067,6 +2165,13 @@ func awsAwsjson11_serializeOpDocumentCreateCostCategoryDefinitionInput(v *Create ok.String(string(v.RuleVersion)) } + if v.SplitChargeRules != nil { + ok := object.Key("SplitChargeRules") + if err := awsAwsjson11_serializeDocumentCostCategorySplitChargeRulesList(v.SplitChargeRules, ok); err != nil { + return err + } + } + return nil } @@ -3052,5 +3157,12 @@ func 
awsAwsjson11_serializeOpDocumentUpdateCostCategoryDefinitionInput(v *Update ok.String(string(v.RuleVersion)) } + if v.SplitChargeRules != nil { + ok := object.Key("SplitChargeRules") + if err := awsAwsjson11_serializeDocumentCostCategorySplitChargeRulesList(v.SplitChargeRules, ok); err != nil { + return err + } + } + return nil } diff --git a/service/costexplorer/types/enums.go b/service/costexplorer/types/enums.go index 45054022e62..951a584aea5 100644 --- a/service/costexplorer/types/enums.go +++ b/service/costexplorer/types/enums.go @@ -133,6 +133,44 @@ func (CostCategoryRuleVersion) Values() []CostCategoryRuleVersion { } } +type CostCategorySplitChargeMethod string + +// Enum values for CostCategorySplitChargeMethod +const ( + CostCategorySplitChargeMethodFixed CostCategorySplitChargeMethod = "FIXED" + CostCategorySplitChargeMethodProportional CostCategorySplitChargeMethod = "PROPORTIONAL" + CostCategorySplitChargeMethodEven CostCategorySplitChargeMethod = "EVEN" +) + +// Values returns all known values for CostCategorySplitChargeMethod. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (CostCategorySplitChargeMethod) Values() []CostCategorySplitChargeMethod { + return []CostCategorySplitChargeMethod{ + "FIXED", + "PROPORTIONAL", + "EVEN", + } +} + +type CostCategorySplitChargeRuleParameterType string + +// Enum values for CostCategorySplitChargeRuleParameterType +const ( + CostCategorySplitChargeRuleParameterTypeAllocationPercentages CostCategorySplitChargeRuleParameterType = "ALLOCATION_PERCENTAGES" +) + +// Values returns all known values for CostCategorySplitChargeRuleParameterType. +// Note that this can be expanded in the future, and so it is only as up to date as +// the client. The ordering of this slice is not guaranteed to be stable across +// updates. 
+func (CostCategorySplitChargeRuleParameterType) Values() []CostCategorySplitChargeRuleParameterType { + return []CostCategorySplitChargeRuleParameterType{ + "ALLOCATION_PERCENTAGES", + } +} + type CostCategoryStatus string // Enum values for CostCategoryStatus diff --git a/service/costexplorer/types/types.go b/service/costexplorer/types/types.go index feb9ec64dc6..e3c409f21ef 100644 --- a/service/costexplorer/types/types.go +++ b/service/costexplorer/types/types.go @@ -36,7 +36,8 @@ type Anomaly struct { // The first day the anomaly is detected. AnomalyStartDate *string - // The dimension for the anomaly. For example, an AWS service in a service monitor. + // The dimension for the anomaly (for example, an Amazon Web Services service in a + // service monitor). DimensionValue *string // The feedback value. @@ -62,8 +63,8 @@ type AnomalyDateInterval struct { noSmithyDocumentSerde } -// This object continuously inspects your account's cost data for anomalies, based -// on MonitorType and MonitorSpecification. The content consists of detailed +// This object continuously inspects your account's cost data for anomalies. It's +// based on MonitorType and MonitorSpecification. The content consists of detailed // metadata and the current status of the monitor object. type AnomalyMonitor struct { @@ -101,38 +102,39 @@ type AnomalyMonitor struct { // dimension values - You can set the dimension name and values for the filters // that you plan to use. For example, you can filter for REGION==us-east-1 OR // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name - // (for example, REGION==US East (N. Virginia). The Expression example looks like: - // { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } - // The list of dimension values are OR'd together to retrieve cost or usage data. - // You can create Expression and DimensionValues objects using either with* methods - // or set* methods in multiple lines. 
+ // (for example, REGION==US East (N. Virginia). The Expression example is as + // follows: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” + // ] } } The list of dimension values are OR'd together to retrieve cost or usage + // data. You can create Expression and DimensionValues objects using either with* + // methods or set* methods in multiple lines. // - // * Compound dimension values with logical - // operations - You can use multiple Expression types and the logical operators - // AND/OR/NOT to create a list of one or more Expression objects. This allows you - // to filter on more advanced options. For example, you can filter on ((REGION == - // us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != - // DataTransfer). The Expression for that looks like this: { "And": [ {"Or": [ - // {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, - // {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if more than - // one is specified. The following example shows an Expression object that creates - // an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", - // "Values": [ "DataTransfer" ] } } + // * Compound dimension values with + // logical operations - You can use multiple Expression types and the logical + // operators AND/OR/NOT to create a list of one or more Expression objects. By + // doing this, you can filter on more advanced options. For example, you can filter + // on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND + // (USAGE_TYPE != DataTransfer). 
The Expression for that is as follows: { "And": [ + // {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" + // ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": + // {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } + // Because each Expression can have only one operator, the service returns an error + // if more than one is specified. The following example shows an Expression object + // that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": + // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } // - // For the GetRightsizingRecommendation action, a - // combination of OR and NOT is not supported. OR is not supported between - // different dimensions, or dimensions and tags. NOT operators aren't supported. - // Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For - // the GetReservationPurchaseRecommendation action, only NOT is supported. AND and - // OR are not supported. Dimensions are limited to LINKED_ACCOUNT. + // For the + // GetRightsizingRecommendation action, a combination of OR and NOT isn't + // supported. OR isn't supported between different dimensions, or dimensions and + // tags. NOT operators aren't supported. Dimensions are also limited to + // LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For the + // GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR + // aren't supported. Dimensions are limited to LINKED_ACCOUNT. MonitorSpecification *Expression noSmithyDocumentSerde } -// Quantifies the anomaly. The higher score means that it is more anomalous. +// Quantifies the anomaly. The higher score means that it's more anomalous. type AnomalyScore struct { // The last observed score. @@ -140,7 +142,7 @@ type AnomalyScore struct { // This member is required. CurrentScore float64 - // The maximum score observed during the AnomalyDateInterval. + // The maximum score that's observed during the AnomalyDateInterval. 
// // This member is required. MaxScore float64 @@ -154,7 +156,7 @@ type AnomalyScore struct { // of the AnomalySubscription object. type AnomalySubscription struct { - // The frequency at which anomaly reports are sent over email. + // The frequency that anomaly reports are sent over email. // // This member is required. Frequency AnomalySubscriptionFrequency @@ -197,7 +199,7 @@ type CostCategory struct { // This member is required. CostCategoryArn *string - // The Cost Category's effective start date. + // The effective start date of your Cost Category. // // This member is required. EffectiveStart *string @@ -212,8 +214,8 @@ type CostCategory struct { // This member is required. RuleVersion CostCategoryRuleVersion - // Rules are processed in order. If there are multiple rules that match the line - // item, then the first rule to match is used to determine that Cost Category + // The rules are processed in order. If there are multiple rules that match the + // line item, then the first rule to match is used to determine that Cost Category // value. // // This member is required. @@ -222,32 +224,36 @@ type CostCategory struct { // The default value for the cost category. DefaultValue *string - // The Cost Category's effective end date. + // The effective end date of your Cost Category. EffectiveEnd *string // The list of processing statuses for Cost Management products for a specific cost // category. ProcessingStatus []CostCategoryProcessingStatus + // The split charge rules that are used to allocate your charges between your Cost + // Category values. + SplitChargeRules []CostCategorySplitChargeRule + noSmithyDocumentSerde } // When creating or updating a cost category, you can define the CostCategoryRule // rule type as INHERITED_VALUE. This rule type adds the flexibility of defining a // rule that dynamically inherits the cost category value from the dimension value -// defined by CostCategoryInheritedValueDimension.
For example, if you wanted to -// dynamically group costs based on the value of a specific tag key, you would -// first choose an inherited value rule type, then choose the tag dimension and -// specify the tag key to use. +// defined by CostCategoryInheritedValueDimension. For example, if you want to +// dynamically group costs that are based on the value of a specific tag key, first +// choose an inherited value rule type, then choose the tag dimension and specify +// the tag key to use. type CostCategoryInheritedValueDimension struct { // The key to extract cost category values. DimensionKey *string - // The name of dimension for which to group costs. If you specify - // LINKED_ACCOUNT_NAME, the cost category value will be based on account name. If - // you specify TAG, the cost category value will be based on the value of the - // specified tag key. + // The name of the dimension that's used to group costs. If you specify + // LINKED_ACCOUNT_NAME, the cost category value is based on account name. If you + // specify TAG, the cost category value will be based on the value of the specified + // tag key. DimensionName CostCategoryInheritedValueDimensionName noSmithyDocumentSerde @@ -286,7 +292,7 @@ type CostCategoryReference struct { // The unique name of the Cost Category. Name *string - // The number of rules associated with a specific Cost Category. + // The number of rules that are associated with a specific Cost Category. NumberOfRules int32 // The list of processing statuses for Cost Management products for a specific cost @@ -304,31 +310,31 @@ type CostCategoryReference struct { // value. type CostCategoryRule struct { - // The value the line item will be categorized as, if the line item contains the - // matched dimension. + // The value the line item is categorized as if the line item contains the matched + // dimension. 
InheritedValue *CostCategoryInheritedValueDimension // An Expression // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html) // object used to categorize costs. This supports dimensions, tags, and nested // expressions. Currently the only dimensions supported are LINKED_ACCOUNT, - // SERVICE_CODE, RECORD_TYPE, and LINKED_ACCOUNT_NAME. Root level OR is not + // SERVICE_CODE, RECORD_TYPE, and LINKED_ACCOUNT_NAME. Root level OR isn't // supported. We recommend that you create a separate rule instead. RECORD_TYPE is // a dimension used for Cost Explorer APIs, and is also supported for Cost Category // expressions. This dimension uses different terms, depending on whether you're // using the console or API/JSON editor. For a detailed comparison, see Term // Comparisons // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/manage-cost-categories.html#cost-categories-terms) - // in the AWS Billing and Cost Management User Guide. + // in the Billing and Cost Management User Guide. Rule *Expression // You can define the CostCategoryRule rule type as either REGULAR or // INHERITED_VALUE. The INHERITED_VALUE rule type adds the flexibility of defining // a rule that dynamically inherits the cost category value from the dimension - // value defined by CostCategoryInheritedValueDimension. For example, if you wanted - // to dynamically group costs based on the value of a specific tag key, you would - // first choose an inherited value rule type, then choose the tag dimension and - // specify the tag key to use. + // value defined by CostCategoryInheritedValueDimension. For example, if you want + // to dynamically group costs based on the value of a specific tag key, first + // choose an inherited value rule type, then choose the tag dimension and specify + // the tag key to use. Type CostCategoryRuleType // The default value for the cost category. 
@@ -337,11 +343,60 @@ type CostCategoryRule struct { noSmithyDocumentSerde } +// Use the split charge rule to split the cost of one Cost Category value across +// several other target values. +type CostCategorySplitChargeRule struct { + + // The method that's used to define how to split your source costs across your + // targets. Proportional - Allocates charges across your targets based on the + // proportional weighted cost of each target. Fixed - Allocates charges across your + // targets based on your defined allocation percentage. Even - Allocates costs + // evenly across all targets. + // + // This member is required. + Method CostCategorySplitChargeMethod + + // The Cost Category value that you want to split. That value can't be used as a + // source or a target in other split charge rules. To indicate uncategorized costs, + // you can use an empty string as the source. + // + // This member is required. + Source *string + + // The Cost Category values that you want to split costs across. These values can't + // be used as a source in other split charge rules. + // + // This member is required. + Targets []string + + // The parameters for a split charge method. This is only required for the FIXED + // method. + Parameters []CostCategorySplitChargeRuleParameter + + noSmithyDocumentSerde +} + +// The parameters for a split charge method. +type CostCategorySplitChargeRuleParameter struct { + + // The parameter type. + // + // This member is required. + Type CostCategorySplitChargeRuleParameterType + + // The parameter values. + // + // This member is required. + Values []string + + noSmithyDocumentSerde +} + // The Cost Categories values used for filtering the costs. If Values and Key are // not specified, the ABSENTMatchOption is applied to all Cost Categories. That is, -// filtering on resources that are not mapped to any Cost Categories. If Values is -// provided and Key is not specified, the ABSENTMatchOption is applied to the Cost -// Categories Key only.
That is, filtering on resources without the given Cost +// it filters on resources that aren't mapped to any Cost Categories. If Values is +// provided and Key isn't specified, the ABSENTMatchOption is applied to the Cost +// Categories Key only. That is, it filters on resources without the given Cost // Categories key. type CostCategoryValues struct { @@ -416,14 +471,15 @@ type CoverageHours struct { noSmithyDocumentSerde } -// The amount of instance usage, in normalized units. Normalized units enable you +// The amount of instance usage, in normalized units. You can use normalized units // to see your EC2 usage for multiple sizes of instances in a uniform way. For -// example, suppose you run an xlarge instance and a 2xlarge instance. If you run -// both instances for the same amount of time, the 2xlarge instance uses twice as -// much of your reservation as the xlarge instance, even though both instances show -// only one instance-hour. Using normalized units instead of instance-hours, the -// xlarge instance used 8 normalized units, and the 2xlarge instance used 16 -// normalized units. For more information, see Modifying Reserved Instances +// example, suppose that you run an xlarge instance and a 2xlarge instance. If you +// run both instances for the same amount of time, the 2xlarge instance uses twice +// as much of your reservation as the xlarge instance, even though both instances +// show only one instance-hour. When you use normalized units instead of +// instance-hours, the xlarge instance used 8 normalized units, and the 2xlarge +// instance used 16 normalized units. For more information, see Modifying Reserved +// Instances // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) in the // Amazon Elastic Compute Cloud User Guide for Linux Instances. type CoverageNormalizedUnits struct { @@ -447,20 +503,21 @@ type CoverageNormalizedUnits struct { // Context about the current instance. 
type CurrentInstance struct { - // The currency code that AWS used to calculate the costs for this instance. + // The currency code that Amazon Web Services used to calculate the costs for this + // instance. CurrencyCode *string - // The name you've given an instance. This field will show as blank if you haven't + // The name that you've given an instance. This field shows as blank if you haven't // given the instance a name. InstanceName *string - // Current On-Demand cost of operating this instance on a monthly basis. + // The current On-Demand cost of operating this instance on a monthly basis. MonthlyCost *string - // Number of hours during the lookback period billed at On-Demand rates. + // The number of hours during the lookback period that's billed at On-Demand rates. OnDemandHoursInLookbackPeriod *string - // Number of hours during the lookback period covered by reservations. + // The number of hours during the lookback period that's covered by reservations. ReservationCoveredHoursInLookbackPeriod *string // Details about the resource and utilization. @@ -472,13 +529,13 @@ type CurrentInstance struct { // Utilization information of the current instance during the lookback period. ResourceUtilization *ResourceUtilization - // Number of hours during the lookback period covered by Savings Plans. + // The number of hours during the lookback period that's covered by Savings Plans. SavingsPlansCoveredHoursInLookbackPeriod *string - // Cost allocation resource tags applied to the instance. + // Cost allocation resource tags that are applied to the instance. Tags []TagValues - // The total number of hours the instance ran during the lookback period. + // The total number of hours that the instance ran during the lookback period. TotalRunningHoursInLookbackPeriod *string noSmithyDocumentSerde @@ -488,16 +545,16 @@ type CurrentInstance struct { type DateInterval struct { // The end of the time period. The end date is exclusive.
For example, if end is - // 2017-05-01, AWS retrieves cost and usage data from the start date up to, but not - // including, 2017-05-01. + // 2017-05-01, Amazon Web Services retrieves cost and usage data from the start + // date up to, but not including, 2017-05-01. // // This member is required. End *string // The beginning of the time period. The start date is inclusive. For example, if - // start is 2017-01-01, AWS retrieves cost and usage data starting at 2017-01-01 up - // to the end date. The start date must be equal to or no later than the current - // date to avoid a validation error. + // start is 2017-01-01, Amazon Web Services retrieves cost and usage data starting + // at 2017-01-01 up to the end date. The start date must be equal to or no later + // than the current date to avoid a validation error. // // This member is required. Start *string @@ -538,8 +595,8 @@ type DimensionValuesWithAttributes struct { noSmithyDocumentSerde } -// The field that contains a list of disk (local storage) metrics associated with -// the current instance. +// The field that contains a list of disk (local storage) metrics that are +// associated with the current instance. type DiskResourceUtilization struct { // The maximum read throughput operations per second. @@ -557,8 +614,8 @@ type DiskResourceUtilization struct { noSmithyDocumentSerde } -// The EBS field that contains a list of EBS metrics associated with the current -// instance. +// The EBS field that contains a list of EBS metrics that are associated with the +// current instance. type EBSResourceUtilization struct { // The maximum size of read operations per second @@ -576,32 +633,33 @@ type EBSResourceUtilization struct { noSmithyDocumentSerde } -// Details about the Amazon EC2 instances that AWS recommends that you purchase. +// Details about the Amazon EC2 instances that Amazon Web Services recommends that +// you purchase. 
type EC2InstanceDetails struct { // The Availability Zone of the recommended reservation. AvailabilityZone *string - // Whether the recommendation is for a current-generation instance. + // Determines whether the recommendation is for a current-generation instance. CurrentGeneration bool // The instance family of the recommended reservation. Family *string - // The type of instance that AWS recommends. + // The type of instance that Amazon Web Services recommends. InstanceType *string // The platform of the recommended reservation. The platform is the specific // combination of operating system, license model, and software on an instance. Platform *string - // The AWS Region of the recommended reservation. + // The Amazon Web Services Region of the recommended reservation. Region *string - // Whether the recommended reservation is size flexible. + // Determines whether the recommended reservation is size flexible. SizeFlexEligible bool - // Whether the recommended reservation is dedicated or shared. + // Determines whether the recommended reservation is dedicated or shared. Tenancy *string noSmithyDocumentSerde @@ -610,32 +668,33 @@ type EC2InstanceDetails struct { // Details on the Amazon EC2 Resource. type EC2ResourceDetails struct { - // Hourly public On-Demand rate for the instance type. + // The hourly public On-Demand rate for the instance type. HourlyOnDemandRate *string - // The type of AWS instance. + // The type of Amazon Web Services instance. InstanceType *string - // Memory capacity of the AWS instance. + // The memory capacity of the Amazon Web Services instance. Memory *string - // Network performance capacity of the AWS instance. + // The network performance capacity of the Amazon Web Services instance. NetworkPerformance *string - // The platform of the AWS instance. The platform is the specific combination of - // operating system, license model, and software on an instance. + // The platform of the Amazon Web Services instance. 
The platform is the specific + // combination of operating system, license model, and software on an instance. Platform *string - // The AWS Region of the instance. + // The Amazon Web Services Region of the instance. Region *string // The SKU of the product. Sku *string - // The disk storage of the AWS instance (not EBS storage). + // The disk storage of the Amazon Web Services instance. This doesn't include EBS + // storage. Storage *string - // Number of VCPU cores in the AWS instance type. + // The number of VCPU cores in the Amazon Web Services instance type. Vcpu *string noSmithyDocumentSerde @@ -644,82 +703,84 @@ type EC2ResourceDetails struct { // Utilization metrics of the instance. type EC2ResourceUtilization struct { - // The field that contains a list of disk (local storage) metrics associated with - // the current instance. + // The field that contains a list of disk (local storage) metrics that are + // associated with the current instance. DiskResourceUtilization *DiskResourceUtilization - // The EBS field that contains a list of EBS metrics associated with the current - // instance. + // The EBS field that contains a list of EBS metrics that are associated with the + // current instance. EBSResourceUtilization *EBSResourceUtilization - // Maximum observed or expected CPU utilization of the instance. + // The maximum observed or expected CPU utilization of the instance. MaxCpuUtilizationPercentage *string - // Maximum observed or expected memory utilization of the instance. + // The maximum observed or expected memory utilization of the instance. MaxMemoryUtilizationPercentage *string - // Maximum observed or expected storage utilization of the instance (does not - // measure EBS storage). + // The maximum observed or expected storage utilization of the instance. This + // doesn't include EBS storage. MaxStorageUtilizationPercentage *string - // The network field that contains a list of network metrics associated with the - // current instance. 
+ // The network field that contains a list of network metrics that are associated + // with the current instance. NetworkResourceUtilization *NetworkResourceUtilization noSmithyDocumentSerde } -// The Amazon EC2 hardware specifications that you want AWS to provide -// recommendations for. +// The Amazon EC2 hardware specifications that you want Amazon Web Services to +// provide recommendations for. type EC2Specification struct { - // Whether you want a recommendation for standard or convertible reservations. + // Indicates whether you want a recommendation for standard or convertible + // reservations. OfferingClass OfferingClass noSmithyDocumentSerde } -// Details about the Amazon ElastiCache instances that AWS recommends that you -// purchase. +// Details about the Amazon ElastiCache instances that Amazon Web Services +// recommends that you purchase. type ElastiCacheInstanceDetails struct { - // Whether the recommendation is for a current generation instance. + // Determines whether the recommendation is for a current generation instance. CurrentGeneration bool // The instance family of the recommended reservation. Family *string - // The type of node that AWS recommends. + // The type of node that Amazon Web Services recommends. NodeType *string // The description of the recommended reservation. ProductDescription *string - // The AWS Region of the recommended reservation. + // The Amazon Web Services Region of the recommended reservation. Region *string - // Whether the recommended reservation is size flexible. + // Determines whether the recommended reservation is size flexible. SizeFlexEligible bool noSmithyDocumentSerde } -// Details about the Amazon ES instances that AWS recommends that you purchase. +// Details about the Amazon ES instances that Amazon Web Services recommends that +// you purchase. type ESInstanceDetails struct { - // Whether the recommendation is for a current-generation instance. 
+ // Determines whether the recommendation is for a current-generation instance. CurrentGeneration bool - // The class of instance that AWS recommends. + // The class of instance that Amazon Web Services recommends. InstanceClass *string - // The size of instance that AWS recommends. + // The size of instance that Amazon Web Services recommends. InstanceSize *string - // The AWS Region of the recommended reservation. + // The Amazon Web Services Region of the recommended reservation. Region *string - // Whether the recommended reservation is size flexible. + // Determines whether the recommended reservation is size flexible. SizeFlexEligible bool noSmithyDocumentSerde @@ -731,38 +792,39 @@ type ESInstanceDetails struct { // dimension values - You can set the dimension name and values for the filters // that you plan to use. For example, you can filter for REGION==us-east-1 OR // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name -// (for example, REGION==US East (N. Virginia). The Expression example looks like: -// { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } -// The list of dimension values are OR'd together to retrieve cost or usage data. -// You can create Expression and DimensionValues objects using either with* methods -// or set* methods in multiple lines. +// (for example, REGION==US East (N. Virginia). The Expression example is as +// follows: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” +// ] } } The list of dimension values are OR'd together to retrieve cost or usage +// data. You can create Expression and DimensionValues objects using either with* +// methods or set* methods in multiple lines. // -// * Compound dimension values with logical -// operations - You can use multiple Expression types and the logical operators -// AND/OR/NOT to create a list of one or more Expression objects. This allows you -// to filter on more advanced options. 
For example, you can filter on ((REGION == -// us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != -// DataTransfer). The Expression for that looks like this: { "And": [ {"Or": [ -// {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, -// {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": -// { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each -// Expression can have only one operator, the service returns an error if more than -// one is specified. The following example shows an Expression object that creates -// an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", -// "Values": [ "DataTransfer" ] } } +// * Compound dimension values with +// logical operations - You can use multiple Expression types and the logical +// operators AND/OR/NOT to create a list of one or more Expression objects. By +// doing this, you can filter on more advanced options. For example, you can filter +// on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND +// (USAGE_TYPE != DataTransfer). The Expression for that is as follows: { "And": [ +// {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" +// ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": +// {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } +// Because each Expression can have only one operator, the service returns an error +// if more than one is specified. The following example shows an Expression object +// that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": +// "USAGE_TYPE", "Values": [ "DataTransfer" ] } } // -// For the GetRightsizingRecommendation action, a -// combination of OR and NOT is not supported. OR is not supported between -// different dimensions, or dimensions and tags. NOT operators aren't supported. 
-// Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For -// the GetReservationPurchaseRecommendation action, only NOT is supported. AND and -// OR are not supported. Dimensions are limited to LINKED_ACCOUNT. +// For the +// GetRightsizingRecommendation action, a combination of OR and NOT isn't +// supported. OR isn't supported between different dimensions, or dimensions and +// tags. NOT operators aren't supported. Dimensions are also limited to +// LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. For the +// GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR +// aren't supported. Dimensions are limited to LINKED_ACCOUNT. type Expression struct { // Return results that match both Dimension objects. And []Expression - // The filter based on CostCategory values. + // The filter that's based on CostCategory values. CostCategories *CostCategoryValues // The specific Dimension to use for Expression. @@ -780,7 +842,7 @@ type Expression struct { noSmithyDocumentSerde } -// The forecast created for your query. +// The forecast that's created for your query. type ForecastResult struct { // The mean value of the forecast. @@ -823,36 +885,38 @@ type GroupDefinition struct { noSmithyDocumentSerde } -// The anomaly's dollar value. +// The dollar value of the anomaly. type Impact struct { - // The maximum dollar value observed for an anomaly. + // The maximum dollar value that's observed for an anomaly. // // This member is required. MaxImpact float64 - // The cumulative dollar value observed for an anomaly. + // The cumulative dollar value that's observed for an anomaly. TotalImpact float64 noSmithyDocumentSerde } -// Details about the instances that AWS recommends that you purchase. +// Details about the instances that Amazon Web Services recommends that you +// purchase. type InstanceDetails struct { - // The Amazon EC2 instances that AWS recommends that you purchase. 
+ // The Amazon EC2 instances that Amazon Web Services recommends that you purchase. EC2InstanceDetails *EC2InstanceDetails - // The Amazon ES instances that AWS recommends that you purchase. + // The Amazon ES instances that Amazon Web Services recommends that you purchase. ESInstanceDetails *ESInstanceDetails - // The ElastiCache instances that AWS recommends that you purchase. + // The ElastiCache instances that Amazon Web Services recommends that you purchase. ElastiCacheInstanceDetails *ElastiCacheInstanceDetails - // The Amazon RDS instances that AWS recommends that you purchase. + // The Amazon RDS instances that Amazon Web Services recommends that you purchase. RDSInstanceDetails *RDSInstanceDetails - // The Amazon Redshift instances that AWS recommends that you purchase. + // The Amazon Redshift instances that Amazon Web Services recommends that you + // purchase. RedshiftInstanceDetails *RedshiftInstanceDetails noSmithyDocumentSerde @@ -873,35 +937,37 @@ type MetricValue struct { // Details on the modification recommendation. type ModifyRecommendationDetail struct { - // Identifies whether this instance type is the AWS default recommendation. + // Determines whether this instance type is the Amazon Web Services default + // recommendation. TargetInstances []TargetInstance noSmithyDocumentSerde } -// The network field that contains a list of network metrics associated with the -// current instance. +// The network field that contains a list of network metrics that are associated +// with the current instance. type NetworkResourceUtilization struct { - // The network ingress throughput utilization measured in Bytes per second. + // The network inbound throughput utilization measured in Bytes per second. NetworkInBytesPerSecond *string - // The network outgress throughput utilization measured in Bytes per second. + // The network outbound throughput utilization measured in Bytes per second. 
NetworkOutBytesPerSecond *string - // The network ingress packets measured in packets per second. + // The network ingress packets that are measured in packets per second. NetworkPacketsInPerSecond *string - // The network outgress packets measured in packets per second. + // The network egress packets that are measured in packets per second. NetworkPacketsOutPerSecond *string noSmithyDocumentSerde } -// Details about the Amazon RDS instances that AWS recommends that you purchase. +// Details about the Amazon RDS instances that Amazon Web Services recommends that +// you purchase. type RDSInstanceDetails struct { - // Whether the recommendation is for a current-generation instance. + // Determines whether the recommendation is for a current-generation instance. CurrentGeneration bool // The database edition that the recommended reservation supports. @@ -910,45 +976,45 @@ type RDSInstanceDetails struct { // The database engine that the recommended reservation supports. DatabaseEngine *string - // Whether the recommendation is for a reservation in a single Availability Zone or - // a reservation with a backup in a second Availability Zone. + // Determines whether the recommendation is for a reservation in a single + // Availability Zone or a reservation with a backup in a second Availability Zone. DeploymentOption *string // The instance family of the recommended reservation. Family *string - // The type of instance that AWS recommends. + // The type of instance that Amazon Web Services recommends. InstanceType *string // The license model that the recommended reservation supports. LicenseModel *string - // The AWS Region of the recommended reservation. + // The Amazon Web Services Region of the recommended reservation. Region *string - // Whether the recommended reservation is size flexible. + // Determines whether the recommended reservation is size flexible.
SizeFlexEligible bool noSmithyDocumentSerde } -// Details about the Amazon Redshift instances that AWS recommends that you -// purchase. +// Details about the Amazon Redshift instances that Amazon Web Services recommends +// that you purchase. type RedshiftInstanceDetails struct { - // Whether the recommendation is for a current-generation instance. + // Determines whether the recommendation is for a current-generation instance. CurrentGeneration bool // The instance family of the recommended reservation. Family *string - // The type of node that AWS recommends. + // The type of node that Amazon Web Services recommends. NodeType *string - // The AWS Region of the recommended reservation. + // The Amazon Web Services Region of the recommended reservation. Region *string - // Whether the recommended reservation is size flexible. + // Determines whether the recommended reservation is size flexible. SizeFlexEligible bool noSmithyDocumentSerde @@ -957,23 +1023,26 @@ type RedshiftInstanceDetails struct { // The aggregated numbers for your reservation usage. type ReservationAggregates struct { - // The monthly cost of your reservation, amortized over the reservation period. + // The monthly cost of your reservation. It's amortized over the reservation + // period. AmortizedRecurringFee *string - // The upfront cost of your reservation, amortized over the reservation period. + // The upfront cost of your reservation. It's amortized over the reservation + // period. AmortizedUpfrontFee *string - // How much you saved due to purchasing and utilizing reservation. AWS calculates - // this by subtracting TotalAmortizedFee from OnDemandCostOfRIHoursUsed. + // How much you saved due to purchasing and utilizing reservation. Amazon Web + // Services calculates this by subtracting TotalAmortizedFee from + // OnDemandCostOfRIHoursUsed. NetRISavings *string - // How much your reservation would cost if charged On-Demand rates. 
+ // How much your reservation costs if charged On-Demand rates. OnDemandCostOfRIHoursUsed *string // How many reservation hours that you purchased. PurchasedHours *string - // How many Amazon EC2 reservation hours that you purchased, converted to + // The number of Amazon EC2 reservation hours that you purchased. It's converted to // normalized units. Normalized units are available only for Amazon EC2 usage after // November 11, 2017. PurchasedUnits *string @@ -981,38 +1050,38 @@ type ReservationAggregates struct { // The cost of unused hours for your reservation. RICostForUnusedHours *string - // The realized savings due to purchasing and using a reservation. + // The realized savings because of purchasing and using a reservation. RealizedSavings *string // The total number of reservation hours that you used. TotalActualHours *string - // The total number of Amazon EC2 reservation hours that you used, converted to - // normalized units. Normalized units are available only for Amazon EC2 usage after - // November 11, 2017. + // The total number of Amazon EC2 reservation hours that you used. It's converted + // to normalized units. Normalized units are available only for Amazon EC2 usage + // after November 11, 2017. TotalActualUnits *string - // The total cost of your reservation, amortized over the reservation period. + // The total cost of your reservation. It's amortized over the reservation period. TotalAmortizedFee *string - // How much you could save if you use your entire reservation. + // How much you might save if you use your entire reservation. TotalPotentialRISavings *string - // The unrealized savings due to purchasing and using a reservation. + // The unrealized savings because of purchasing and using a reservation. UnrealizedSavings *string // The number of reservation hours that you didn't use. UnusedHours *string - // The number of Amazon EC2 reservation hours that you didn't use, converted to - // normalized units. 
Normalized units are available only for Amazon EC2 usage after - // November 11, 2017. + // The number of Amazon EC2 reservation hours that you didn't use. It's converted + // to normalized units. Normalized units are available only for Amazon EC2 usage + // after November 11, 2017. UnusedUnits *string // The percentage of reservation time that you used. UtilizationPercentage *string - // The percentage of Amazon EC2 reservation time that you used, converted to + // The percentage of Amazon EC2 reservation time that you used. It's converted to // normalized units. Normalized units are available only for Amazon EC2 usage after // November 11, 2017. UtilizationPercentageInUnits *string @@ -1032,19 +1101,19 @@ type ReservationCoverageGroup struct { noSmithyDocumentSerde } -// A specific reservation that AWS recommends for purchase. +// A specific reservation that Amazon Web Services recommends for purchase. type ReservationPurchaseRecommendation struct { - // The account scope that AWS recommends that you purchase this instance for. For - // example, you can purchase this reservation for an entire organization in AWS - // Organizations. + // The account scope that Amazon Web Services recommends that you purchase this + // instance for. For example, you can purchase this reservation for an entire + // organization in Amazon Web Services Organizations. AccountScope AccountScope - // How many days of previous usage that AWS considers when making this - // recommendation. + // How many days of previous usage that Amazon Web Services considers when making + // this recommendation. LookbackPeriodInDays LookbackPeriodInDays - // The payment option for the reservation. For example, AllUpfront or NoUpfront. + // The payment option for the reservation (for example, AllUpfront or NoUpfront). PaymentOption PaymentOption // Details about the recommended purchases. 
@@ -1069,65 +1138,72 @@ type ReservationPurchaseRecommendationDetail struct { AccountId *string // The average number of normalized units that you used in an hour during the - // historical period. AWS uses this to calculate your recommended reservation - // purchases. + // historical period. Amazon Web Services uses this to calculate your recommended + // reservation purchases. AverageNormalizedUnitsUsedPerHour *string // The average number of instances that you used in an hour during the historical - // period. AWS uses this to calculate your recommended reservation purchases. + // period. Amazon Web Services uses this to calculate your recommended reservation + // purchases. AverageNumberOfInstancesUsedPerHour *string - // The average utilization of your instances. AWS uses this to calculate your - // recommended reservation purchases. + // The average utilization of your instances. Amazon Web Services uses this to + // calculate your recommended reservation purchases. AverageUtilization *string - // The currency code that AWS used to calculate the costs for this instance. + // The currency code that Amazon Web Services used to calculate the costs for this + // instance. CurrencyCode *string - // How long AWS estimates that it takes for this instance to start saving you - // money, in months. + // How long Amazon Web Services estimates that it takes for this instance to start + // saving you money, in months. EstimatedBreakEvenInMonths *string - // How much AWS estimates that you spend on On-Demand Instances in a month. + // How much Amazon Web Services estimates that you spend on On-Demand Instances in + // a month. EstimatedMonthlyOnDemandCost *string - // How much AWS estimates that this specific recommendation could save you in a - // month. + // How much Amazon Web Services estimates that this specific recommendation could + // save you in a month. 
EstimatedMonthlySavingsAmount *string - // How much AWS estimates that this specific recommendation could save you in a - // month, as a percentage of your overall costs. + // How much Amazon Web Services estimates that this specific recommendation could + // save you in a month, as a percentage of your overall costs. EstimatedMonthlySavingsPercentage *string - // How much AWS estimates that you would have spent for all usage during the - // specified historical period if you had a reservation. + // How much Amazon Web Services estimates that you would have spent for all usage + // during the specified historical period if you had a reservation. EstimatedReservationCostForLookbackPeriod *string - // Details about the instances that AWS recommends that you purchase. + // Details about the instances that Amazon Web Services recommends that you + // purchase. InstanceDetails *InstanceDetails // The maximum number of normalized units that you used in an hour during the - // historical period. AWS uses this to calculate your recommended reservation - // purchases. + // historical period. Amazon Web Services uses this to calculate your recommended + // reservation purchases. MaximumNormalizedUnitsUsedPerHour *string // The maximum number of instances that you used in an hour during the historical - // period. AWS uses this to calculate your recommended reservation purchases. + // period. Amazon Web Services uses this to calculate your recommended reservation + // purchases. MaximumNumberOfInstancesUsedPerHour *string // The minimum number of normalized units that you used in an hour during the - // historical period. AWS uses this to calculate your recommended reservation - // purchases. + // historical period. Amazon Web Services uses this to calculate your recommended + // reservation purchases. MinimumNormalizedUnitsUsedPerHour *string // The minimum number of instances that you used in an hour during the historical - // period. 
AWS uses this to calculate your recommended reservation purchases. + // period. Amazon Web Services uses this to calculate your recommended reservation + // purchases. MinimumNumberOfInstancesUsedPerHour *string - // The number of normalized units that AWS recommends that you purchase. + // The number of normalized units that Amazon Web Services recommends that you + // purchase. RecommendedNormalizedUnitsToPurchase *string - // The number of instances that AWS recommends that you purchase. + // The number of instances that Amazon Web Services recommends that you purchase. RecommendedNumberOfInstancesToPurchase *string // How much purchasing this instance costs you on a monthly basis. @@ -1140,10 +1216,10 @@ type ReservationPurchaseRecommendationDetail struct { } // Information about this specific recommendation, such as the timestamp for when -// AWS made a specific recommendation. +// Amazon Web Services made a specific recommendation. type ReservationPurchaseRecommendationMetadata struct { - // The timestamp for when AWS made this recommendation. + // The timestamp for when Amazon Web Services made this recommendation. GenerationTimestamp *string // The ID for this specific recommendation. @@ -1153,19 +1229,19 @@ type ReservationPurchaseRecommendationMetadata struct { } // A summary about this recommendation, such as the currency code, the amount that -// AWS estimates that you could save, and the total amount of reservation to -// purchase. +// Amazon Web Services estimates that you could save, and the total amount of +// reservation to purchase. type ReservationPurchaseRecommendationSummary struct { // The currency code used for this recommendation. CurrencyCode *string - // The total amount that AWS estimates that this recommendation could save you in a - // month. + // The total amount that Amazon Web Services estimates that this recommendation + // could save you in a month. 
TotalEstimatedMonthlySavingsAmount *string - // The total amount that AWS estimates that this recommendation could save you in a - // month, as a percentage of your costs. + // The total amount that Amazon Web Services estimates that this recommendation + // could save you in a month, as a percentage of your costs. TotalEstimatedMonthlySavingsPercentage *string noSmithyDocumentSerde @@ -1201,16 +1277,16 @@ type ResourceDetails struct { // Resource utilization of current resource. type ResourceUtilization struct { - // Utilization of current Amazon EC2 instance. + // The utilization of current Amazon EC2 instance. EC2ResourceUtilization *EC2ResourceUtilization noSmithyDocumentSerde } -// The result that is associated with a time period. +// The result that's associated with a time period. type ResultByTime struct { - // Whether the result is estimated. + // Determines whether the result is estimated. Estimated bool // The groups that this time period includes. @@ -1238,23 +1314,24 @@ type RightsizingRecommendation struct { // or over utilization of specific metrics (for example, CPU, Memory, Network). FindingReasonCodes []FindingReasonCode - // Details for modification recommendations. + // The details for the modification recommendations. ModifyRecommendationDetail *ModifyRecommendationDetail - // Recommendation to either terminate or modify the resource. + // A recommendation to either terminate or modify the resource. RightsizingType RightsizingType - // Details for termination recommendations. + // The details for termination recommendations. TerminateRecommendationDetail *TerminateRecommendationDetail noSmithyDocumentSerde } -// Enables you to customize recommendations across two attributes. You can choose -// to view recommendations for instances within the same instance families or -// across different instance families. 
You can also choose to view your estimated -// savings associated with recommendations with consideration of existing Savings -// Plans or RI benefits, or neither. +// You can use RightsizingRecommendationConfiguration to customize recommendations +// across two attributes. You can choose to view recommendations for instances +// within the same instance families or across different instance families. You can +// also choose to view your estimated savings that are associated with +// recommendations with consideration of existing Savings Plans or RI benefits, or +// neither. type RightsizingRecommendationConfiguration struct { // The option to consider RI or Savings Plans discount benefits in your savings @@ -1263,7 +1340,7 @@ type RightsizingRecommendationConfiguration struct { // This member is required. BenefitsConsidered bool - // The option to see recommendations within the same instance family, or + // The option to see recommendations within the same instance family or // recommendations for instances across other families. The default value is // SAME_INSTANCE_FAMILY. // @@ -1276,14 +1353,14 @@ type RightsizingRecommendationConfiguration struct { // Metadata for this recommendation set. type RightsizingRecommendationMetadata struct { - // Additional metadata that may be applicable to the recommendation. + // Additional metadata that might be applicable to the recommendation. AdditionalMetadata *string - // The timestamp for when AWS made this recommendation. + // The timestamp for when Amazon Web Services made this recommendation. GenerationTimestamp *string - // How many days of previous usage that AWS considers when making this - // recommendation. + // The number of days of previous usage that Amazon Web Services considers when + // making this recommendation. LookbackPeriodInDays LookbackPeriodInDays // The ID for this specific recommendation. 
@@ -1292,39 +1369,39 @@ type RightsizingRecommendationMetadata struct { noSmithyDocumentSerde } -// Summary of rightsizing recommendations +// The summary of rightsizing recommendations type RightsizingRecommendationSummary struct { - // Estimated total savings resulting from modifications, on a monthly basis. + // The estimated total savings resulting from modifications, on a monthly basis. EstimatedTotalMonthlySavingsAmount *string - // The currency code that AWS used to calculate the savings. + // The currency code that Amazon Web Services used to calculate the savings. SavingsCurrencyCode *string - // Savings percentage based on the recommended modifications, relative to the total - // On-Demand costs associated with these instances. + // The savings percentage based on the recommended modifications. It's relative to + // the total On-Demand costs that are associated with these instances. SavingsPercentage *string - // Total number of instance recommendations. + // The total number of instance recommendations. TotalRecommendationCount *string noSmithyDocumentSerde } -// The combination of AWS service, linked account, Region, and usage type where a -// cost anomaly is observed. +// The combination of Amazon Web Services service, linked account, Region, and +// usage type where a cost anomaly is observed. type RootCause struct { - // The linked account value associated with the cost anomaly. + // The member account value that's associated with the cost anomaly. LinkedAccount *string - // The AWS Region associated with the cost anomaly. + // The Amazon Web Services Region that's associated with the cost anomaly. Region *string - // The AWS service name associated with the cost anomaly. + // The Amazon Web Services service name that's associated with the cost anomaly. Service *string - // The UsageType value associated with the cost anomaly. + // The UsageType value that's associated with the cost anomaly. 
UsageType *string noSmithyDocumentSerde @@ -1370,32 +1447,33 @@ type SavingsPlansCoverage struct { type SavingsPlansCoverageData struct { // The percentage of your existing Savings Plans covered usage, divided by all of - // your eligible Savings Plans usage in an account(or set of accounts). + // your eligible Savings Plans usage in an account (or set of accounts). CoveragePercentage *string - // The cost of your AWS usage at the public On-Demand rate. + // The cost of your Amazon Web Services usage at the public On-Demand rate. OnDemandCost *string - // The amount of your AWS usage that is covered by a Savings Plans. + // The amount of your Amazon Web Services usage that is covered by a Savings Plans. SpendCoveredBySavingsPlans *string - // The total cost of your AWS usage, regardless of your purchase option. + // The total cost of your Amazon Web Services usage, regardless of your purchase + // option. TotalCost *string noSmithyDocumentSerde } -// Attribute details on a specific Savings Plan. +// The attribute details on a specific Savings Plan. type SavingsPlansDetails struct { // A group of instance types that Savings Plans applies to. InstanceFamily *string - // The unique ID used to distinguish Savings Plans from one another. + // The unique ID that's used to distinguish Savings Plans from one another. OfferingId *string - // A collection of AWS resources in a geographic area. Each AWS Region is isolated - // and independent of the other Regions. + // A collection of Amazon Web Services resources in a geographic area. Each Amazon + // Web Services Region is isolated and independent of the other Regions. Region *string noSmithyDocumentSerde @@ -1406,9 +1484,9 @@ type SavingsPlansDetails struct { type SavingsPlansPurchaseRecommendation struct { // The account scope that you want your recommendations for. Amazon Web Services - // calculates recommendations including the management account and member accounts - // if the value is set to PAYER. 
If the value is LINKED, recommendations are - // calculated for individual member accounts only. + // calculates recommendations that include the management account and member + // accounts if the value is set to PAYER. If the value is LINKED, recommendations + // are calculated for individual member accounts only. AccountScope AccountScope // The lookback period in days, used to generate the recommendation. @@ -1427,7 +1505,7 @@ type SavingsPlansPurchaseRecommendation struct { // The requested Savings Plans recommendation type. SavingsPlansType SupportedSavingsPlansType - // The Savings Plans recommendation term in years, used to generate the + // The Savings Plans recommendation term in years. It's used to generate the // recommendation. TermInYears TermInYears @@ -1440,8 +1518,8 @@ type SavingsPlansPurchaseRecommendationDetail struct { // The AccountID the recommendation is generated for. AccountId *string - // The currency code AWS used to generate the recommendations and present potential - // savings. + // The currency code that Amazon Web Services used to generate the recommendations + // and present potential savings. CurrencyCode *string // The average value of hourly On-Demand spend over the lookback period of the @@ -1459,7 +1537,7 @@ type SavingsPlansPurchaseRecommendationDetail struct { // The estimated utilization of the recommended Savings Plans. EstimatedAverageUtilization *string - // The estimated monthly savings amount, based on the recommended Savings Plans. + // The estimated monthly savings amount based on the recommended Savings Plans. EstimatedMonthlySavingsAmount *string // The remaining On-Demand cost estimated to not be covered by the recommended @@ -1470,24 +1548,25 @@ type SavingsPlansPurchaseRecommendationDetail struct { // based on your usage of the selected time period and the Savings Plans you own. 
EstimatedOnDemandCostWithCurrentCommitment *string - // The estimated return on investment based on the recommended Savings Plans - // purchased. This is calculated as estimatedSavingsAmount/ estimatedSPCost*100. + // The estimated return on investment that's based on the recommended Savings Plans + // that you purchased. This is calculated as estimatedSavingsAmount/ + // estimatedSPCost*100. EstimatedROI *string // The cost of the recommended Savings Plans over the length of the lookback // period. EstimatedSPCost *string - // The estimated savings amount based on the recommended Savings Plans over the - // length of the lookback period. + // The estimated savings amount that's based on the recommended Savings Plans over + // the length of the lookback period. EstimatedSavingsAmount *string // The estimated savings percentage relative to the total cost of applicable // On-Demand usage over the lookback period. EstimatedSavingsPercentage *string - // The recommended hourly commitment level for the Savings Plans type, and - // configuration based on the usage during the lookback period. + // The recommended hourly commitment level for the Savings Plans type and the + // configuration that's based on the usage during the lookback period. HourlyCommitmentToPurchase *string // Details for your recommended Savings Plans. @@ -1503,7 +1582,7 @@ type SavingsPlansPurchaseRecommendationDetail struct { // Metadata about your Savings Plans Purchase Recommendations. type SavingsPlansPurchaseRecommendationMetadata struct { - // Additional metadata that may be applicable to the recommendation. + // Additional metadata that might be applicable to the recommendation. AdditionalMetadata *string // The timestamp showing when the recommendations were generated. @@ -1518,8 +1597,8 @@ type SavingsPlansPurchaseRecommendationMetadata struct { // Summary metrics for your Savings Plans Purchase Recommendations. 
type SavingsPlansPurchaseRecommendationSummary struct { - // The currency code AWS used to generate the recommendations and present potential - // savings. + // The currency code that Amazon Web Services used to generate the recommendations + // and present potential savings. CurrencyCode *string // The current total on demand spend of the applicable usage types over the @@ -1529,16 +1608,17 @@ type SavingsPlansPurchaseRecommendationSummary struct { // The recommended Savings Plans cost on a daily (24 hourly) basis. DailyCommitmentToPurchase *string - // The estimated monthly savings amount, based on the recommended Savings Plans - // purchase. + // The estimated monthly savings amount that's based on the recommended Savings + // Plans purchase. EstimatedMonthlySavingsAmount *string - // The estimated On-Demand costs you would expect with no additional commitment, - // based on your usage of the selected time period and the Savings Plans you own. + // The estimated On-Demand costs you would expect with no additional commitment. + // It's based on your usage of the selected time period and the Savings Plans you + // own. EstimatedOnDemandCostWithCurrentCommitment *string - // The estimated return on investment based on the recommended Savings Plans and - // estimated savings. + // The estimated return on investment that's based on the recommended Savings Plans + // and estimated savings. EstimatedROI *string // The estimated total savings over the lookback period, based on the purchase of @@ -1555,7 +1635,7 @@ type SavingsPlansPurchaseRecommendationSummary struct { // remaining On-Demand usage. EstimatedTotalCost *string - // The recommended hourly commitment based on the recommendation parameters. + // The recommended hourly commitment that's based on the recommendation parameters. 
HourlyCommitmentToPurchase *string // The aggregate number of Savings Plans recommendations that exist for your @@ -1565,11 +1645,11 @@ type SavingsPlansPurchaseRecommendationSummary struct { noSmithyDocumentSerde } -// The amount of savings you're accumulating, against the public On-Demand rate of -// the usage accrued in an account. +// The amount of savings that you're accumulating, against the public On-Demand +// rate of the usage accrued in an account. type SavingsPlansSavings struct { - // The savings amount that you are accumulating for the usage that is covered by a + // The savings amount that you're accumulating for the usage that's covered by a // Savings Plans, when compared to the On-Demand equivalent of the same usage. NetSavings *string @@ -1580,14 +1660,14 @@ type SavingsPlansSavings struct { noSmithyDocumentSerde } -// The measurement of how well you are using your existing Savings Plans. +// The measurement of how well you're using your existing Savings Plans. type SavingsPlansUtilization struct { // The total amount of Savings Plans commitment that's been purchased in an account // (or set of accounts). TotalCommitment *string - // The amount of your Savings Plans commitment that was not consumed from Savings + // The amount of your Savings Plans commitment that wasn't consumed from Savings // Plans eligible usage in a specific period. UnusedCommitment *string @@ -1679,8 +1759,8 @@ type SavingsPlansUtilizationDetail struct { // Hardware specifications for the service that you want recommendations for. type ServiceSpecification struct { - // The Amazon EC2 hardware specifications that you want AWS to provide - // recommendations for. + // The Amazon EC2 hardware specifications that you want Amazon Web Services to + // provide recommendations for. EC2Specification *EC2Specification noSmithyDocumentSerde @@ -1689,12 +1769,12 @@ type ServiceSpecification struct { // The details of how to sort the data. 
type SortDefinition struct { - // The key by which to sort the data. + // The key that's used to sort the data. // // This member is required. Key *string - // The order in which to sort the data. + // The order that's used to sort the data. SortOrder SortOrder noSmithyDocumentSerde @@ -1703,7 +1783,7 @@ type SortDefinition struct { // The recipient of AnomalySubscription notifications. type Subscriber struct { - // The email address or SNS Amazon Resource Name (ARN), depending on the Type. + // The email address or SNS Amazon Resource Name (ARN). This depends on the Type. Address *string // Indicates if the subscriber accepts the notifications. @@ -1715,10 +1795,10 @@ type Subscriber struct { noSmithyDocumentSerde } -// The values that are available for a tag. If Values and Key are not specified, -// the ABSENTMatchOption is applied to all tags. That is, filtering on resources -// with no tags. If Values is provided and Key is not specified, the -// ABSENTMatchOption is applied to the tag Key only. That is, filtering on +// The values that are available for a tag. If Values and Key aren't specified, the +// ABSENTMatchOption is applied to all tags. That is, it's filtered on resources +// with no tags. If Values is provided and Key isn't specified, the +// ABSENTMatchOption is applied to the tag Key only. That is, it's filtered on // resources without the given tag key. type TagValues struct { @@ -1739,19 +1819,21 @@ type TagValues struct { // Details on recommended instance. type TargetInstance struct { - // The currency code that AWS used to calculate the costs for this instance. + // The currency code that Amazon Web Services used to calculate the costs for this + // instance. CurrencyCode *string - // Indicates whether this recommendation is the defaulted AWS recommendation. + // Determines whether this recommendation is the defaulted Amazon Web Services + // recommendation. 
DefaultTargetInstance bool - // Expected cost to operate this instance type on a monthly basis. + // The expected cost to operate this instance type on a monthly basis. EstimatedMonthlyCost *string - // Estimated savings resulting from modification, on a monthly basis. + // The estimated savings that result from modification, on a monthly basis. EstimatedMonthlySavings *string - // Expected utilization metrics for target instance type. + // The expected utilization metrics for target instance type. ExpectedResourceUtilization *ResourceUtilization // Explains the actions you might need to take in order to successfully migrate @@ -1767,10 +1849,11 @@ type TargetInstance struct { // Details on termination recommendation. type TerminateRecommendationDetail struct { - // The currency code that AWS used to calculate the costs for this instance. + // The currency code that Amazon Web Services used to calculate the costs for this + // instance. CurrencyCode *string - // Estimated savings resulting from modification, on a monthly basis. + // The estimated savings that result from modification, on a monthly basis. EstimatedMonthlySavings *string noSmithyDocumentSerde @@ -1779,17 +1862,17 @@ type TerminateRecommendationDetail struct { // Filters cost anomalies based on the total impact. type TotalImpactFilter struct { - // The comparing value used in the filter. + // The comparing value that's used in the filter. // // This member is required. NumericOperator NumericOperator - // The lower bound dollar value used in the filter. + // The lower bound dollar value that's used in the filter. // // This member is required. StartValue float64 - // The upper bound dollar value used in the filter. + // The upper bound dollar value that's used in the filter. 
EndValue float64 noSmithyDocumentSerde diff --git a/service/costexplorer/validators.go b/service/costexplorer/validators.go index 78729823890..f09b7aa22f1 100644 --- a/service/costexplorer/validators.go +++ b/service/costexplorer/validators.go @@ -718,6 +718,84 @@ func validateAnomalySubscription(v *types.AnomalySubscription) error { } } +func validateCostCategorySplitChargeRule(v *types.CostCategorySplitChargeRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CostCategorySplitChargeRule"} + if v.Source == nil { + invalidParams.Add(smithy.NewErrParamRequired("Source")) + } + if v.Targets == nil { + invalidParams.Add(smithy.NewErrParamRequired("Targets")) + } + if len(v.Method) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Method")) + } + if v.Parameters != nil { + if err := validateCostCategorySplitChargeRuleParametersList(v.Parameters); err != nil { + invalidParams.AddNested("Parameters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCostCategorySplitChargeRuleParameter(v *types.CostCategorySplitChargeRuleParameter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CostCategorySplitChargeRuleParameter"} + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if v.Values == nil { + invalidParams.Add(smithy.NewErrParamRequired("Values")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCostCategorySplitChargeRuleParametersList(v []types.CostCategorySplitChargeRuleParameter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CostCategorySplitChargeRuleParametersList"} + for i := range v { + if err := validateCostCategorySplitChargeRuleParameter(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), 
err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCostCategorySplitChargeRulesList(v []types.CostCategorySplitChargeRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CostCategorySplitChargeRulesList"} + for i := range v { + if err := validateCostCategorySplitChargeRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateDateInterval(v *types.DateInterval) error { if v == nil { return nil @@ -850,6 +928,11 @@ func validateOpCreateCostCategoryDefinitionInput(v *CreateCostCategoryDefinition if v.Rules == nil { invalidParams.Add(smithy.NewErrParamRequired("Rules")) } + if v.SplitChargeRules != nil { + if err := validateCostCategorySplitChargeRulesList(v.SplitChargeRules); err != nil { + invalidParams.AddNested("SplitChargeRules", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -1357,6 +1440,11 @@ func validateOpUpdateCostCategoryDefinitionInput(v *UpdateCostCategoryDefinition if v.Rules == nil { invalidParams.Add(smithy.NewErrParamRequired("Rules")) } + if v.SplitChargeRules != nil { + if err := validateCostCategorySplitChargeRulesList(v.SplitChargeRules); err != nil { + invalidParams.AddNested("SplitChargeRules", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { diff --git a/service/customerprofiles/api_op_CreateDomain.go b/service/customerprofiles/api_op_CreateDomain.go index d24d4f385a2..375cc7b06d7 100644 --- a/service/customerprofiles/api_op_CreateDomain.go +++ b/service/customerprofiles/api_op_CreateDomain.go @@ -16,7 +16,12 @@ import ( // profile attributes, object types, profile keys, and encryption keys. 
You can // create multiple domains, and each domain can have multiple third-party // integrations. Each Amazon Connect instance can be associated with only one -// domain. Multiple Amazon Connect instances can be associated with one domain. +// domain. Multiple Amazon Connect instances can be associated with one domain. Use +// this API or UpdateDomain +// (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_UpdateDomain.html) +// to enable identity resolution +// (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_GetMatches.html): +// set Matching to true. func (c *Client) CreateDomain(ctx context.Context, params *CreateDomainInput, optFns ...func(*Options)) (*CreateDomainOutput, error) { if params == nil { params = &CreateDomainInput{} @@ -55,8 +60,12 @@ type CreateDomainInput struct { // before it is placed in permanent or semi-permanent storage. DefaultEncryptionKey *string - // The process of matching duplicate profiles. This process runs every Saturday at - // 12AM. + // The process of matching duplicate profiles. If Matching = true, Amazon Connect + // Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to + // detect duplicate profiles in your domains. After that batch process completes, + // use the GetMatches + // (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_GetMatches.html) + // API to return and review the results. Matching *types.MatchingRequest // The tags used to organize, track, or control access for this resource. @@ -96,8 +105,12 @@ type CreateDomainOutput struct { // before it is placed in permanent or semi-permanent storage. DefaultEncryptionKey *string - // The process of matching duplicate profiles. This process runs every Saturday at - // 12AM. + // The process of matching duplicate profiles. If Matching = true, Amazon Connect + // Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to + // detect duplicate profiles in your domains. 
After that batch process completes, + // use the GetMatches + // (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_GetMatches.html) + // API to return and review the results. Matching *types.MatchingResponse // The tags used to organize, track, or control access for this resource. diff --git a/service/customerprofiles/api_op_GetDomain.go b/service/customerprofiles/api_op_GetDomain.go index 006dcc163ed..030a7f7261c 100644 --- a/service/customerprofiles/api_op_GetDomain.go +++ b/service/customerprofiles/api_op_GetDomain.go @@ -67,8 +67,12 @@ type GetDomainOutput struct { // The default number of days until the data within the domain expires. DefaultExpirationDays *int32 - // The process of matching duplicate profiles. This process runs every Saturday at - // 12AM. + // The process of matching duplicate profiles. If Matching = true, Amazon Connect + // Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to + // detect duplicate profiles in your domains. After that batch process completes, + // use the GetMatches + // (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_GetMatches.html) + // API to return and review the results. Matching *types.MatchingResponse // Usage-specific statistics about the domain. diff --git a/service/customerprofiles/api_op_GetMatches.go b/service/customerprofiles/api_op_GetMatches.go index 054e7305f81..b058eb4ba36 100644 --- a/service/customerprofiles/api_op_GetMatches.go +++ b/service/customerprofiles/api_op_GetMatches.go @@ -19,8 +19,8 @@ import ( // (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_UpdateDomain.html) // to enable identity resolution: set Matching to true. GetMatches returns // potentially matching profiles, based on the results of the latest run of a -// machine learning process. Amazon Connect runs a batch process every Saturday at -// 12AM UTC to identify matching profiles. The results are returned up to seven +// machine learning process. 
Amazon Connect starts a batch process every Saturday +// at 12AM UTC to identify matching profiles. The results are returned up to seven // days after the Saturday run. Amazon Connect uses the following profile // attributes to identify matches: // @@ -43,6 +43,13 @@ import ( // * FullName // // * BusinessName +// +// For +// example, two or more profiles—with spelling mistakes such as John Doe and Jhn +// Doe, or different casing email addresses such as JOHN_DOE@ANYCOMPANY.COM and +// johndoe@anycompany.com, or different phone number formats such as 555-010-0000 +// and +1-555-010-0000—can be detected as belonging to the same customer John Doe +// and merged into a unified profile. func (c *Client) GetMatches(ctx context.Context, params *GetMatchesInput, optFns ...func(*Options)) (*GetMatchesOutput, error) { if params == nil { params = &GetMatchesInput{} diff --git a/service/customerprofiles/api_op_ListProfileObjects.go b/service/customerprofiles/api_op_ListProfileObjects.go index 7fce91dc862..63f5836cfc0 100644 --- a/service/customerprofiles/api_op_ListProfileObjects.go +++ b/service/customerprofiles/api_op_ListProfileObjects.go @@ -51,6 +51,10 @@ type ListProfileObjectsInput struct { // The pagination token from the previous call to ListProfileObjects. NextToken *string + // Applies a filter to the response to include profile objects with the specified + // index values. This filter is only supported for ObjectTypeName _asset and _case. + ObjectFilter *types.ObjectFilter + noSmithyDocumentSerde } diff --git a/service/customerprofiles/api_op_UpdateDomain.go b/service/customerprofiles/api_op_UpdateDomain.go index 6132be814da..c1c9354c87d 100644 --- a/service/customerprofiles/api_op_UpdateDomain.go +++ b/service/customerprofiles/api_op_UpdateDomain.go @@ -14,7 +14,11 @@ import ( // Updates the properties of a domain, including creating or selecting a dead // letter queue or an encryption key. After a domain is created, the name can’t be -// changed. +// changed. 
Use this API or CreateDomain +// (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_CreateDomain.html) +// to enable identity resolution +// (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_GetMatches.html): +// set Matching to true. func (c *Client) UpdateDomain(ctx context.Context, params *UpdateDomainInput, optFns ...func(*Options)) (*UpdateDomainOutput, error) { if params == nil { params = &UpdateDomainInput{} @@ -53,8 +57,12 @@ type UpdateDomainInput struct { // The default number of days until the data within the domain expires. DefaultExpirationDays *int32 - // The process of matching duplicate profiles. This process runs every Saturday at - // 12AM. + // The process of matching duplicate profiles. If Matching = true, Amazon Connect + // Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to + // detect duplicate profiles in your domains. After that batch process completes, + // use the GetMatches + // (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_GetMatches.html) + // API to return and review the results. Matching *types.MatchingRequest // The tags used to organize, track, or control access for this resource. @@ -92,8 +100,12 @@ type UpdateDomainOutput struct { // The default number of days until the data within the domain expires. DefaultExpirationDays *int32 - // The process of matching duplicate profiles. This process runs every Saturday at - // 12AM. + // The process of matching duplicate profiles. If Matching = true, Amazon Connect + // Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to + // detect duplicate profiles in your domains. After that batch process completes, + // use the GetMatches + // (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_GetMatches.html) + // API to return and review the results. Matching *types.MatchingResponse // The tags used to organize, track, or control access for this resource. 
diff --git a/service/customerprofiles/serializers.go b/service/customerprofiles/serializers.go index a88623d8c8a..8ca3dbbb71d 100644 --- a/service/customerprofiles/serializers.go +++ b/service/customerprofiles/serializers.go @@ -1489,6 +1489,13 @@ func awsRestjson1_serializeOpDocumentListProfileObjectsInput(v *ListProfileObjec object := value.Object() defer object.Close() + if v.ObjectFilter != nil { + ok := object.Key("ObjectFilter") + if err := awsRestjson1_serializeDocumentObjectFilter(v.ObjectFilter, ok); err != nil { + return err + } + } + if v.ObjectTypeName != nil { ok := object.Key("ObjectTypeName") ok.String(*v.ObjectTypeName) @@ -2984,6 +2991,25 @@ func awsRestjson1_serializeDocumentMatchingRequest(v *types.MatchingRequest, val return nil } +func awsRestjson1_serializeDocumentObjectFilter(v *types.ObjectFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyName != nil { + ok := object.Key("KeyName") + ok.String(*v.KeyName) + } + + if v.Values != nil { + ok := object.Key("Values") + if err := awsRestjson1_serializeDocumentRequestValueList(v.Values, ok); err != nil { + return err + } + } + + return nil +} + func awsRestjson1_serializeDocumentObjectTypeField(v *types.ObjectTypeField, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/customerprofiles/types/enums.go b/service/customerprofiles/types/enums.go index 77d9187a0ef..2321d4d2370 100644 --- a/service/customerprofiles/types/enums.go +++ b/service/customerprofiles/types/enums.go @@ -367,6 +367,8 @@ type StandardIdentifier string // Enum values for StandardIdentifier const ( StandardIdentifierProfile StandardIdentifier = "PROFILE" + StandardIdentifierAsset StandardIdentifier = "ASSET" + StandardIdentifierCase StandardIdentifier = "CASE" StandardIdentifierUnique StandardIdentifier = "UNIQUE" StandardIdentifierSecondary StandardIdentifier = "SECONDARY" StandardIdentifierLookupOnly StandardIdentifier = 
"LOOKUP_ONLY" @@ -379,6 +381,8 @@ const ( func (StandardIdentifier) Values() []StandardIdentifier { return []StandardIdentifier{ "PROFILE", + "ASSET", + "CASE", "UNIQUE", "SECONDARY", "LOOKUP_ONLY", diff --git a/service/customerprofiles/types/types.go b/service/customerprofiles/types/types.go index afea1404f88..2b07a81a60d 100644 --- a/service/customerprofiles/types/types.go +++ b/service/customerprofiles/types/types.go @@ -363,6 +363,26 @@ type MatchItem struct { noSmithyDocumentSerde } +// The filter applied to ListProfileObjects response to include profile objects +// with the specified index values. This filter is only supported for +// ObjectTypeName _asset and _case. +type ObjectFilter struct { + + // A searchable identifier of a standard profile object. The predefined keys you + // can use to search for _asset include: _assetId, _assetName, _serialNumber. The + // predefined keys you can use to search for _case include: _caseId. + // + // This member is required. + KeyName *string + + // A list of key values. + // + // This member is required. + Values []string + + noSmithyDocumentSerde +} + // Represents a field in a ProfileObjectType. type ObjectTypeField struct { @@ -389,14 +409,14 @@ type ObjectTypeKey struct { FieldNames []string // The types of keys that a ProfileObject can have. Each ProfileObject can have - // only 1 UNIQUE key but multiple PROFILE keys. PROFILE means that this key can be - // used to tie an object to a PROFILE. UNIQUE means that it can be used to uniquely - // identify an object. If a key a is marked as SECONDARY, it will be used to search - // for profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key - // is only used to match a profile but is not persisted to be used for searching of - // the profile. A NEW_ONLY key is only used if the profile does not already exist - // before the object is ingested, otherwise it is only used for matching objects to - // profiles. 
+ // only 1 UNIQUE key but multiple PROFILE keys. PROFILE, ASSET or CASE means that + // this key can be used to tie an object to a PROFILE, ASSET or CASE respectively. + // UNIQUE means that it can be used to uniquely identify an object. If a key a is + // marked as SECONDARY, it will be used to search for profiles after all other + // PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a + // profile but is not persisted to be used for searching of the profile. A NEW_ONLY + // key is only used if the profile does not already exist before the object is + // ingested, otherwise it is only used for matching objects to profiles. StandardIdentifiers []StandardIdentifier noSmithyDocumentSerde diff --git a/service/customerprofiles/validators.go b/service/customerprofiles/validators.go index 0692f5ec1b0..cb1417fc74a 100644 --- a/service/customerprofiles/validators.go +++ b/service/customerprofiles/validators.go @@ -751,6 +751,24 @@ func validateMatchingRequest(v *types.MatchingRequest) error { } } +func validateObjectFilter(v *types.ObjectFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ObjectFilter"} + if v.KeyName == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyName")) + } + if v.Values == nil { + invalidParams.Add(smithy.NewErrParamRequired("Values")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateS3SourceProperties(v *types.S3SourceProperties) error { if v == nil { return nil @@ -1261,6 +1279,11 @@ func validateOpListProfileObjectsInput(v *ListProfileObjectsInput) error { if v.ProfileId == nil { invalidParams.Add(smithy.NewErrParamRequired("ProfileId")) } + if v.ObjectFilter != nil { + if err := validateObjectFilter(v.ObjectFilter); err != nil { + invalidParams.AddNested("ObjectFilter", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { diff --git 
a/service/databrew/types/enums.go b/service/databrew/types/enums.go index c4dc3f0107d..d47f443b460 100644 --- a/service/databrew/types/enums.go +++ b/service/databrew/types/enums.go @@ -192,13 +192,14 @@ type OutputFormat string // Enum values for OutputFormat const ( - OutputFormatCsv OutputFormat = "CSV" - OutputFormatJson OutputFormat = "JSON" - OutputFormatParquet OutputFormat = "PARQUET" - OutputFormatGlueparquet OutputFormat = "GLUEPARQUET" - OutputFormatAvro OutputFormat = "AVRO" - OutputFormatOrc OutputFormat = "ORC" - OutputFormatXml OutputFormat = "XML" + OutputFormatCsv OutputFormat = "CSV" + OutputFormatJson OutputFormat = "JSON" + OutputFormatParquet OutputFormat = "PARQUET" + OutputFormatGlueparquet OutputFormat = "GLUEPARQUET" + OutputFormatAvro OutputFormat = "AVRO" + OutputFormatOrc OutputFormat = "ORC" + OutputFormatXml OutputFormat = "XML" + OutputFormatTableauhyper OutputFormat = "TABLEAUHYPER" ) // Values returns all known values for OutputFormat. Note that this can be expanded @@ -213,6 +214,7 @@ func (OutputFormat) Values() []OutputFormat { "AVRO", "ORC", "XML", + "TABLEAUHYPER", } } diff --git a/service/directoryservice/api_op_AddIpRoutes.go b/service/directoryservice/api_op_AddIpRoutes.go index c7dbbb293d7..bfb08afed3d 100644 --- a/service/directoryservice/api_op_AddIpRoutes.go +++ b/service/directoryservice/api_op_AddIpRoutes.go @@ -11,15 +11,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// If the DNS server for your on-premises domain uses a publicly addressable IP +// If the DNS server for your self-managed domain uses a publicly addressable IP // address, you must add a CIDR address block to correctly route traffic to and // from your Microsoft AD on Amazon Web Services. AddIpRoutes adds this address // block. You can also use AddIpRoutes to facilitate routing traffic that uses -// public IP ranges from your Microsoft AD on AWS to a peer VPC. 
Before you call -// AddIpRoutes, ensure that all of the required permissions have been explicitly -// granted through a policy. For details about what permissions are required to run -// the AddIpRoutes operation, see AWS Directory Service API Permissions: Actions, -// Resources, and Conditions Reference +// public IP ranges from your Microsoft AD on Amazon Web Services to a peer VPC. +// Before you call AddIpRoutes, ensure that all of the required permissions have +// been explicitly granted through a policy. For details about what permissions are +// required to run the AddIpRoutes operation, see Directory Service API +// Permissions: Actions, Resources, and Conditions Reference // (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html). func (c *Client) AddIpRoutes(ctx context.Context, params *AddIpRoutesInput, optFns ...func(*Options)) (*AddIpRoutesOutput, error) { if params == nil { @@ -44,39 +44,39 @@ type AddIpRoutesInput struct { DirectoryId *string // IP address blocks, using CIDR format, of the traffic to route. This is often the - // IP address block of the DNS server used for your on-premises domain. + // IP address block of the DNS server used for your self-managed domain. // // This member is required. IpRoutes []types.IpRoute // If set to true, updates the inbound and outbound rules of the security group - // that has the description: "AWS created security group for directory ID directory - // controllers." Following are the new rules: Inbound: + // that has the description: "Amazon Web Services created security group for + // directory ID directory controllers." 
Following are the new rules: Inbound: + // + // * + // Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0 + // + // * Type: + // Custom UDP Rule, Protocol: UDP, Range: 123, Source: 0.0.0.0/0 + // + // * Type: Custom + // UDP Rule, Protocol: UDP, Range: 138, Source: 0.0.0.0/0 // // * Type: Custom UDP Rule, - // Protocol: UDP, Range: 88, Source: 0.0.0.0/0 + // Protocol: UDP, Range: 389, Source: 0.0.0.0/0 // // * Type: Custom UDP Rule, Protocol: - // UDP, Range: 123, Source: 0.0.0.0/0 + // UDP, Range: 464, Source: 0.0.0.0/0 // // * Type: Custom UDP Rule, Protocol: UDP, - // Range: 138, Source: 0.0.0.0/0 + // Range: 445, Source: 0.0.0.0/0 // - // * Type: Custom UDP Rule, Protocol: UDP, Range: - // 389, Source: 0.0.0.0/0 + // * Type: Custom TCP Rule, Protocol: TCP, Range: + // 88, Source: 0.0.0.0/0 // - // * Type: Custom UDP Rule, Protocol: UDP, Range: 464, + // * Type: Custom TCP Rule, Protocol: TCP, Range: 135, // Source: 0.0.0.0/0 // - // * Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: - // 0.0.0.0/0 - // - // * Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: - // 0.0.0.0/0 - // - // * Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: - // 0.0.0.0/0 - // // * Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: // 0.0.0.0/0 // diff --git a/service/directoryservice/api_op_ConnectDirectory.go b/service/directoryservice/api_op_ConnectDirectory.go index c433381714a..e24eaff10c1 100644 --- a/service/directoryservice/api_op_ConnectDirectory.go +++ b/service/directoryservice/api_op_ConnectDirectory.go @@ -11,10 +11,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates an AD Connector to connect to an on-premises directory. Before you call +// Creates an AD Connector to connect to a self-managed directory. Before you call // ConnectDirectory, ensure that all of the required permissions have been // explicitly granted through a policy. 
For details about what permissions are -// required to run the ConnectDirectory operation, see AWS Directory Service API +// required to run the ConnectDirectory operation, see Directory Service API // Permissions: Actions, Resources, and Conditions Reference // (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html). func (c *Client) ConnectDirectory(ctx context.Context, params *ConnectDirectoryInput, optFns ...func(*Options)) (*ConnectDirectoryOutput, error) { @@ -41,12 +41,13 @@ type ConnectDirectoryInput struct { // This member is required. ConnectSettings *types.DirectoryConnectSettings - // The fully qualified name of the on-premises directory, such as corp.example.com. + // The fully qualified name of your self-managed directory, such as + // corp.example.com. // // This member is required. Name *string - // The password for the on-premises user account. + // The password for your self-managed user account. // // This member is required. Password *string @@ -59,7 +60,7 @@ type ConnectDirectoryInput struct { // A description for the directory. Description *string - // The NetBIOS name of the on-premises directory, such as CORP. + // The NetBIOS name of your self-managed directory, such as CORP. ShortName *string // The tags to be assigned to AD Connector. diff --git a/service/directoryservice/api_op_CreateAlias.go b/service/directoryservice/api_op_CreateAlias.go index 9a3455ac3d6..d60e6c1671f 100644 --- a/service/directoryservice/api_op_CreateAlias.go +++ b/service/directoryservice/api_op_CreateAlias.go @@ -32,9 +32,9 @@ func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optF // Contains the inputs for the CreateAlias operation. type CreateAliasInput struct { - // The requested alias. The alias must be unique amongst all aliases in AWS. This - // operation throws an EntityAlreadyExistsException error if the alias already - // exists. + // The requested alias. 
The alias must be unique amongst all aliases in Amazon Web + // Services. This operation throws an EntityAlreadyExistsException error if the + // alias already exists. // // This member is required. Alias *string diff --git a/service/directoryservice/api_op_CreateConditionalForwarder.go b/service/directoryservice/api_op_CreateConditionalForwarder.go index e66506b659b..d5873d69480 100644 --- a/service/directoryservice/api_op_CreateConditionalForwarder.go +++ b/service/directoryservice/api_op_CreateConditionalForwarder.go @@ -10,9 +10,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a conditional forwarder associated with your AWS directory. Conditional -// forwarders are required in order to set up a trust relationship with another -// domain. The conditional forwarder points to the trusted domain. +// Creates a conditional forwarder associated with your Amazon Web Services +// directory. Conditional forwarders are required in order to set up a trust +// relationship with another domain. The conditional forwarder points to the +// trusted domain. func (c *Client) CreateConditionalForwarder(ctx context.Context, params *CreateConditionalForwarderInput, optFns ...func(*Options)) (*CreateConditionalForwarderOutput, error) { if params == nil { params = &CreateConditionalForwarderInput{} @@ -28,13 +29,13 @@ func (c *Client) CreateConditionalForwarder(ctx context.Context, params *CreateC return out, nil } -// Initiates the creation of a conditional forwarder for your AWS Directory Service -// for Microsoft Active Directory. Conditional forwarders are required in order to -// set up a trust relationship with another domain. +// Initiates the creation of a conditional forwarder for your Directory Service for +// Microsoft Active Directory. Conditional forwarders are required in order to set +// up a trust relationship with another domain. 
type CreateConditionalForwarderInput struct { - // The directory ID of the AWS directory for which you are creating the conditional - // forwarder. + // The directory ID of the Amazon Web Services directory for which you are creating + // the conditional forwarder. // // This member is required. DirectoryId *string diff --git a/service/directoryservice/api_op_CreateDirectory.go b/service/directoryservice/api_op_CreateDirectory.go index ed770b22482..8589d169061 100644 --- a/service/directoryservice/api_op_CreateDirectory.go +++ b/service/directoryservice/api_op_CreateDirectory.go @@ -13,10 +13,10 @@ import ( // Creates a Simple AD directory. For more information, see Simple Active Directory // (https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_simple_ad.html) -// in the AWS Directory Service Admin Guide. Before you call CreateDirectory, -// ensure that all of the required permissions have been explicitly granted through -// a policy. For details about what permissions are required to run the -// CreateDirectory operation, see AWS Directory Service API Permissions: Actions, +// in the Directory Service Admin Guide. Before you call CreateDirectory, ensure +// that all of the required permissions have been explicitly granted through a +// policy. For details about what permissions are required to run the +// CreateDirectory operation, see Directory Service API Permissions: Actions, // Resources, and Conditions Reference // (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html). 
func (c *Client) CreateDirectory(ctx context.Context, params *CreateDirectoryInput, optFns ...func(*Options)) (*CreateDirectoryOutput, error) { diff --git a/service/directoryservice/api_op_CreateLogSubscription.go b/service/directoryservice/api_op_CreateLogSubscription.go index 1c95d9a65c7..354e60f809f 100644 --- a/service/directoryservice/api_op_CreateLogSubscription.go +++ b/service/directoryservice/api_op_CreateLogSubscription.go @@ -11,7 +11,8 @@ import ( ) // Creates a subscription to forward real-time Directory Service domain controller -// security logs to the specified Amazon CloudWatch log group in your AWS account. +// security logs to the specified Amazon CloudWatch log group in your Amazon Web +// Services account. func (c *Client) CreateLogSubscription(ctx context.Context, params *CreateLogSubscriptionInput, optFns ...func(*Options)) (*CreateLogSubscriptionOutput, error) { if params == nil { params = &CreateLogSubscriptionInput{} diff --git a/service/directoryservice/api_op_CreateMicrosoftAD.go b/service/directoryservice/api_op_CreateMicrosoftAD.go index fa858dec249..2f89e8fe9b0 100644 --- a/service/directoryservice/api_op_CreateMicrosoftAD.go +++ b/service/directoryservice/api_op_CreateMicrosoftAD.go @@ -11,13 +11,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a Microsoft AD directory in the AWS Cloud. For more information, see AWS -// Managed Microsoft AD +// Creates a Microsoft AD directory in the Amazon Web Services Cloud. For more +// information, see Managed Microsoft AD // (https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) -// in the AWS Directory Service Admin Guide. Before you call CreateMicrosoftAD, -// ensure that all of the required permissions have been explicitly granted through -// a policy. 
For details about what permissions are required to run the -// CreateMicrosoftAD operation, see AWS Directory Service API Permissions: Actions, +// in the Directory Service Admin Guide. Before you call CreateMicrosoftAD, ensure +// that all of the required permissions have been explicitly granted through a +// policy. For details about what permissions are required to run the +// CreateMicrosoftAD operation, see Directory Service API Permissions: Actions, // Resources, and Conditions Reference // (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html). func (c *Client) CreateMicrosoftAD(ctx context.Context, params *CreateMicrosoftADInput, optFns ...func(*Options)) (*CreateMicrosoftADOutput, error) { @@ -35,12 +35,12 @@ func (c *Client) CreateMicrosoftAD(ctx context.Context, params *CreateMicrosoftA return out, nil } -// Creates an AWS Managed Microsoft AD directory. +// Creates a Managed Microsoft AD directory. type CreateMicrosoftADInput struct { - // The fully qualified domain name for the AWS Managed Microsoft AD directory, such - // as corp.example.com. This name will resolve inside your VPC only. It does not - // need to be publicly resolvable. + // The fully qualified domain name for the Managed Microsoft AD directory, such as + // corp.example.com. This name will resolve inside your VPC only. It does not need + // to be publicly resolvable. // // This member is required. Name *string @@ -57,11 +57,11 @@ type CreateMicrosoftADInput struct { // This member is required. VpcSettings *types.DirectoryVpcSettings - // A description for the directory. This label will appear on the AWS console - // Directory Details page after the directory is created. + // A description for the directory. This label will appear on the Amazon Web + // Services console Directory Details page after the directory is created. Description *string - // AWS Managed Microsoft AD is available in two editions: Standard and Enterprise.
+ // Managed Microsoft AD is available in two editions: Standard and Enterprise. // Enterprise is the default. Edition types.DirectoryEdition @@ -70,7 +70,7 @@ type CreateMicrosoftADInput struct { // for the directory DNS corp.example.com. ShortName *string - // The tags to be assigned to the AWS Managed Microsoft AD directory. + // The tags to be assigned to the Managed Microsoft AD directory. Tags []types.Tag noSmithyDocumentSerde diff --git a/service/directoryservice/api_op_CreateSnapshot.go b/service/directoryservice/api_op_CreateSnapshot.go index 12ed2b2bd53..cd40b16f1de 100644 --- a/service/directoryservice/api_op_CreateSnapshot.go +++ b/service/directoryservice/api_op_CreateSnapshot.go @@ -10,8 +10,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a snapshot of a Simple AD or Microsoft AD directory in the AWS cloud. -// You cannot take snapshots of AD Connector directories. +// Creates a snapshot of a Simple AD or Microsoft AD directory in the Amazon Web +// Services cloud. You cannot take snapshots of AD Connector directories. func (c *Client) CreateSnapshot(ctx context.Context, params *CreateSnapshotInput, optFns ...func(*Options)) (*CreateSnapshotOutput, error) { if params == nil { params = &CreateSnapshotInput{} diff --git a/service/directoryservice/api_op_CreateTrust.go b/service/directoryservice/api_op_CreateTrust.go index ad5abdac40c..34f8c0ec418 100644 --- a/service/directoryservice/api_op_CreateTrust.go +++ b/service/directoryservice/api_op_CreateTrust.go @@ -11,14 +11,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// AWS Directory Service for Microsoft Active Directory allows you to configure -// trust relationships. For example, you can establish a trust between your AWS -// Managed Microsoft AD directory, and your existing on-premises Microsoft Active +// Directory Service for Microsoft Active Directory allows you to configure trust +// relationships. 
For example, you can establish a trust between your Managed +// Microsoft AD directory, and your existing self-managed Microsoft Active // Directory. This would allow you to provide users and groups access to resources // in either domain, with a single set of credentials. This action initiates the -// creation of the AWS side of a trust relationship between an AWS Managed -// Microsoft AD directory and an external domain. You can create either a forest -// trust or an external trust. +// creation of the Amazon Web Services side of a trust relationship between a +// Managed Microsoft AD directory and an external domain. You can create either a +// forest trust or an external trust. func (c *Client) CreateTrust(ctx context.Context, params *CreateTrustInput, optFns ...func(*Options)) (*CreateTrustOutput, error) { if params == nil { params = &CreateTrustInput{} @@ -34,17 +34,17 @@ func (c *Client) CreateTrust(ctx context.Context, params *CreateTrustInput, optF return out, nil } -// AWS Directory Service for Microsoft Active Directory allows you to configure -// trust relationships. For example, you can establish a trust between your AWS -// Managed Microsoft AD directory, and your existing on-premises Microsoft Active +// Directory Service for Microsoft Active Directory allows you to configure trust +// relationships. For example, you can establish a trust between your Managed +// Microsoft AD directory, and your existing self-managed Microsoft Active // Directory. This would allow you to provide users and groups access to resources // in either domain, with a single set of credentials. This action initiates the -// creation of the AWS side of a trust relationship between an AWS Managed -// Microsoft AD directory and an external domain. +// creation of the Amazon Web Services side of a trust relationship between a +// Managed Microsoft AD directory and an external domain.
type CreateTrustInput struct { - // The Directory ID of the AWS Managed Microsoft AD directory for which to - // establish the trust relationship. + // The Directory ID of the Managed Microsoft AD directory for which to establish + // the trust relationship. // // This member is required. DirectoryId *string diff --git a/service/directoryservice/api_op_DeleteConditionalForwarder.go b/service/directoryservice/api_op_DeleteConditionalForwarder.go index 79c6bcac447..b67c13c2ee0 100644 --- a/service/directoryservice/api_op_DeleteConditionalForwarder.go +++ b/service/directoryservice/api_op_DeleteConditionalForwarder.go @@ -10,7 +10,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes a conditional forwarder that has been set up for your AWS directory. +// Deletes a conditional forwarder that has been set up for your Amazon Web +// Services directory. func (c *Client) DeleteConditionalForwarder(ctx context.Context, params *DeleteConditionalForwarderInput, optFns ...func(*Options)) (*DeleteConditionalForwarderOutput, error) { if params == nil { params = &DeleteConditionalForwarderInput{} diff --git a/service/directoryservice/api_op_DeleteDirectory.go b/service/directoryservice/api_op_DeleteDirectory.go index bd46e711913..b2715d9bc5c 100644 --- a/service/directoryservice/api_op_DeleteDirectory.go +++ b/service/directoryservice/api_op_DeleteDirectory.go @@ -10,10 +10,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an AWS Directory Service directory. Before you call DeleteDirectory, -// ensure that all of the required permissions have been explicitly granted through -// a policy. For details about what permissions are required to run the -// DeleteDirectory operation, see AWS Directory Service API Permissions: Actions, +// Deletes an Directory Service directory. Before you call DeleteDirectory, ensure +// that all of the required permissions have been explicitly granted through a +// policy. 
For details about what permissions are required to run the +// DeleteDirectory operation, see Directory Service API Permissions: Actions, // Resources, and Conditions Reference // (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/UsingWithDS_IAM_ResourcePermissions.html). func (c *Client) DeleteDirectory(ctx context.Context, params *DeleteDirectoryInput, optFns ...func(*Options)) (*DeleteDirectoryOutput, error) { diff --git a/service/directoryservice/api_op_DeleteTrust.go b/service/directoryservice/api_op_DeleteTrust.go index 53c35e10731..770257e7a83 100644 --- a/service/directoryservice/api_op_DeleteTrust.go +++ b/service/directoryservice/api_op_DeleteTrust.go @@ -10,7 +10,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an existing trust relationship between your AWS Managed Microsoft AD +// Deletes an existing trust relationship between your Managed Microsoft AD // directory and an external domain. func (c *Client) DeleteTrust(ctx context.Context, params *DeleteTrustInput, optFns ...func(*Options)) (*DeleteTrustOutput, error) { if params == nil { @@ -27,7 +27,7 @@ func (c *Client) DeleteTrust(ctx context.Context, params *DeleteTrustInput, optF return out, nil } -// Deletes the local side of an existing trust relationship between the AWS Managed +// Deletes the local side of an existing trust relationship between the Managed // Microsoft AD directory and the external domain. type DeleteTrustInput struct { diff --git a/service/directoryservice/api_op_DeregisterEventTopic.go b/service/directoryservice/api_op_DeregisterEventTopic.go index b03fb99e7a5..af2923a8097 100644 --- a/service/directoryservice/api_op_DeregisterEventTopic.go +++ b/service/directoryservice/api_op_DeregisterEventTopic.go @@ -10,7 +10,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the specified directory as a publisher to the specified SNS topic. 
+// Removes the specified directory as a publisher to the specified Amazon SNS +// topic. func (c *Client) DeregisterEventTopic(ctx context.Context, params *DeregisterEventTopicInput, optFns ...func(*Options)) (*DeregisterEventTopicOutput, error) { if params == nil { params = &DeregisterEventTopicInput{} @@ -26,16 +27,18 @@ func (c *Client) DeregisterEventTopic(ctx context.Context, params *DeregisterEve return out, nil } -// Removes the specified directory as a publisher to the specified SNS topic. +// Removes the specified directory as a publisher to the specified Amazon SNS +// topic. type DeregisterEventTopicInput struct { // The Directory ID to remove as a publisher. This directory will no longer send - // messages to the specified SNS topic. + // messages to the specified Amazon SNS topic. // // This member is required. DirectoryId *string - // The name of the SNS topic from which to remove the directory as a publisher. + // The name of the Amazon SNS topic from which to remove the directory as a + // publisher. // // This member is required. TopicName *string diff --git a/service/directoryservice/api_op_DescribeClientAuthenticationSettings.go b/service/directoryservice/api_op_DescribeClientAuthenticationSettings.go new file mode 100644 index 00000000000..15918689ecd --- /dev/null +++ b/service/directoryservice/api_op_DescribeClientAuthenticationSettings.go @@ -0,0 +1,145 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package directoryservice + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves information about the type of client authentication for the specified +// directory, if the type is specified. 
If no type is specified, information about +// all client authentication types that are supported for the specified directory +// is retrieved. Currently, only SmartCard is supported. +func (c *Client) DescribeClientAuthenticationSettings(ctx context.Context, params *DescribeClientAuthenticationSettingsInput, optFns ...func(*Options)) (*DescribeClientAuthenticationSettingsOutput, error) { + if params == nil { + params = &DescribeClientAuthenticationSettingsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeClientAuthenticationSettings", params, optFns, c.addOperationDescribeClientAuthenticationSettingsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeClientAuthenticationSettingsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeClientAuthenticationSettingsInput struct { + + // The identifier of the directory for which to retrieve information. + // + // This member is required. + DirectoryId *string + + // The maximum number of items to return. If this value is zero, the maximum number + // of items is specified by the limitations of the operation. + Limit *int32 + + // The DescribeClientAuthenticationSettingsResult.NextToken value from a previous + // call to DescribeClientAuthenticationSettings. Pass null if this is the first + // call. + NextToken *string + + // The type of client authentication for which to retrieve information. If no type + // is specified, a list of all client authentication types that are supported for + // the specified directory is retrieved. + Type types.ClientAuthenticationType + + noSmithyDocumentSerde +} + +type DescribeClientAuthenticationSettingsOutput struct { + + // Information about the type of client authentication for the specified directory. 
+ // The following information is retrieved: The date and time when the status of the + // client authentication type was last updated, whether the client authentication + // type is enabled or disabled, and the type of client authentication. + ClientAuthenticationSettingsInfo []types.ClientAuthenticationSettingInfo + + // The next token used to retrieve the client authentication settings if the number + // of setting types exceeds page limit and there is another page. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeClientAuthenticationSettingsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeClientAuthenticationSettings{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeClientAuthenticationSettings{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeClientAuthenticationSettingsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeClientAuthenticationSettings(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeClientAuthenticationSettings(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ds", + OperationName: "DescribeClientAuthenticationSettings", + } +} diff --git a/service/directoryservice/api_op_DescribeEventTopics.go b/service/directoryservice/api_op_DescribeEventTopics.go index 938d54af27f..358fd24136b 100644 --- a/service/directoryservice/api_op_DescribeEventTopics.go +++ b/service/directoryservice/api_op_DescribeEventTopics.go @@ -11,9 +11,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Obtains information about which SNS topics receive status messages from the -// specified directory. If no input parameters are provided, such as DirectoryId or -// TopicName, this request describes all of the associations in the account. +// Obtains information about which Amazon SNS topics receive status messages from +// the specified directory. If no input parameters are provided, such as +// DirectoryId or TopicName, this request describes all of the associations in the +// account. 
func (c *Client) DescribeEventTopics(ctx context.Context, params *DescribeEventTopicsInput, optFns ...func(*Options)) (*DescribeEventTopicsOutput, error) { if params == nil { params = &DescribeEventTopicsInput{} @@ -32,13 +33,13 @@ func (c *Client) DescribeEventTopics(ctx context.Context, params *DescribeEventT // Describes event topics. type DescribeEventTopicsInput struct { - // The Directory ID for which to get the list of associated SNS topics. If this - // member is null, associations for all Directory IDs are returned. + // The Directory ID for which to get the list of associated Amazon SNS topics. If + // this member is null, associations for all Directory IDs are returned. DirectoryId *string - // A list of SNS topic names for which to obtain the information. If this member is - // null, all associations for the specified Directory ID are returned. An empty - // list results in an InvalidParameterException being thrown. + // A list of Amazon SNS topic names for which to obtain the information. If this + // member is null, all associations for the specified Directory ID are returned. An + // empty list results in an InvalidParameterException being thrown. TopicNames []string noSmithyDocumentSerde @@ -47,7 +48,7 @@ type DescribeEventTopicsInput struct { // The result of a DescribeEventTopic request. type DescribeEventTopicsOutput struct { - // A list of SNS topic names that receive status messages from the specified + // A list of Amazon SNS topic names that receive status messages from the specified // Directory ID. 
EventTopics []types.EventTopic diff --git a/service/directoryservice/api_op_DescribeTrusts.go b/service/directoryservice/api_op_DescribeTrusts.go index 827f231bbab..f6e71526713 100644 --- a/service/directoryservice/api_op_DescribeTrusts.go +++ b/service/directoryservice/api_op_DescribeTrusts.go @@ -29,13 +29,13 @@ func (c *Client) DescribeTrusts(ctx context.Context, params *DescribeTrustsInput return out, nil } -// Describes the trust relationships for a particular AWS Managed Microsoft AD -// directory. If no input parameters are are provided, such as directory ID or -// trust ID, this request describes all the trust relationships. +// Describes the trust relationships for a particular Managed Microsoft AD +// directory. If no input parameters are provided, such as directory ID or trust +// ID, this request describes all the trust relationships. type DescribeTrustsInput struct { - // The Directory ID of the AWS directory that is a part of the requested trust - // relationship. + // The Directory ID of the Amazon Web Services directory that is a part of the + // requested trust relationship. DirectoryId *string // The maximum number of objects to return. diff --git a/service/directoryservice/api_op_EnableClientAuthentication.go b/service/directoryservice/api_op_EnableClientAuthentication.go index ee94b199148..c28a2ae015c 100644 --- a/service/directoryservice/api_op_EnableClientAuthentication.go +++ b/service/directoryservice/api_op_EnableClientAuthentication.go @@ -36,8 +36,8 @@ type EnableClientAuthenticationInput struct { // The type of client authentication to enable. Currently only the value SmartCard // is supported. Smart card authentication in AD Connector requires that you enable - // Kerberos Constrained Delegation for the Service User to the LDAP service in the - // on-premises AD. + // Kerberos Constrained Delegation for the Service User to the LDAP service in your + // self-managed AD. // // This member is required. 
Type types.ClientAuthenticationType diff --git a/service/directoryservice/api_op_EnableSso.go b/service/directoryservice/api_op_EnableSso.go index 10825384244..683c2cd70b4 100644 --- a/service/directoryservice/api_op_EnableSso.go +++ b/service/directoryservice/api_op_EnableSso.go @@ -11,8 +11,8 @@ import ( ) // Enables single sign-on for a directory. Single sign-on allows users in your -// directory to access certain AWS services from a computer joined to the directory -// without having to enter their credentials separately. +// directory to access certain Amazon Web Services services from a computer joined +// to the directory without having to enter their credentials separately. func (c *Client) EnableSso(ctx context.Context, params *EnableSsoInput, optFns ...func(*Options)) (*EnableSsoOutput, error) { if params == nil { params = &EnableSsoInput{} diff --git a/service/directoryservice/api_op_GetDirectoryLimits.go b/service/directoryservice/api_op_GetDirectoryLimits.go index 9cb05b7b14d..1ca226cee0b 100644 --- a/service/directoryservice/api_op_GetDirectoryLimits.go +++ b/service/directoryservice/api_op_GetDirectoryLimits.go @@ -36,7 +36,7 @@ type GetDirectoryLimitsInput struct { type GetDirectoryLimitsOutput struct { // A DirectoryLimits object that contains the directory limits for the current - // rRegion. + // Region. DirectoryLimits *types.DirectoryLimits // Metadata pertaining to the operation's result. diff --git a/service/directoryservice/api_op_ListLogSubscriptions.go b/service/directoryservice/api_op_ListLogSubscriptions.go index ae7da636a38..a135612c9ea 100644 --- a/service/directoryservice/api_op_ListLogSubscriptions.go +++ b/service/directoryservice/api_op_ListLogSubscriptions.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the active log subscriptions for the AWS account. +// Lists the active log subscriptions for the Amazon Web Services account. 
func (c *Client) ListLogSubscriptions(ctx context.Context, params *ListLogSubscriptionsInput, optFns ...func(*Options)) (*ListLogSubscriptionsOutput, error) { if params == nil { params = &ListLogSubscriptionsInput{} @@ -31,8 +31,9 @@ type ListLogSubscriptionsInput struct { // If a DirectoryID is provided, lists only the log subscription associated with // that directory. If no DirectoryId is provided, lists all log subscriptions - // associated with your AWS account. If there are no log subscriptions for the AWS - // account or the directory, an empty list will be returned. + // associated with your Amazon Web Services account. If there are no log + // subscriptions for the Amazon Web Services account or the directory, an empty + // list will be returned. DirectoryId *string // The maximum number of items returned. @@ -46,7 +47,8 @@ type ListLogSubscriptionsInput struct { type ListLogSubscriptionsOutput struct { - // A list of active LogSubscription objects for calling the AWS account. + // A list of active LogSubscription objects for calling the Amazon Web Services + // account. LogSubscriptions []types.LogSubscription // The token for the next set of items to return. diff --git a/service/directoryservice/api_op_RegisterEventTopic.go b/service/directoryservice/api_op_RegisterEventTopic.go index 45e980331d1..1db59cfb2ea 100644 --- a/service/directoryservice/api_op_RegisterEventTopic.go +++ b/service/directoryservice/api_op_RegisterEventTopic.go @@ -10,11 +10,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Associates a directory with an SNS topic. This establishes the directory as a -// publisher to the specified SNS topic. You can then receive email or text (SMS) -// messages when the status of your directory changes. You get notified if your -// directory goes from an Active status to an Impaired or Inoperable status. You -// also receive a notification when the directory returns to an Active status. 
+// Associates a directory with an Amazon SNS topic. This establishes the directory +// as a publisher to the specified Amazon SNS topic. You can then receive email or +// text (SMS) messages when the status of your directory changes. You get notified +// if your directory goes from an Active status to an Impaired or Inoperable +// status. You also receive a notification when the directory returns to an Active +// status. func (c *Client) RegisterEventTopic(ctx context.Context, params *RegisterEventTopicInput, optFns ...func(*Options)) (*RegisterEventTopicOutput, error) { if params == nil { params = &RegisterEventTopicInput{} @@ -33,13 +34,13 @@ func (c *Client) RegisterEventTopic(ctx context.Context, params *RegisterEventTo // Registers a new event topic. type RegisterEventTopicInput struct { - // The Directory ID that will publish status messages to the SNS topic. + // The Directory ID that will publish status messages to the Amazon SNS topic. // // This member is required. DirectoryId *string - // The SNS topic name to which the directory will publish status messages. This SNS - // topic must be in the same region as the specified Directory ID. + // The Amazon SNS topic name to which the directory will publish status messages. + // This Amazon SNS topic must be in the same region as the specified Directory ID. // // This member is required. TopicName *string diff --git a/service/directoryservice/api_op_ResetUserPassword.go b/service/directoryservice/api_op_ResetUserPassword.go index baf8cd30dd2..b8276302739 100644 --- a/service/directoryservice/api_op_ResetUserPassword.go +++ b/service/directoryservice/api_op_ResetUserPassword.go @@ -10,7 +10,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Resets the password for any user in your AWS Managed Microsoft AD or Simple AD +// Resets the password for any user in your Managed Microsoft AD or Simple AD // directory. 
You can reset the password for any user in your directory with the // following exceptions: // @@ -18,13 +18,14 @@ import ( // user that is a member of either the Domain Admins or Enterprise Admins group // except for the administrator user. // -// * For AWS Managed Microsoft AD, you can only +// * For Managed Microsoft AD, you can only // reset the password for a user that is in an OU based off of the NetBIOS name // that you typed when you created your directory. For example, you cannot reset -// the password for a user in the AWS Reserved OU. For more information about the -// OU structure for an AWS Managed Microsoft AD directory, see What Gets Created +// the password for a user in the Amazon Web Services Reserved OU. For more +// information about the OU structure for a Managed Microsoft AD directory, see +// What Gets Created // (https://docs.aws.amazon.com/directoryservice/latest/admin-guide/ms_ad_getting_started_what_gets_created.html) -// in the AWS Directory Service Administration Guide. +// in the Directory Service Administration Guide. func (c *Client) ResetUserPassword(ctx context.Context, params *ResetUserPasswordInput, optFns ...func(*Options)) (*ResetUserPasswordOutput, error) { if params == nil { params = &ResetUserPasswordInput{} @@ -42,8 +43,8 @@ func (c *Client) ResetUserPassword(ctx context.Context, params *ResetUserPasswor type ResetUserPasswordInput struct { - // Identifier of the AWS Managed Microsoft AD or Simple AD directory in which the - // user resides. + // Identifier of the Managed Microsoft AD or Simple AD directory in which the user + // resides. // // This member is required.
DirectoryId *string diff --git a/service/directoryservice/api_op_ShareDirectory.go b/service/directoryservice/api_op_ShareDirectory.go index 8b1c599e659..7e9d3e8770c 100644 --- a/service/directoryservice/api_op_ShareDirectory.go +++ b/service/directoryservice/api_op_ShareDirectory.go @@ -11,19 +11,21 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Shares a specified directory (DirectoryId) in your AWS account (directory owner) -// with another AWS account (directory consumer). With this operation you can use -// your directory from any AWS account and from any Amazon VPC within an AWS -// Region. When you share your AWS Managed Microsoft AD directory, AWS Directory -// Service creates a shared directory in the directory consumer account. This -// shared directory contains the metadata to provide access to the directory within -// the directory owner account. The shared directory is visible in all VPCs in the -// directory consumer account. The ShareMethod parameter determines whether the -// specified directory can be shared between AWS accounts inside the same AWS +// Shares a specified directory (DirectoryId) in your Amazon Web Services account +// (directory owner) with another Amazon Web Services account (directory consumer). +// With this operation you can use your directory from any Amazon Web Services +// account and from any Amazon VPC within an Amazon Web Services Region. When you +// share your Managed Microsoft AD directory, Directory Service creates a shared +// directory in the directory consumer account. This shared directory contains the +// metadata to provide access to the directory within the directory owner account. +// The shared directory is visible in all VPCs in the directory consumer account. +// The ShareMethod parameter determines whether the specified directory can be +// shared between Amazon Web Services accounts inside the same Amazon Web Services // organization (ORGANIZATIONS). 
It also determines whether you can share the -// directory with any other AWS account either inside or outside of the -// organization (HANDSHAKE). The ShareNotes parameter is only used when HANDSHAKE -// is called, which sends a directory sharing request to the directory consumer. +// directory with any other Amazon Web Services account either inside or outside of +// the organization (HANDSHAKE). The ShareNotes parameter is only used when +// HANDSHAKE is called, which sends a directory sharing request to the directory +// consumer. func (c *Client) ShareDirectory(ctx context.Context, params *ShareDirectoryInput, optFns ...func(*Options)) (*ShareDirectoryOutput, error) { if params == nil { params = &ShareDirectoryInput{} @@ -41,15 +43,16 @@ func (c *Client) ShareDirectory(ctx context.Context, params *ShareDirectoryInput type ShareDirectoryInput struct { - // Identifier of the AWS Managed Microsoft AD directory that you want to share with - // other AWS accounts. + // Identifier of the Managed Microsoft AD directory that you want to share with + // other Amazon Web Services accounts. // // This member is required. DirectoryId *string // The method used when sharing a directory to determine whether the directory - // should be shared within your AWS organization (ORGANIZATIONS) or with any AWS - // account by sending a directory sharing request (HANDSHAKE). + // should be shared within your Amazon Web Services organization (ORGANIZATIONS) or + // with any Amazon Web Services account by sending a directory sharing request + // (HANDSHAKE). // // This member is required. 
ShareMethod types.ShareMethod diff --git a/service/directoryservice/api_op_UnshareDirectory.go b/service/directoryservice/api_op_UnshareDirectory.go index 8741bcce08a..513ca5753d9 100644 --- a/service/directoryservice/api_op_UnshareDirectory.go +++ b/service/directoryservice/api_op_UnshareDirectory.go @@ -29,7 +29,7 @@ func (c *Client) UnshareDirectory(ctx context.Context, params *UnshareDirectoryI type UnshareDirectoryInput struct { - // The identifier of the AWS Managed Microsoft AD directory that you want to stop + // The identifier of the Managed Microsoft AD directory that you want to stop // sharing. // // This member is required. diff --git a/service/directoryservice/api_op_UpdateConditionalForwarder.go b/service/directoryservice/api_op_UpdateConditionalForwarder.go index 0970c2a1079..d3118533815 100644 --- a/service/directoryservice/api_op_UpdateConditionalForwarder.go +++ b/service/directoryservice/api_op_UpdateConditionalForwarder.go @@ -10,7 +10,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates a conditional forwarder that has been set up for your AWS directory. +// Updates a conditional forwarder that has been set up for your Amazon Web +// Services directory. func (c *Client) UpdateConditionalForwarder(ctx context.Context, params *UpdateConditionalForwarderInput, optFns ...func(*Options)) (*UpdateConditionalForwarderOutput, error) { if params == nil { params = &UpdateConditionalForwarderInput{} @@ -29,8 +30,8 @@ func (c *Client) UpdateConditionalForwarder(ctx context.Context, params *UpdateC // Updates a conditional forwarder. type UpdateConditionalForwarderInput struct { - // The directory ID of the AWS directory for which to update the conditional - // forwarder. + // The directory ID of the Amazon Web Services directory for which to update the + // conditional forwarder. // // This member is required. 
DirectoryId *string diff --git a/service/directoryservice/api_op_UpdateTrust.go b/service/directoryservice/api_op_UpdateTrust.go index 98e184f94b2..5de060b80db 100644 --- a/service/directoryservice/api_op_UpdateTrust.go +++ b/service/directoryservice/api_op_UpdateTrust.go @@ -11,8 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates the trust that has been set up between your AWS Managed Microsoft AD -// directory and an on-premises Active Directory. +// Updates the trust that has been set up between your Managed Microsoft AD +// directory and an self-managed Active Directory. func (c *Client) UpdateTrust(ctx context.Context, params *UpdateTrustInput, optFns ...func(*Options)) (*UpdateTrustOutput, error) { if params == nil { params = &UpdateTrustInput{} @@ -43,7 +43,7 @@ type UpdateTrustInput struct { type UpdateTrustOutput struct { - // The AWS request identifier. + // The Amazon Web Services request identifier. RequestId *string // Identifier of the trust relationship. diff --git a/service/directoryservice/api_op_VerifyTrust.go b/service/directoryservice/api_op_VerifyTrust.go index 5604369bd8e..7213dba0b6f 100644 --- a/service/directoryservice/api_op_VerifyTrust.go +++ b/service/directoryservice/api_op_VerifyTrust.go @@ -10,9 +10,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// AWS Directory Service for Microsoft Active Directory allows you to configure and +// Directory Service for Microsoft Active Directory allows you to configure and // verify trust relationships. This action verifies a trust relationship between -// your AWS Managed Microsoft AD directory and an external domain. +// your Managed Microsoft AD directory and an external domain. 
func (c *Client) VerifyTrust(ctx context.Context, params *VerifyTrustInput, optFns ...func(*Options)) (*VerifyTrustOutput, error) { if params == nil { params = &VerifyTrustInput{} @@ -28,8 +28,8 @@ func (c *Client) VerifyTrust(ctx context.Context, params *VerifyTrustInput, optF return out, nil } -// Initiates the verification of an existing trust relationship between an AWS -// Managed Microsoft AD directory and an external domain. +// Initiates the verification of an existing trust relationship between an Managed +// Microsoft AD directory and an external domain. type VerifyTrustInput struct { // The unique Trust ID of the trust relationship to verify. diff --git a/service/directoryservice/deserializers.go b/service/directoryservice/deserializers.go index 38f385c9c42..a24a3528644 100644 --- a/service/directoryservice/deserializers.go +++ b/service/directoryservice/deserializers.go @@ -2755,6 +2755,132 @@ func awsAwsjson11_deserializeOpErrorDescribeCertificate(response *smithyhttp.Res } } +type awsAwsjson11_deserializeOpDescribeClientAuthenticationSettings struct { +} + +func (*awsAwsjson11_deserializeOpDescribeClientAuthenticationSettings) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeClientAuthenticationSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeClientAuthenticationSettings(response, &metadata) + } + output := 
&DescribeClientAuthenticationSettingsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeClientAuthenticationSettingsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeClientAuthenticationSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ClientException", errorCode): + return awsAwsjson11_deserializeErrorClientException(response, errorBody) + + case strings.EqualFold("DirectoryDoesNotExistException", errorCode): + return awsAwsjson11_deserializeErrorDirectoryDoesNotExistException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServiceException", errorCode): + return awsAwsjson11_deserializeErrorServiceException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson11_deserializeOpDescribeConditionalForwarders struct { } @@ -9598,6 +9724,105 @@ func awsAwsjson11_deserializeDocumentCertificatesInfo(v *[]types.CertificateInfo return nil } +func awsAwsjson11_deserializeDocumentClientAuthenticationSettingInfo(v **types.ClientAuthenticationSettingInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClientAuthenticationSettingInfo + if *v == nil { + sv = &types.ClientAuthenticationSettingInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LastUpdatedDateTime": + if value != nil { + 
switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastUpdatedDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected LastUpdatedDateTime to be a JSON Number, got %T instead", value) + + } + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientAuthenticationStatus to be of type string, got %T instead", value) + } + sv.Status = types.ClientAuthenticationStatus(jtv) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientAuthenticationType to be of type string, got %T instead", value) + } + sv.Type = types.ClientAuthenticationType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClientAuthenticationSettingsInfo(v *[]types.ClientAuthenticationSettingInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ClientAuthenticationSettingInfo + if *v == nil { + cv = []types.ClientAuthenticationSettingInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ClientAuthenticationSettingInfo + destAddr := &col + if err := awsAwsjson11_deserializeDocumentClientAuthenticationSettingInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentClientCertAuthSettings(v **types.ClientCertAuthSettings, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -14448,6 +14673,51 @@ func awsAwsjson11_deserializeOpDocumentDescribeCertificateOutput(v **DescribeCer return nil } +func 
awsAwsjson11_deserializeOpDocumentDescribeClientAuthenticationSettingsOutput(v **DescribeClientAuthenticationSettingsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeClientAuthenticationSettingsOutput + if *v == nil { + sv = &DescribeClientAuthenticationSettingsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ClientAuthenticationSettingsInfo": + if err := awsAwsjson11_deserializeDocumentClientAuthenticationSettingsInfo(&sv.ClientAuthenticationSettingsInfo, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentDescribeConditionalForwardersOutput(v **DescribeConditionalForwardersOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/directoryservice/doc.go b/service/directoryservice/doc.go index 5d8acf721da..7b824c96b0e 100644 --- a/service/directoryservice/doc.go +++ b/service/directoryservice/doc.go @@ -3,18 +3,19 @@ // Package directoryservice provides the API client, operations, and parameter // types for AWS Directory Service. // -// AWS Directory Service AWS Directory Service is a web service that makes it easy -// for you to setup and run directories in the AWS cloud, or connect your AWS -// resources with an existing on-premises Microsoft Active Directory. This guide -// provides detailed information about AWS Directory Service operations, data -// types, parameters, and errors. 
For information about AWS Directory Services -// features, see AWS Directory Service (https://aws.amazon.com/directoryservice/) -// and the AWS Directory Service Administration Guide +// Directory Service Directory Service is a web service that makes it easy for you +// to setup and run directories in the Amazon Web Services cloud, or connect your +// Amazon Web Services resources with an existing self-managed Microsoft Active +// Directory. This guide provides detailed information about Directory Service +// operations, data types, parameters, and errors. For information about Directory +// Services features, see Directory Service +// (https://aws.amazon.com/directoryservice/) and the Directory Service +// Administration Guide // (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/what_is.html). -// AWS provides SDKs that consist of libraries and sample code for various -// programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The -// SDKs provide a convenient way to create programmatic access to AWS Directory -// Service and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see Tools for Amazon Web Services -// (http://aws.amazon.com/tools/). +// Amazon Web Services provides SDKs that consist of libraries and sample code for +// various programming languages and platforms (Java, Ruby, .Net, iOS, Android, +// etc.). The SDKs provide a convenient way to create programmatic access to +// Directory Service and other Amazon Web Services services. For more information +// about the Amazon Web Services SDKs, including how to download and install them, +// see Tools for Amazon Web Services (http://aws.amazon.com/tools/). 
package directoryservice diff --git a/service/directoryservice/generated.json b/service/directoryservice/generated.json index 30ffa995826..1e892ee31f2 100644 --- a/service/directoryservice/generated.json +++ b/service/directoryservice/generated.json @@ -27,6 +27,7 @@ "api_op_DeregisterCertificate.go", "api_op_DeregisterEventTopic.go", "api_op_DescribeCertificate.go", + "api_op_DescribeClientAuthenticationSettings.go", "api_op_DescribeConditionalForwarders.go", "api_op_DescribeDirectories.go", "api_op_DescribeDomainControllers.go", diff --git a/service/directoryservice/serializers.go b/service/directoryservice/serializers.go index 8b6f33cfad3..0863bb95b24 100644 --- a/service/directoryservice/serializers.go +++ b/service/directoryservice/serializers.go @@ -1048,6 +1048,53 @@ func (m *awsAwsjson11_serializeOpDescribeCertificate) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpDescribeClientAuthenticationSettings struct { +} + +func (*awsAwsjson11_serializeOpDescribeClientAuthenticationSettings) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeClientAuthenticationSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeClientAuthenticationSettingsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DirectoryService_20150416.DescribeClientAuthenticationSettings") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeClientAuthenticationSettingsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpDescribeConditionalForwarders struct { } @@ -3791,6 +3838,33 @@ func awsAwsjson11_serializeOpDocumentDescribeCertificateInput(v *DescribeCertifi return nil } +func awsAwsjson11_serializeOpDocumentDescribeClientAuthenticationSettingsInput(v *DescribeClientAuthenticationSettingsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DirectoryId != nil { + ok := object.Key("DirectoryId") + ok.String(*v.DirectoryId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if len(v.Type) > 0 { + ok := object.Key("Type") + ok.String(string(v.Type)) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentDescribeConditionalForwardersInput(v *DescribeConditionalForwardersInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/directoryservice/types/enums.go b/service/directoryservice/types/enums.go index 0f4eddb215e..da4337fa207 100644 --- a/service/directoryservice/types/enums.go +++ 
b/service/directoryservice/types/enums.go @@ -46,6 +46,24 @@ func (CertificateType) Values() []CertificateType { } } +type ClientAuthenticationStatus string + +// Enum values for ClientAuthenticationStatus +const ( + ClientAuthenticationStatusEnabled ClientAuthenticationStatus = "Enabled" + ClientAuthenticationStatusDisabled ClientAuthenticationStatus = "Disabled" +) + +// Values returns all known values for ClientAuthenticationStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ClientAuthenticationStatus) Values() []ClientAuthenticationStatus { + return []ClientAuthenticationStatus{ + "Enabled", + "Disabled", + } +} + type ClientAuthenticationType string // Enum values for ClientAuthenticationType diff --git a/service/directoryservice/types/errors.go b/service/directoryservice/types/errors.go index 434c407f7f0..0e68e83ffef 100644 --- a/service/directoryservice/types/errors.go +++ b/service/directoryservice/types/errors.go @@ -163,7 +163,7 @@ func (e *ClientException) ErrorMessage() string { func (e *ClientException) ErrorCode() string { return "ClientException" } func (e *ClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The Region you specified is the same Region where the AWS Managed Microsoft AD +// The Region you specified is the same Region where the Managed Microsoft AD // directory was created. Specify a different Region and try again. type DirectoryAlreadyInRegionException struct { Message *string @@ -187,7 +187,8 @@ func (e *DirectoryAlreadyInRegionException) ErrorCode() string { } func (e *DirectoryAlreadyInRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified directory has already been shared with this AWS account. +// The specified directory has already been shared with this Amazon Web Services +// account. 
type DirectoryAlreadySharedException struct { Message *string @@ -256,7 +257,8 @@ func (e *DirectoryLimitExceededException) ErrorCode() string { } func (e *DirectoryLimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified directory has not been shared with this AWS account. +// The specified directory has not been shared with this Amazon Web Services +// account. type DirectoryNotSharedException struct { Message *string @@ -586,7 +588,8 @@ func (e *NoAvailableCertificateException) ErrorCode() string { } func (e *NoAvailableCertificateException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Exception encountered while trying to access your AWS organization. +// Exception encountered while trying to access your Amazon Web Services +// organization. type OrganizationsException struct { Message *string @@ -629,7 +632,7 @@ func (e *RegionLimitExceededException) ErrorMessage() string { func (e *RegionLimitExceededException) ErrorCode() string { return "RegionLimitExceededException" } func (e *RegionLimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// An exception has occurred in AWS Directory Service. +// An exception has occurred in Directory Service. type ServiceException struct { Message *string @@ -650,8 +653,8 @@ func (e *ServiceException) ErrorMessage() string { func (e *ServiceException) ErrorCode() string { return "ServiceException" } func (e *ServiceException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } -// The maximum number of AWS accounts that you can share with this directory has -// been reached. +// The maximum number of Amazon Web Services accounts that you can share with this +// directory has been reached. 
type ShareLimitExceededException struct { Message *string diff --git a/service/directoryservice/types/types.go b/service/directoryservice/types/types.go index 57701d31873..40c41187516 100644 --- a/service/directoryservice/types/types.go +++ b/service/directoryservice/types/types.go @@ -73,6 +73,25 @@ type CertificateInfo struct { noSmithyDocumentSerde } +// Contains information about a client authentication method for a directory. +type ClientAuthenticationSettingInfo struct { + + // The date and time when the status of the client authentication type was last + // updated. + LastUpdatedDateTime *time.Time + + // Whether the client authentication type is enabled or disabled for the specified + // directory. + Status ClientAuthenticationStatus + + // The type of client authentication for the specified directory. If no type is + // specified, a list of all client authentication types that are supported for the + // directory is retrieved. + Type ClientAuthenticationType + + noSmithyDocumentSerde +} + // Contains information about the client certificate authentication settings for // the RegisterCertificate and DescribeCertificate operations. type ClientCertAuthSettings struct { @@ -116,7 +135,7 @@ type ConditionalForwarder struct { // The replication scope of the conditional forwarder. The only allowed value is // Domain, which will replicate the conditional forwarder to all of the domain - // controllers for your AWS directory. + // controllers for your Amazon Web Services directory. ReplicationScope ReplicationScope noSmithyDocumentSerde @@ -126,17 +145,17 @@ type ConditionalForwarder struct { // directory is being created. type DirectoryConnectSettings struct { - // A list of one or more IP addresses of DNS servers or domain controllers in the - // on-premises directory. + // A list of one or more IP addresses of DNS servers or domain controllers in your + // self-managed directory. // // This member is required. 
CustomerDnsIps []string - // The user name of an account in the on-premises directory that is used to connect - // to the directory. This account must have the following permissions: + // The user name of an account in your self-managed directory that is used to + // connect to the directory. This account must have the following permissions: // - // * Read - // users and groups + // * + // Read users and groups // // * Create computer objects // @@ -167,7 +186,7 @@ type DirectoryConnectSettingsDescription struct { // The IP addresses of the AD Connector servers. ConnectIps []string - // The user name of the service account in the on-premises directory. + // The user name of the service account in your self-managed directory. CustomerUserName *string // The security group identifier for the AD Connector directory. @@ -182,7 +201,7 @@ type DirectoryConnectSettingsDescription struct { noSmithyDocumentSerde } -// Contains information about an AWS Directory Service directory. +// Contains information about an Directory Service directory. type DirectoryDescription struct { // The access URL for the directory, such as http://.awsapps.com. If no alias has @@ -212,7 +231,7 @@ type DirectoryDescription struct { // The IP addresses of the DNS servers for the directory. For a Simple AD or // Microsoft AD directory, these are the IP addresses of the Simple AD or Microsoft // AD directory servers. For an AD Connector directory, these are the IP addresses - // of the DNS servers or domain controllers in the on-premises directory to which + // of the DNS servers or domain controllers in your self-managed directory to which // the AD Connector is connected. DnsIpAddrs []string @@ -225,7 +244,7 @@ type DirectoryDescription struct { // The fully qualified name of the directory. Name *string - // Describes the AWS Managed Microsoft AD directory in the directory owner account. + // Describes the Managed Microsoft AD directory in the directory owner account. 
OwnerDirectoryDescription *OwnerDirectoryDescription // A RadiusSettings object that contains information about the RADIUS server @@ -239,8 +258,9 @@ type DirectoryDescription struct { RegionsInfo *RegionsInfo // The method used when sharing a directory to determine whether the directory - // should be shared within your AWS organization (ORGANIZATIONS) or with any AWS - // account by sending a shared directory request (HANDSHAKE). + // should be shared within your Amazon Web Services organization (ORGANIZATIONS) or + // with any Amazon Web Services account by sending a shared directory request + // (HANDSHAKE). ShareMethod ShareMethod // A directory share request that is sent by the directory owner to the directory @@ -248,7 +268,7 @@ type DirectoryDescription struct { // administrator determine whether to approve or reject the share invitation. ShareNotes *string - // Current directory status of the shared AWS Managed Microsoft AD directory. + // Current directory status of the shared Managed Microsoft AD directory. ShareStatus ShareStatus // The short name of the directory. @@ -275,7 +295,7 @@ type DirectoryDescription struct { // A DirectoryVpcSettingsDescription object that contains additional information // about a directory. This member is only present if the directory is a Simple AD - // or Managed AD directory. + // or Managed Microsoft AD directory. VpcSettings *DirectoryVpcSettingsDescription noSmithyDocumentSerde @@ -293,14 +313,13 @@ type DirectoryLimits struct { // Indicates if the cloud directory limit has been reached. CloudOnlyDirectoriesLimitReached bool - // The current number of AWS Managed Microsoft AD directories in the region. + // The current number of Managed Microsoft AD directories in the region. CloudOnlyMicrosoftADCurrentCount *int32 - // The maximum number of AWS Managed Microsoft AD directories allowed in the - // region. + // The maximum number of Managed Microsoft AD directories allowed in the region. 
CloudOnlyMicrosoftADLimit *int32 - // Indicates if the AWS Managed Microsoft AD directory limit has been reached. + // Indicates if the Managed Microsoft AD directory limit has been reached. CloudOnlyMicrosoftADLimitReached bool // The current number of connected directories in the Region. @@ -319,8 +338,8 @@ type DirectoryLimits struct { type DirectoryVpcSettings struct { // The identifiers of the subnets for the directory servers. The two subnets must - // be in different Availability Zones. AWS Directory Service creates a directory - // server and a DNS server in each of these subnets. + // be in different Availability Zones. Directory Service creates a directory server + // and a DNS server in each of these subnets. // // This member is required. SubnetIds []string @@ -387,34 +406,35 @@ type DomainController struct { noSmithyDocumentSerde } -// Information about SNS topic and AWS Directory Service directory associations. +// Information about Amazon SNS topic and Directory Service directory associations. type EventTopic struct { - // The date and time of when you associated your directory with the SNS topic. + // The date and time of when you associated your directory with the Amazon SNS + // topic. CreatedDateTime *time.Time - // The Directory ID of an AWS Directory Service directory that will publish status - // messages to an SNS topic. + // The Directory ID of an Directory Service directory that will publish status + // messages to an Amazon SNS topic. DirectoryId *string // The topic registration status. Status TopicStatus - // The SNS topic ARN (Amazon Resource Name). + // The Amazon SNS topic ARN (Amazon Resource Name). TopicArn *string - // The name of an AWS SNS topic the receives status messages from the directory. + // The name of an Amazon SNS topic the receives status messages from the directory. TopicName *string noSmithyDocumentSerde } // IP address block. This is often the address block of the DNS server used for -// your on-premises domain. 
+// your self-managed domain. type IpRoute struct { // IP address block using CIDR format, for example 10.0.0.0/24. This is often the - // address block of the DNS server used for your on-premises domain. For a single + // address block of the DNS server used for your self-managed domain. For a single // IP address use a CIDR address block with /32. For example 10.0.0.0/32. CidrIp *string @@ -487,8 +507,7 @@ type OwnerDirectoryDescription struct { // Identifier of the directory owner account. AccountId *string - // Identifier of the AWS Managed Microsoft AD directory in the directory owner - // account. + // Identifier of the Managed Microsoft AD directory in the directory owner account. DirectoryId *string // IP address of the directory’s domain controllers. @@ -516,8 +535,8 @@ type RadiusSettings struct { // Not currently used. DisplayLabel *string - // The port that your RADIUS server is using for communications. Your on-premises - // network must allow inbound traffic over this port from the AWS Directory Service + // The port that your RADIUS server is using for communications. Your self-managed + // network must allow inbound traffic over this port from the Directory Service // servers. RadiusPort int32 @@ -584,7 +603,7 @@ type RegionsInfo struct { // Region. AdditionalRegions []string - // The Region where the AWS Managed Microsoft AD directory was originally created. + // The Region where the Managed Microsoft AD directory was originally created. PrimaryRegion *string noSmithyDocumentSerde @@ -636,8 +655,9 @@ type SharedDirectory struct { OwnerDirectoryId *string // The method used when sharing a directory to determine whether the directory - // should be shared within your AWS organization (ORGANIZATIONS) or with any AWS - // account by sending a shared directory request (HANDSHAKE). 
+ // should be shared within your Amazon Web Services organization (ORGANIZATIONS) or + // with any Amazon Web Services account by sending a shared directory request + // (HANDSHAKE). ShareMethod ShareMethod // A directory share request that is sent by the directory owner to the directory @@ -645,7 +665,7 @@ type SharedDirectory struct { // administrator determine whether to approve or reject the share invitation. ShareNotes *string - // Current directory status of the shared AWS Managed Microsoft AD directory. + // Current directory status of the shared Managed Microsoft AD directory. ShareStatus ShareStatus // Identifier of the directory consumer account that has access to the shared @@ -735,14 +755,15 @@ type Tag struct { noSmithyDocumentSerde } -// Describes a trust relationship between an AWS Managed Microsoft AD directory and -// an external domain. +// Describes a trust relationship between an Managed Microsoft AD directory and an +// external domain. type Trust struct { // The date and time that the trust relationship was created. CreatedDateTime *time.Time - // The Directory ID of the AWS directory involved in the trust relationship. + // The Directory ID of the Amazon Web Services directory involved in the trust + // relationship. DirectoryId *string // The date and time that the trust relationship was last updated. 
diff --git a/service/directoryservice/validators.go b/service/directoryservice/validators.go index 8fca04bb602..07f63bbee95 100644 --- a/service/directoryservice/validators.go +++ b/service/directoryservice/validators.go @@ -450,6 +450,26 @@ func (m *validateOpDescribeCertificate) HandleInitialize(ctx context.Context, in return next.HandleInitialize(ctx, in) } +type validateOpDescribeClientAuthenticationSettings struct { +} + +func (*validateOpDescribeClientAuthenticationSettings) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeClientAuthenticationSettings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeClientAuthenticationSettingsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeClientAuthenticationSettingsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDescribeConditionalForwarders struct { } @@ -1218,6 +1238,10 @@ func addOpDescribeCertificateValidationMiddleware(stack *middleware.Stack) error return stack.Initialize.Add(&validateOpDescribeCertificate{}, middleware.After) } +func addOpDescribeClientAuthenticationSettingsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeClientAuthenticationSettings{}, middleware.After) +} + func addOpDescribeConditionalForwardersValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribeConditionalForwarders{}, middleware.After) } @@ -1908,6 +1932,21 @@ func validateOpDescribeCertificateInput(v *DescribeCertificateInput) error { } } +func validateOpDescribeClientAuthenticationSettingsInput(v *DescribeClientAuthenticationSettingsInput) error { + if v == nil { + return nil 
+ } + invalidParams := smithy.InvalidParamsError{Context: "DescribeClientAuthenticationSettingsInput"} + if v.DirectoryId == nil { + invalidParams.Add(smithy.NewErrParamRequired("DirectoryId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDescribeConditionalForwardersInput(v *DescribeConditionalForwardersInput) error { if v == nil { return nil diff --git a/service/ec2/api_op_CreateKeyPair.go b/service/ec2/api_op_CreateKeyPair.go index 39cf080644e..e0fb26bbd7a 100644 --- a/service/ec2/api_op_CreateKeyPair.go +++ b/service/ec2/api_op_CreateKeyPair.go @@ -11,14 +11,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the -// public key and displays the private key for you to save to a file. The private -// key is returned as an unencrypted PEM encoded PKCS#1 private key. If a key with -// the specified name already exists, Amazon EC2 returns an error. You can have up -// to five thousand key pairs per Region. The key pair returned to you is available -// only in the Region in which you create it. If you prefer, you can create your -// own key pair using a third-party tool and upload it to any Region using -// ImportKeyPair. For more information, see Key Pairs +// Creates an ED25519 or 2048-bit RSA key pair with the specified name. Amazon EC2 +// stores the public key and displays the private key for you to save to a file. +// The private key is returned as an unencrypted PEM encoded PKCS#1 private key. If +// a key with the specified name already exists, Amazon EC2 returns an error. The +// key pair returned to you is available only in the Amazon Web Services Region in +// which you create it. If you prefer, you can create your own key pair using a +// third-party tool and upload it to any Region using ImportKeyPair. You can have +// up to 5,000 key pairs per Amazon Web Services Region. 
For more information, see +// Amazon EC2 key pairs // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the // Amazon Elastic Compute Cloud User Guide. func (c *Client) CreateKeyPair(ctx context.Context, params *CreateKeyPairInput, optFns ...func(*Options)) (*CreateKeyPairOutput, error) { @@ -49,6 +50,10 @@ type CreateKeyPairInput struct { // UnauthorizedOperation. DryRun *bool + // The type of key pair. Note that ED25519 keys are not supported for Windows + // instances, EC2 Instance Connect, and EC2 Serial Console. Default: rsa + KeyType types.KeyType + // The tags to apply to the new key pair. TagSpecifications []types.TagSpecification @@ -61,7 +66,7 @@ type CreateKeyPairOutput struct { // The SHA-1 digest of the DER encoded private key. KeyFingerprint *string - // An unencrypted PEM encoded RSA private key. + // An unencrypted PEM encoded RSA or ED25519 private key. KeyMaterial *string // The name of the key pair. diff --git a/service/ec2/api_op_CreateSecurityGroup.go b/service/ec2/api_op_CreateSecurityGroup.go index 34e93dfca40..6a935e49334 100644 --- a/service/ec2/api_op_CreateSecurityGroup.go +++ b/service/ec2/api_op_CreateSecurityGroup.go @@ -13,9 +13,9 @@ import ( // Creates a security group. A security group acts as a virtual firewall for your // instance to control inbound and outbound traffic. For more information, see -// Amazon EC2 Security Groups +// Amazon EC2 security groups // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) -// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC +// in the Amazon Elastic Compute Cloud User Guide and Security groups for your VPC // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) // in the Amazon Virtual Private Cloud User Guide. When you create a security // group, you specify a friendly name of your choice. 
You can have a security group diff --git a/service/ec2/api_op_DescribeKeyPairs.go b/service/ec2/api_op_DescribeKeyPairs.go index 42eed53ee5a..73c2f3813f0 100644 --- a/service/ec2/api_op_DescribeKeyPairs.go +++ b/service/ec2/api_op_DescribeKeyPairs.go @@ -20,7 +20,7 @@ import ( ) // Describes the specified key pairs or all of your key pairs. For more information -// about key pairs, see Key Pairs +// about key pairs, see Amazon EC2 key pairs // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the // Amazon Elastic Compute Cloud User Guide. func (c *Client) DescribeKeyPairs(ctx context.Context, params *DescribeKeyPairsInput, optFns ...func(*Options)) (*DescribeKeyPairsOutput, error) { diff --git a/service/ec2/api_op_DescribeSecurityGroups.go b/service/ec2/api_op_DescribeSecurityGroups.go index 0f9dbe87048..1071b8f0aea 100644 --- a/service/ec2/api_op_DescribeSecurityGroups.go +++ b/service/ec2/api_op_DescribeSecurityGroups.go @@ -21,9 +21,9 @@ import ( // Describes the specified security groups or all of your security groups. A // security group is for use with instances either in the EC2-Classic platform or -// in a specific VPC. For more information, see Amazon EC2 Security Groups +// in a specific VPC. For more information, see Amazon EC2 security groups // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) -// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC +// in the Amazon Elastic Compute Cloud User Guide and Security groups for your VPC // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) // in the Amazon Virtual Private Cloud User Guide. 
func (c *Client) DescribeSecurityGroups(ctx context.Context, params *DescribeSecurityGroupsInput, optFns ...func(*Options)) (*DescribeSecurityGroupsOutput, error) { diff --git a/service/ec2/api_op_ExportImage.go b/service/ec2/api_op_ExportImage.go index bb19c49c173..b4742cce3cc 100644 --- a/service/ec2/api_op_ExportImage.go +++ b/service/ec2/api_op_ExportImage.go @@ -44,7 +44,7 @@ type ExportImageInput struct { ImageId *string // Information about the destination Amazon S3 bucket. The bucket must exist and - // grant WRITE and READ_ACP permissions to the AWS account + // grant WRITE and READ_ACP permissions to the Amazon Web Services account // vm-import-export@amazon.com. // // This member is required. diff --git a/service/ec2/api_op_ImportImage.go b/service/ec2/api_op_ImportImage.go index 4f97c5a23fb..7a37962f6c4 100644 --- a/service/ec2/api_op_ImportImage.go +++ b/service/ec2/api_op_ImportImage.go @@ -55,9 +55,8 @@ type ImportImageInput struct { DryRun *bool // Specifies whether the destination AMI of the imported image should be encrypted. - // The default CMK for EBS is used unless you specify a non-default AWS Key - // Management Service (AWS KMS) CMK using KmsKeyId. For more information, see - // Amazon EBS Encryption + // The default KMS key for EBS is used unless you specify a non-default KMS key + // using KmsKeyId. For more information, see Amazon EBS Encryption // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in the // Amazon Elastic Compute Cloud User Guide. Encrypted *bool @@ -65,37 +64,37 @@ type ImportImageInput struct { // The target hypervisor platform. Valid values: xen Hypervisor *string - // An identifier for the symmetric AWS Key Management Service (AWS KMS) customer - // master key (CMK) to use when creating the encrypted AMI. This parameter is only - // required if you want to use a non-default CMK; if this parameter is not - // specified, the default CMK for EBS is used. 
If a KmsKeyId is specified, the - // Encrypted flag must also be set. The CMK identifier may be provided in any of - // the following formats: + // An identifier for the symmetric KMS key to use when creating the encrypted AMI. + // This parameter is only required if you want to use a non-default KMS key; if + // this parameter is not specified, the default KMS key for EBS is used. If a + // KmsKeyId is specified, the Encrypted flag must also be set. The KMS key + // identifier may be provided in any of the following formats: // // * Key ID // - // * Key alias. The alias ARN contains the - // arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of - // the CMK owner, the alias namespace, and then the CMK alias. For example, + // * Key + // alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region + // of the key, the Amazon Web Services account ID of the key owner, the alias + // namespace, and then the key alias. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // // * ARN using key ID. The - // ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, - // the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For - // example, + // ID ARN contains the arn:aws:kms namespace, followed by the Region of the key, + // the Amazon Web Services account ID of the key owner, the key namespace, and then + // the key ID. For example, // arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // // * // ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed - // by the Region of the CMK, the AWS account ID of the CMK owner, the alias - // namespace, and then the CMK alias. For example, + // by the Region of the key, the Amazon Web Services account ID of the key owner, + // the alias namespace, and then the key alias. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. 
// - // AWS parses KmsKeyId - // asynchronously, meaning that the action you call may appear to complete even - // though you provided an invalid identifier. This action will eventually report - // failure. The specified CMK must exist in the Region that the AMI is being copied - // to. Amazon EBS does not support asymmetric CMKs. + // Amazon Web Services + // parses KmsKeyId asynchronously, meaning that the action you call may appear to + // complete even though you provided an invalid identifier. This action will + // eventually report failure. The specified KMS key must exist in the Region that + // the AMI is being copied to. Amazon EBS does not support asymmetric KMS keys. KmsKeyId *string // The ARNs of the license configurations. @@ -104,10 +103,10 @@ type ImportImageInput struct { // The license type to be used for the Amazon Machine Image (AMI) after importing. // By default, we detect the source-system operating system (OS) and apply the // appropriate license. Specify AWS to replace the source-system license with an - // AWS license, if appropriate. Specify BYOL to retain the source-system license, - // if appropriate. To use BYOL, you must have existing licenses with rights to use - // these licenses in a third party cloud, such as AWS. For more information, see - // Prerequisites + // Amazon Web Services license, if appropriate. Specify BYOL to retain the + // source-system license, if appropriate. To use BYOL, you must have existing + // licenses with rights to use these licenses in a third party cloud, such as + // Amazon Web Services. For more information, see Prerequisites // (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image) // in the VM Import/Export User Guide. LicenseType *string @@ -121,6 +120,12 @@ type ImportImageInput struct { // The tags to apply to the import image task during creation. TagSpecifications []types.TagSpecification + // The usage operation value. 
For more information, see AMI billing information + // fields + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html) + // in the Amazon Elastic Compute Cloud User Guide. + UsageOperation *string + noSmithyDocumentSerde } @@ -144,8 +149,8 @@ type ImportImageOutput struct { // The task ID of the import image task. ImportTaskId *string - // The identifier for the symmetric AWS Key Management Service (AWS KMS) customer - // master key (CMK) that was used to create the encrypted AMI. + // The identifier for the symmetric KMS key that was used to create the encrypted + // AMI. KmsKeyId *string // The ARNs of the license configurations. @@ -172,6 +177,9 @@ type ImportImageOutput struct { // Any tags assigned to the import image task. Tags []types.Tag + // The usage operation value. + UsageOperation *string + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata diff --git a/service/ec2/api_op_ImportInstance.go b/service/ec2/api_op_ImportInstance.go index 0e8db13813e..b167eeea103 100644 --- a/service/ec2/api_op_ImportInstance.go +++ b/service/ec2/api_op_ImportInstance.go @@ -13,8 +13,8 @@ import ( // Creates an import instance task using metadata from the specified disk image. // This API action supports only single-volume VMs. To import multi-volume VMs, use -// ImportImage instead. This API action is not supported by the AWS Command Line -// Interface (AWS CLI). For information about using the Amazon EC2 CLI, which is +// ImportImage instead. This API action is not supported by the Command Line +// Interface (CLI). For information about using the Amazon EC2 CLI, which is // deprecated, see Importing a VM to Amazon EC2 // (https://awsdocs.s3.amazonaws.com/EC2/ec2-clt.pdf#UsingVirtualMachinesinAmazonEC2) // in the Amazon EC2 CLI Reference PDF file. 
For information about the import diff --git a/service/ec2/api_op_ImportKeyPair.go b/service/ec2/api_op_ImportKeyPair.go index dc0f55b5c4c..1f062b9bf44 100644 --- a/service/ec2/api_op_ImportKeyPair.go +++ b/service/ec2/api_op_ImportKeyPair.go @@ -11,12 +11,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Imports the public key from an RSA key pair that you created with a third-party -// tool. Compare this with CreateKeyPair, in which Amazon Web Services creates the -// key pair and gives the keys to you (Amazon Web Services keeps a copy of the -// public key). With ImportKeyPair, you create the key pair and give Amazon Web -// Services just the public key. The private key is never transferred between you -// and Amazon Web Services. For more information about key pairs, see Key Pairs +// Imports the public key from an RSA or ED25519 key pair that you created with a +// third-party tool. Compare this with CreateKeyPair, in which Amazon Web Services +// creates the key pair and gives the keys to you (Amazon Web Services keeps a copy +// of the public key). With ImportKeyPair, you create the key pair and give Amazon +// Web Services just the public key. The private key is never transferred between +// you and Amazon Web Services. For more information about key pairs, see Amazon +// EC2 key pairs // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the // Amazon Elastic Compute Cloud User Guide. func (c *Client) ImportKeyPair(ctx context.Context, params *ImportKeyPairInput, optFns ...func(*Options)) (*ImportKeyPairOutput, error) { diff --git a/service/ec2/api_op_ImportSnapshot.go b/service/ec2/api_op_ImportSnapshot.go index d35ff85709d..22925d9c07d 100644 --- a/service/ec2/api_op_ImportSnapshot.go +++ b/service/ec2/api_op_ImportSnapshot.go @@ -51,44 +51,44 @@ type ImportSnapshotInput struct { DryRun *bool // Specifies whether the destination snapshot of the imported image should be - // encrypted. 
The default CMK for EBS is used unless you specify a non-default AWS - // Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see - // Amazon EBS Encryption + // encrypted. The default KMS key for EBS is used unless you specify a non-default + // KMS key using KmsKeyId. For more information, see Amazon EBS Encryption // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in the // Amazon Elastic Compute Cloud User Guide. Encrypted *bool - // An identifier for the symmetric AWS Key Management Service (AWS KMS) customer - // master key (CMK) to use when creating the encrypted snapshot. This parameter is - // only required if you want to use a non-default CMK; if this parameter is not - // specified, the default CMK for EBS is used. If a KmsKeyId is specified, the - // Encrypted flag must also be set. The CMK identifier may be provided in any of - // the following formats: + // An identifier for the symmetric KMS key to use when creating the encrypted + // snapshot. This parameter is only required if you want to use a non-default KMS + // key; if this parameter is not specified, the default KMS key for EBS is used. If + // a KmsKeyId is specified, the Encrypted flag must also be set. The KMS key + // identifier may be provided in any of the following formats: // // * Key ID // - // * Key alias. The alias ARN contains the - // arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of - // the CMK owner, the alias namespace, and then the CMK alias. For example, + // * Key + // alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region + // of the key, the Amazon Web Services account ID of the key owner, the alias + // namespace, and then the key alias. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // // * ARN using key ID. 
The - // ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, - // the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For - // example, + // ID ARN contains the arn:aws:kms namespace, followed by the Region of the key, + // the Amazon Web Services account ID of the key owner, the key namespace, and then + // the key ID. For example, // arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // // * // ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed - // by the Region of the CMK, the AWS account ID of the CMK owner, the alias - // namespace, and then the CMK alias. For example, + // by the Region of the key, the Amazon Web Services account ID of the key owner, + // the alias namespace, and then the key alias. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // AWS parses KmsKeyId - // asynchronously, meaning that the action you call may appear to complete even - // though you provided an invalid identifier. This action will eventually report - // failure. The specified CMK must exist in the Region that the snapshot is being - // copied to. Amazon EBS does not support asymmetric CMKs. + // Amazon Web Services + // parses KmsKeyId asynchronously, meaning that the action you call may appear to + // complete even though you provided an invalid identifier. This action will + // eventually report failure. The specified KMS key must exist in the Region that + // the snapshot is being copied to. Amazon EBS does not support asymmetric KMS + // keys. KmsKeyId *string // The name of the role to use when not using the default role, 'vmimport'. diff --git a/service/ec2/api_op_ImportVolume.go b/service/ec2/api_op_ImportVolume.go index 9ea23b45f0c..f92267df5e0 100644 --- a/service/ec2/api_op_ImportVolume.go +++ b/service/ec2/api_op_ImportVolume.go @@ -14,9 +14,9 @@ import ( // Creates an import volume task using metadata from the specified disk image. 
This // API action supports only single-volume VMs. To import multi-volume VMs, use // ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead. -// This API action is not supported by the AWS Command Line Interface (AWS CLI). -// For information about using the Amazon EC2 CLI, which is deprecated, see -// Importing Disks to Amazon EBS +// This API action is not supported by the Command Line Interface (CLI). For +// information about using the Amazon EC2 CLI, which is deprecated, see Importing +// Disks to Amazon EBS // (https://awsdocs.s3.amazonaws.com/EC2/ec2-clt.pdf#importing-your-volumes-into-amazon-ebs) // in the Amazon EC2 CLI Reference PDF file. For information about the import // manifest referenced by this API action, see VM Import Manifest diff --git a/service/ec2/deserializers.go b/service/ec2/deserializers.go index 00ba9893783..027f12c1a4b 100644 --- a/service/ec2/deserializers.go +++ b/service/ec2/deserializers.go @@ -60984,6 +60984,19 @@ func awsEc2query_deserializeDocumentImportImageTask(v **types.ImportImageTask, d return err } + case strings.EqualFold("usageOperation", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UsageOperation = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -69096,6 +69109,19 @@ func awsEc2query_deserializeDocumentKeyPairInfo(v **types.KeyPairInfo, decoder s sv.KeyPairId = ptr.String(xtv) } + case strings.EqualFold("keyType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyType = types.KeyType(xtv) + } + case strings.EqualFold("tagSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentTagList(&sv.Tags, nodeDecoder); err != nil { @@ -122526,6 +122552,19 @@ func 
awsEc2query_deserializeOpDocumentImportImageOutput(v **ImportImageOutput, d return err } + case strings.EqualFold("usageOperation", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UsageOperation = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() diff --git a/service/ec2/serializers.go b/service/ec2/serializers.go index bbe7207c961..7b1f89f12d1 100644 --- a/service/ec2/serializers.go +++ b/service/ec2/serializers.go @@ -35313,6 +35313,11 @@ func awsEc2query_serializeOpDocumentCreateKeyPairInput(v *CreateKeyPairInput, va objectKey.String(*v.KeyName) } + if len(v.KeyType) > 0 { + objectKey := object.Key("KeyType") + objectKey.String(string(v.KeyType)) + } + if v.TagSpecifications != nil { objectKey := object.FlatKey("TagSpecification") if err := awsEc2query_serializeDocumentTagSpecificationList(v.TagSpecifications, objectKey); err != nil { @@ -44089,6 +44094,11 @@ func awsEc2query_serializeOpDocumentImportImageInput(v *ImportImageInput, value } } + if v.UsageOperation != nil { + objectKey := object.Key("UsageOperation") + objectKey.String(*v.UsageOperation) + } + return nil } diff --git a/service/ec2/types/enums.go b/service/ec2/types/enums.go index a4d8fba6e48..ad1df31da90 100644 --- a/service/ec2/types/enums.go +++ b/service/ec2/types/enums.go @@ -3022,6 +3022,24 @@ func (Ipv6SupportValue) Values() []Ipv6SupportValue { } } +type KeyType string + +// Enum values for KeyType +const ( + KeyTypeRsa KeyType = "rsa" + KeyTypeEd25519 KeyType = "ed25519" +) + +// Values returns all known values for KeyType. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. 
+func (KeyType) Values() []KeyType { + return []KeyType{ + "rsa", + "ed25519", + } +} + type LaunchTemplateErrorCode string // Enum values for LaunchTemplateErrorCode diff --git a/service/ec2/types/types.go b/service/ec2/types/types.go index 90b045736d2..01b5e6d8c22 100644 --- a/service/ec2/types/types.go +++ b/service/ec2/types/types.go @@ -2920,8 +2920,8 @@ type ExportToS3Task struct { DiskImageFormat DiskImageFormat // The Amazon S3 bucket for the destination image. The destination bucket must - // exist and grant WRITE and READ_ACP permissions to the AWS account - // vm-import-export@amazon.com. + // exist and grant WRITE and READ_ACP permissions to the Amazon Web Services + // account vm-import-export@amazon.com. S3Bucket *string // The encryption key for your S3 bucket. @@ -2941,8 +2941,8 @@ type ExportToS3TaskSpecification struct { DiskImageFormat DiskImageFormat // The Amazon S3 bucket for the destination image. The destination bucket must - // exist and grant WRITE and READ_ACP permissions to the AWS account - // vm-import-export@amazon.com. + // exist and grant WRITE and READ_ACP permissions to the Amazon Web Services + // account vm-import-export@amazon.com. S3Bucket *string // The image is written to a single object in the Amazon S3 bucket at the S3 key @@ -4090,8 +4090,7 @@ type ImportImageTask struct { // The ID of the import image task. ImportTaskId *string - // The identifier for the AWS Key Management Service (AWS KMS) customer master key - // (CMK) that was used to create the encrypted image. + // The identifier for the KMS key that was used to create the encrypted image. KmsKeyId *string // The ARNs of the license configurations that are associated with the import image @@ -4119,6 +4118,9 @@ type ImportImageTask struct { // The tags for the import image task. Tags []Tag + // The usage operation value. + UsageOperation *string + noSmithyDocumentSerde } @@ -5542,10 +5544,26 @@ type Ipv6Range struct { // Describes a key pair. 
type KeyPairInfo struct { - // If you used CreateKeyPair to create the key pair, this is the SHA-1 digest of - // the DER encoded private key. If you used ImportKeyPair to provide Amazon Web - // Services the public key, this is the MD5 public key fingerprint as specified in - // section 4 of RFC4716. + // If you used CreateKeyPair to create the key pair: + // + // * For RSA key pairs, the key + // fingerprint is the SHA-1 digest of the DER encoded private key. + // + // * For ED25519 + // key pairs, the key fingerprint is the base64-encoded SHA-256 digest, which is + // the default for OpenSSH, starting with OpenSSH 6.8 + // (http://www.openssh.com/txt/release-6.8). + // + // If you used ImportKeyPair to provide + // Amazon Web Services the public key: + // + // * For RSA key pairs, the key fingerprint is + // the MD5 public key fingerprint as specified in section 4 of RFC4716. + // + // * For + // ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256 digest, + // which is the default for OpenSSH, starting with OpenSSH 6.8 + // (http://www.openssh.com/txt/release-6.8). KeyFingerprint *string // The name of the key pair. @@ -5554,6 +5572,9 @@ type KeyPairInfo struct { // The ID of the key pair. KeyPairId *string + // The type of key pair. + KeyType KeyType + // Any tags applied to the key pair. Tags []Tag @@ -10136,8 +10157,7 @@ type SnapshotTaskDetail struct { // The format of the disk image from which the snapshot is created. Format *string - // The identifier for the AWS Key Management Service (AWS KMS) customer master key - // (CMK) that was used to create the encrypted snapshot. + // The identifier for the KMS key that was used to create the encrypted snapshot. KmsKeyId *string // The percentage of completion for the import snapshot task. @@ -12450,9 +12470,9 @@ type UserBucketDetails struct { // Describes the user data for an instance. type UserData struct { - // The user data. 
If you are using an AWS SDK or command line tool, Base64-encoding - // is performed for you, and you can load the text from a file. Otherwise, you must - // provide Base64-encoded text. + // The user data. If you are using an Amazon Web Services SDK or command line tool, + // Base64-encoding is performed for you, and you can load the text from a file. + // Otherwise, you must provide Base64-encoded text. Data *string noSmithyDocumentSerde diff --git a/service/elasticache/api_op_AddTagsToResource.go b/service/elasticache/api_op_AddTagsToResource.go index c3d054bcdfa..d96bd3429e1 100644 --- a/service/elasticache/api_op_AddTagsToResource.go +++ b/service/elasticache/api_op_AddTagsToResource.go @@ -17,12 +17,12 @@ import ( // replication groups, those actions will be replicated to all nodes in the // replication group. For more information, see Resource-level permissions // (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/IAM.ResourceLevelPermissions.html). -// For example, you can use cost-allocation tags to your ElastiCache resources, AWS -// generates a cost allocation report as a comma-separated value (CSV) file with -// your usage and costs aggregated by your tags. You can apply tags that represent -// business categories (such as cost centers, application names, or owners) to -// organize your costs across multiple services. For more information, see Using -// Cost Allocation Tags in Amazon ElastiCache +// For example, you can use cost-allocation tags to your ElastiCache resources, +// Amazon generates a cost allocation report as a comma-separated value (CSV) file +// with your usage and costs aggregated by your tags. You can apply tags that +// represent business categories (such as cost centers, application names, or +// owners) to organize your costs across multiple services. 
For more information, +// see Using Cost Allocation Tags in Amazon ElastiCache // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html) in // the ElastiCache User Guide. func (c *Client) AddTagsToResource(ctx context.Context, params *AddTagsToResourceInput, optFns ...func(*Options)) (*AddTagsToResourceOutput, error) { @@ -47,7 +47,7 @@ type AddTagsToResourceInput struct { // added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster or // arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. ElastiCache // resources are cluster and snapshot. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces + // Resource Names (ARNs) and Amazon Service Namespaces // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // // This member is required. diff --git a/service/elasticache/api_op_AuthorizeCacheSecurityGroupIngress.go b/service/elasticache/api_op_AuthorizeCacheSecurityGroupIngress.go index 0a8de3924ad..9551ed0fd8c 100644 --- a/service/elasticache/api_op_AuthorizeCacheSecurityGroupIngress.go +++ b/service/elasticache/api_op_AuthorizeCacheSecurityGroupIngress.go @@ -44,9 +44,9 @@ type AuthorizeCacheSecurityGroupIngressInput struct { // This member is required. EC2SecurityGroupName *string - // The AWS account number of the Amazon EC2 security group owner. Note that this is - // not the same thing as an AWS access key ID - you must provide a valid AWS - // account number for this parameter. + // The Amazon account number of the Amazon EC2 security group owner. Note that this + // is not the same thing as an Amazon access key ID - you must provide a valid + // Amazon account number for this parameter. // // This member is required. 
EC2SecurityGroupOwnerId *string diff --git a/service/elasticache/api_op_CreateGlobalReplicationGroup.go b/service/elasticache/api_op_CreateGlobalReplicationGroup.go index d43e9440fcc..050c6945757 100644 --- a/service/elasticache/api_op_CreateGlobalReplicationGroup.go +++ b/service/elasticache/api_op_CreateGlobalReplicationGroup.go @@ -42,13 +42,13 @@ func (c *Client) CreateGlobalReplicationGroup(ctx context.Context, params *Creat type CreateGlobalReplicationGroupInput struct { // The suffix name of a Global datastore. Amazon ElastiCache automatically applies - // a prefix to the Global datastore ID when it is created. Each AWS Region has its - // own prefix. For instance, a Global datastore ID created in the US-West-1 region - // will begin with "dsdfu" along with the suffix name you provide. The suffix, - // combined with the auto-generated prefix, guarantees uniqueness of the Global - // datastore name across multiple regions. For a full list of AWS Regions and their - // respective Global datastore iD prefixes, see Using the AWS CLI with Global - // datastores + // a prefix to the Global datastore ID when it is created. Each Amazon Region has + // its own prefix. For instance, a Global datastore ID created in the US-West-1 + // region will begin with "dsdfu" along with the suffix name you provide. The + // suffix, combined with the auto-generated prefix, guarantees uniqueness of the + // Global datastore name across multiple regions. For a full list of Amazon Regions + // and their respective Global datastore iD prefixes, see Using the Amazon CLI with + // Global datastores // (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Redis-Global-Datastores-CLI.html). // // This member is required. @@ -69,7 +69,7 @@ type CreateGlobalReplicationGroupInput struct { type CreateGlobalReplicationGroupOutput struct { // Consists of a primary cluster that accepts writes and an associated secondary - // cluster that resides in a different AWS region. 
The secondary cluster accepts + // cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // diff --git a/service/elasticache/api_op_CreateReplicationGroup.go b/service/elasticache/api_op_CreateReplicationGroup.go index b2c37068ba1..7609c31976e 100644 --- a/service/elasticache/api_op_CreateReplicationGroup.go +++ b/service/elasticache/api_op_CreateReplicationGroup.go @@ -31,7 +31,7 @@ import ( // more information, see Creating a Subnet Group // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.Creating.html). // For versions below 5.0.6, the limit is 250 per cluster. To request a limit -// increase, see AWS Service Limits +// increase, see Amazon Service Limits // (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) and // choose the limit type Nodes per cluster per instance type. When a Redis (cluster // mode disabled) replication group has been successfully created, you can add one diff --git a/service/elasticache/api_op_DecreaseNodeGroupsInGlobalReplicationGroup.go b/service/elasticache/api_op_DecreaseNodeGroupsInGlobalReplicationGroup.go index b4642bc5601..769d149b1f9 100644 --- a/service/elasticache/api_op_DecreaseNodeGroupsInGlobalReplicationGroup.go +++ b/service/elasticache/api_op_DecreaseNodeGroupsInGlobalReplicationGroup.go @@ -66,7 +66,7 @@ type DecreaseNodeGroupsInGlobalReplicationGroupInput struct { type DecreaseNodeGroupsInGlobalReplicationGroupOutput struct { // Consists of a primary cluster that accepts writes and an associated secondary - // cluster that resides in a different AWS region. The secondary cluster accepts + // cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. 
// diff --git a/service/elasticache/api_op_DeleteGlobalReplicationGroup.go b/service/elasticache/api_op_DeleteGlobalReplicationGroup.go index 0fea3b8f670..33edc412e3d 100644 --- a/service/elasticache/api_op_DeleteGlobalReplicationGroup.go +++ b/service/elasticache/api_op_DeleteGlobalReplicationGroup.go @@ -62,7 +62,7 @@ type DeleteGlobalReplicationGroupInput struct { type DeleteGlobalReplicationGroupOutput struct { // Consists of a primary cluster that accepts writes and an associated secondary - // cluster that resides in a different AWS region. The secondary cluster accepts + // cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // diff --git a/service/elasticache/api_op_DescribeCacheClusters.go b/service/elasticache/api_op_DescribeCacheClusters.go index 15e3971e106..7709c995a18 100644 --- a/service/elasticache/api_op_DescribeCacheClusters.go +++ b/service/elasticache/api_op_DescribeCacheClusters.go @@ -4,10 +4,12 @@ package elasticache import ( "context" + "errors" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/middleware" smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -506,6 +508,329 @@ func cacheClusterAvailableStateRetryable(ctx context.Context, input *DescribeCac return true, nil } +// CacheClusterDeletedWaiterOptions are waiter options for +// CacheClusterDeletedWaiter +type CacheClusterDeletedWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. 
+ APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // CacheClusterDeletedWaiter will use default minimum delay of 15 seconds. Note + // that MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, CacheClusterDeletedWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeCacheClustersInput, *DescribeCacheClustersOutput, error) (bool, error) +} + +// CacheClusterDeletedWaiter defines the waiters for CacheClusterDeleted +type CacheClusterDeletedWaiter struct { + client DescribeCacheClustersAPIClient + + options CacheClusterDeletedWaiterOptions +} + +// NewCacheClusterDeletedWaiter constructs a CacheClusterDeletedWaiter. 
+func NewCacheClusterDeletedWaiter(client DescribeCacheClustersAPIClient, optFns ...func(*CacheClusterDeletedWaiterOptions)) *CacheClusterDeletedWaiter { + options := CacheClusterDeletedWaiterOptions{} + options.MinDelay = 15 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = cacheClusterDeletedStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &CacheClusterDeletedWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for CacheClusterDeleted waiter. The maxWaitDur is +// the maximum wait duration the waiter will wait. The maxWaitDur is required and +// must be greater than zero. +func (w *CacheClusterDeletedWaiter) Wait(ctx context.Context, params *DescribeCacheClustersInput, maxWaitDur time.Duration, optFns ...func(*CacheClusterDeletedWaiterOptions)) error { + if maxWaitDur <= 0 { + return fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeCacheClusters(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return err + } + if !retryable { + return nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return fmt.Errorf("exceeded max wait time for CacheClusterDeleted waiter") +} + +func cacheClusterDeletedStateRetryable(ctx context.Context, input *DescribeCacheClustersInput, output *DescribeCacheClustersOutput, err error) (bool, error) { + + if err == nil { + pathValue, err := jmespath.Search("CacheClusters[].CacheClusterStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "deleted" + var match = true + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + if len(listOfValues) == 0 { + match = false + } + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + } + + if string(*value) != expectedValue { + match = false + } + } + + if match { + return false, nil + } + } + + if err != nil { + var apiErr smithy.APIError + ok := errors.As(err, &apiErr) + if !ok { + return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err) + } + + if "CacheClusterNotFound" == apiErr.ErrorCode() { + return false, nil + } + } + + if err == nil { + pathValue, err := 
jmespath.Search("CacheClusters[].CacheClusterStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "available" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + } + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("CacheClusters[].CacheClusterStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "creating" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + } + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("CacheClusters[].CacheClusterStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "incompatible-network" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + } + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := 
jmespath.Search("CacheClusters[].CacheClusterStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "modifying" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + } + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("CacheClusters[].CacheClusterStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "restore-failed" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + } + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + if err == nil { + pathValue, err := jmespath.Search("CacheClusters[].CacheClusterStatus", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "snapshotting" + listOfValues, ok := pathValue.([]interface{}) + if !ok { + return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) + } + + for _, v := range listOfValues { + value, ok := v.(*string) + if !ok { + return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + } + + if string(*value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + } + + return true, nil +} + func 
newServiceMetadataMiddleware_opDescribeCacheClusters(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/service/elasticache/api_op_DisassociateGlobalReplicationGroup.go b/service/elasticache/api_op_DisassociateGlobalReplicationGroup.go index 413c43f720d..2d267997445 100644 --- a/service/elasticache/api_op_DisassociateGlobalReplicationGroup.go +++ b/service/elasticache/api_op_DisassociateGlobalReplicationGroup.go @@ -13,7 +13,7 @@ import ( // Remove a secondary cluster from the Global datastore using the Global datastore // name. The secondary cluster will no longer receive updates from the primary -// cluster, but will remain as a standalone cluster in that AWS region. +// cluster, but will remain as a standalone cluster in that Amazon region. func (c *Client) DisassociateGlobalReplicationGroup(ctx context.Context, params *DisassociateGlobalReplicationGroupInput, optFns ...func(*Options)) (*DisassociateGlobalReplicationGroupOutput, error) { if params == nil { params = &DisassociateGlobalReplicationGroupInput{} @@ -41,7 +41,8 @@ type DisassociateGlobalReplicationGroupInput struct { // This member is required. ReplicationGroupId *string - // The AWS region of secondary cluster you wish to remove from the Global datastore + // The Amazon region of secondary cluster you wish to remove from the Global + // datastore // // This member is required. ReplicationGroupRegion *string @@ -52,7 +53,7 @@ type DisassociateGlobalReplicationGroupInput struct { type DisassociateGlobalReplicationGroupOutput struct { // Consists of a primary cluster that accepts writes and an associated secondary - // cluster that resides in a different AWS region. The secondary cluster accepts + // cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. 
// diff --git a/service/elasticache/api_op_FailoverGlobalReplicationGroup.go b/service/elasticache/api_op_FailoverGlobalReplicationGroup.go index 870932312ab..ca22662024e 100644 --- a/service/elasticache/api_op_FailoverGlobalReplicationGroup.go +++ b/service/elasticache/api_op_FailoverGlobalReplicationGroup.go @@ -36,7 +36,7 @@ type FailoverGlobalReplicationGroupInput struct { // This member is required. GlobalReplicationGroupId *string - // The AWS region of the primary cluster of the Global datastore + // The Amazon region of the primary cluster of the Global datastore // // This member is required. PrimaryRegion *string @@ -52,7 +52,7 @@ type FailoverGlobalReplicationGroupInput struct { type FailoverGlobalReplicationGroupOutput struct { // Consists of a primary cluster that accepts writes and an associated secondary - // cluster that resides in a different AWS region. The secondary cluster accepts + // cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // diff --git a/service/elasticache/api_op_IncreaseNodeGroupsInGlobalReplicationGroup.go b/service/elasticache/api_op_IncreaseNodeGroupsInGlobalReplicationGroup.go index af154ec7c6a..4f47638f024 100644 --- a/service/elasticache/api_op_IncreaseNodeGroupsInGlobalReplicationGroup.go +++ b/service/elasticache/api_op_IncreaseNodeGroupsInGlobalReplicationGroup.go @@ -45,8 +45,8 @@ type IncreaseNodeGroupsInGlobalReplicationGroupInput struct { // This member is required. 
NodeGroupCount int32 - // Describes the replication group IDs, the AWS regions where they are stored and - // the shard configuration for each that comprise the Global datastore + // Describes the replication group IDs, the Amazon regions where they are stored + // and the shard configuration for each that comprise the Global datastore RegionalConfigurations []types.RegionalConfiguration noSmithyDocumentSerde @@ -55,7 +55,7 @@ type IncreaseNodeGroupsInGlobalReplicationGroupInput struct { type IncreaseNodeGroupsInGlobalReplicationGroupOutput struct { // Consists of a primary cluster that accepts writes and an associated secondary - // cluster that resides in a different AWS region. The secondary cluster accepts + // cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // diff --git a/service/elasticache/api_op_ListTagsForResource.go b/service/elasticache/api_op_ListTagsForResource.go index 012387784ce..7be05078fe3 100644 --- a/service/elasticache/api_op_ListTagsForResource.go +++ b/service/elasticache/api_op_ListTagsForResource.go @@ -41,8 +41,8 @@ type ListTagsForResourceInput struct { // The Amazon Resource Name (ARN) of the resource for which you want the list of // tags, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster or // arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. For more - // information about ARNs, see Amazon Resource Names (ARNs) and AWS Service - // Namespaces + // information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services + // Service Namespaces // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // // This member is required. 
diff --git a/service/elasticache/api_op_ModifyGlobalReplicationGroup.go b/service/elasticache/api_op_ModifyGlobalReplicationGroup.go index 1e4d1fa29d5..6789ff54509 100644 --- a/service/elasticache/api_op_ModifyGlobalReplicationGroup.go +++ b/service/elasticache/api_op_ModifyGlobalReplicationGroup.go @@ -66,7 +66,7 @@ type ModifyGlobalReplicationGroupInput struct { type ModifyGlobalReplicationGroupOutput struct { // Consists of a primary cluster that accepts writes and an associated secondary - // cluster that resides in a different AWS region. The secondary cluster accepts + // cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // diff --git a/service/elasticache/api_op_ModifyReplicationGroup.go b/service/elasticache/api_op_ModifyReplicationGroup.go index 650620e91bd..4d1f4ed45a2 100644 --- a/service/elasticache/api_op_ModifyReplicationGroup.go +++ b/service/elasticache/api_op_ModifyReplicationGroup.go @@ -117,8 +117,7 @@ type ModifyReplicationGroupInput struct { // Specifies the destination, format and type of the logs. LogDeliveryConfigurations []types.LogDeliveryConfigurationRequest - // A list of tags to be added to this resource. A tag is a key-value pair. A tag - // key must be accompanied by a tag value, although null is accepted. + // A flag to indicate MultiAZ is enabled. MultiAZEnabled *bool // Deprecated. This parameter is not used. 
diff --git a/service/elasticache/api_op_RebalanceSlotsInGlobalReplicationGroup.go b/service/elasticache/api_op_RebalanceSlotsInGlobalReplicationGroup.go index 91fd591495d..6f14c3a49a5 100644 --- a/service/elasticache/api_op_RebalanceSlotsInGlobalReplicationGroup.go +++ b/service/elasticache/api_op_RebalanceSlotsInGlobalReplicationGroup.go @@ -46,7 +46,7 @@ type RebalanceSlotsInGlobalReplicationGroupInput struct { type RebalanceSlotsInGlobalReplicationGroupOutput struct { // Consists of a primary cluster that accepts writes and an associated secondary - // cluster that resides in a different AWS region. The secondary cluster accepts + // cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // diff --git a/service/elasticache/api_op_RemoveTagsFromResource.go b/service/elasticache/api_op_RemoveTagsFromResource.go index d25df89e8aa..1b00d867692 100644 --- a/service/elasticache/api_op_RemoveTagsFromResource.go +++ b/service/elasticache/api_op_RemoveTagsFromResource.go @@ -39,7 +39,7 @@ type RemoveTagsFromResourceInput struct { // The Amazon Resource Name (ARN) of the resource from which you want the tags // removed, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster // or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. For more - // information about ARNs, see Amazon Resource Names (ARNs) and AWS Service + // information about ARNs, see Amazon Resource Names (ARNs) and Amazon Service // Namespaces // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). 
// diff --git a/service/elasticache/api_op_RevokeCacheSecurityGroupIngress.go b/service/elasticache/api_op_RevokeCacheSecurityGroupIngress.go index 771228eb70a..29df8a053c4 100644 --- a/service/elasticache/api_op_RevokeCacheSecurityGroupIngress.go +++ b/service/elasticache/api_op_RevokeCacheSecurityGroupIngress.go @@ -41,9 +41,9 @@ type RevokeCacheSecurityGroupIngressInput struct { // This member is required. EC2SecurityGroupName *string - // The AWS account number of the Amazon EC2 security group owner. Note that this is - // not the same thing as an AWS access key ID - you must provide a valid AWS - // account number for this parameter. + // The Amazon account number of the Amazon EC2 security group owner. Note that this + // is not the same thing as an Amazon access key ID - you must provide a valid + // Amazon account number for this parameter. // // This member is required. EC2SecurityGroupOwnerId *string diff --git a/service/elasticache/api_op_TestFailover.go b/service/elasticache/api_op_TestFailover.go index 8d900b69045..53883fc42b6 100644 --- a/service/elasticache/api_op_TestFailover.go +++ b/service/elasticache/api_op_TestFailover.go @@ -17,7 +17,7 @@ import ( // // * A customer can use this // operation to test automatic failover on up to 5 shards (called node groups in -// the ElastiCache API and AWS CLI) in any rolling 24-hour period. +// the ElastiCache API and Amazon CLI) in any rolling 24-hour period. // // * If calling // this operation on shards in different clusters (called replication groups in the @@ -29,7 +29,7 @@ import ( // call can be made. // // * To determine whether the node replacement is complete you -// can check Events using the Amazon ElastiCache console, the AWS CLI, or the +// can check Events using the Amazon ElastiCache console, the Amazon CLI, or the // ElastiCache API. 
Look for the following automatic failover related events, // listed here in order of occurrance: // diff --git a/service/elasticache/deserializers.go b/service/elasticache/deserializers.go index 7f0cf6cd538..f1385495f4a 100644 --- a/service/elasticache/deserializers.go +++ b/service/elasticache/deserializers.go @@ -18871,6 +18871,23 @@ func awsAwsquery_deserializeDocumentReplicationGroup(v **types.ReplicationGroup, return err } + case strings.EqualFold("ReplicationGroupCreateTime", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.ReplicationGroupCreateTime = ptr.Time(t) + } + case strings.EqualFold("ReplicationGroupId", t.Name.Local): val, err := decoder.Value() if err != nil { diff --git a/service/elasticache/types/types.go b/service/elasticache/types/types.go index 26f14947b52..613c5915f6c 100644 --- a/service/elasticache/types/types.go +++ b/service/elasticache/types/types.go @@ -329,7 +329,7 @@ type CacheNode struct { // The cache node identifier. A node ID is a numeric identifier (0001, 0002, etc.). // The combination of cluster ID and node ID uniquely identifies every cache node - // used in a customer's AWS account. + // used in a customer's Amazon account. CacheNodeId *string // The current state of this cache node, one of the following values: available, @@ -501,7 +501,7 @@ type CacheSecurityGroup struct { // security group. EC2SecurityGroups []EC2SecurityGroup - // The AWS account ID of the cache security group owner. + // The Amazon account ID of the cache security group owner. OwnerId *string noSmithyDocumentSerde @@ -632,7 +632,7 @@ type EC2SecurityGroup struct { // The name of the Amazon EC2 security group. EC2SecurityGroupName *string - // The AWS account ID of the Amazon EC2 security group owner. + // The Amazon account ID of the Amazon EC2 security group owner. 
EC2SecurityGroupOwnerId *string // The status of the Amazon EC2 security group. @@ -726,7 +726,7 @@ type GlobalNodeGroup struct { } // Consists of a primary cluster that accepts writes and an associated secondary -// cluster that resides in a different AWS region. The secondary cluster accepts +// cluster that resides in a different Amazon region. The secondary cluster accepts // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // @@ -800,7 +800,7 @@ type GlobalReplicationGroupInfo struct { noSmithyDocumentSerde } -// A member of a Global datastore. It contains the Replication Group Id, the AWS +// A member of a Global datastore. It contains the Replication Group Id, the Amazon // region and the role of the replication group. type GlobalReplicationGroupMember struct { @@ -810,7 +810,7 @@ type GlobalReplicationGroupMember struct { // The replication group id of the Global datastore member. ReplicationGroupId *string - // The AWS region of the Global datastore member. + // The Amazon region of the Global datastore member. ReplicationGroupRegion *string // Indicates the role of the replication group, primary or secondary. @@ -1199,7 +1199,7 @@ type RegionalConfiguration struct { // This member is required. ReplicationGroupId *string - // The AWS region where the cluster is stored + // The Amazon region where the cluster is stored // // This member is required. ReplicationGroupRegion *string @@ -1283,6 +1283,9 @@ type ReplicationGroup struct { // or during the next maintenance window. PendingModifiedValues *ReplicationGroupPendingModifiedValues + // The date and time when the cluster was created. + ReplicationGroupCreateTime *time.Time + // The identifier for the replication group. 
ReplicationGroupId *string diff --git a/service/emr/api_client.go b/service/emr/api_client.go index 76d089eaa0c..f3e9800aeb2 100644 --- a/service/emr/api_client.go +++ b/service/emr/api_client.go @@ -21,8 +21,7 @@ import ( const ServiceID = "EMR" const ServiceAPIVersion = "2009-03-31" -// Client provides the API client to make operations call for Amazon Elastic -// MapReduce. +// Client provides the API client to make operations call for Amazon EMR. type Client struct { options Options } diff --git a/service/emr/api_op_CreateStudioSessionMapping.go b/service/emr/api_op_CreateStudioSessionMapping.go index 1207fe2ece0..afaa2619986 100644 --- a/service/emr/api_op_CreateStudioSessionMapping.go +++ b/service/emr/api_op_CreateStudioSessionMapping.go @@ -56,7 +56,7 @@ type CreateStudioSessionMappingInput struct { // and GroupId // (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-GroupId) // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName - // or IdentityId must be specified. + // or IdentityId must be specified, but not both. IdentityId *string // The name of the user or group. For more information, see UserName @@ -64,7 +64,7 @@ type CreateStudioSessionMappingInput struct { // and DisplayName // (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName - // or IdentityId must be specified. + // or IdentityId must be specified, but not both. IdentityName *string noSmithyDocumentSerde diff --git a/service/emr/api_op_ListReleaseLabels.go b/service/emr/api_op_ListReleaseLabels.go index 6646049a4d5..a9160dc0a41 100644 --- a/service/emr/api_op_ListReleaseLabels.go +++ b/service/emr/api_op_ListReleaseLabels.go @@ -42,7 +42,7 @@ type ListReleaseLabelsInput struct { // Specifies the next page of results. 
If NextToken is not specified, which is
 	// usually the case for the first request of ListReleaseLabels, the first page of
 	// results are determined by other filtering parameters or by the latest version.
-	// The ListReleaseLabels request fails if the identity (AWS AccountID) and all
+	// The ListReleaseLabels request fails if the identity (account ID) and all
 	// filtering parameters are different from the original request, or if the
 	// NextToken is expired or tampered with.
 	NextToken *string
diff --git a/service/emr/api_op_RunJobFlow.go b/service/emr/api_op_RunJobFlow.go
index 5a040113924..f477f99b379 100644
--- a/service/emr/api_op_RunJobFlow.go
+++ b/service/emr/api_op_RunJobFlow.go
@@ -232,7 +232,7 @@ type RunJobFlowInput struct {
 	// Set this value to true so that IAM principals in the account associated with the
 	// cluster can perform EMR actions on the cluster that their IAM policies allow.
-	// This value defaults to false for clusters created using the EMR API or the CLI
+	// This value defaults to true for clusters created using the EMR API or the CLI
 	// create-cluster
 	// (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html)
 	// command. When set to false, only the IAM principal that created the cluster and
@@ -240,7 +240,7 @@ type RunJobFlowInput struct {
 	// IAM permissions policies attached to other IAM principals. For more information,
 	// see Understanding the EMR Cluster VisibleToAllUsers Setting
 	// (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users)
-	// in the Amazon EMR Management Guide.
+	// in the Amazon EMR Management Guide.
VisibleToAllUsers bool noSmithyDocumentSerde diff --git a/service/emr/api_op_SetVisibleToAllUsers.go b/service/emr/api_op_SetVisibleToAllUsers.go index 587f94d3af1..e64d7f0752d 100644 --- a/service/emr/api_op_SetVisibleToAllUsers.go +++ b/service/emr/api_op_SetVisibleToAllUsers.go @@ -19,7 +19,7 @@ import ( // RunJobFlowInput$VisibleToAllUsers parameter. For more information, see // Understanding the EMR Cluster VisibleToAllUsers Setting // (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) -// in the Amazon EMR Management Guide. +// in the Amazon EMRManagement Guide. func (c *Client) SetVisibleToAllUsers(ctx context.Context, params *SetVisibleToAllUsersInput, optFns ...func(*Options)) (*SetVisibleToAllUsersOutput, error) { if params == nil { params = &SetVisibleToAllUsersInput{} diff --git a/service/emr/deserializers.go b/service/emr/deserializers.go index 6b21bfe8da3..ca774f62db9 100644 --- a/service/emr/deserializers.go +++ b/service/emr/deserializers.go @@ -8072,6 +8072,15 @@ func awsAwsjson11_deserializeDocumentInstanceGroup(v **types.InstanceGroup, valu sv.ConfigurationsVersion = i64 } + case "CustomAmiId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XmlStringMaxLen256 to be of type string, got %T instead", value) + } + sv.CustomAmiId = ptr.String(jtv) + } + case "EbsBlockDevices": if err := awsAwsjson11_deserializeDocumentEbsBlockDeviceList(&sv.EbsBlockDevices, value); err != nil { return err @@ -8241,6 +8250,15 @@ func awsAwsjson11_deserializeDocumentInstanceGroupDetail(v **types.InstanceGroup } } + case "CustomAmiId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XmlStringMaxLen256 to be of type string, got %T instead", value) + } + sv.CustomAmiId = ptr.String(jtv) + } + case "EndDateTime": if value != nil { switch jtv := value.(type) { @@ -9005,6 +9023,15 @@ func 
awsAwsjson11_deserializeDocumentInstanceTypeSpecification(v **types.Instanc return err } + case "CustomAmiId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XmlStringMaxLen256 to be of type string, got %T instead", value) + } + sv.CustomAmiId = ptr.String(jtv) + } + case "EbsBlockDevices": if err := awsAwsjson11_deserializeDocumentEbsBlockDeviceList(&sv.EbsBlockDevices, value); err != nil { return err diff --git a/service/emr/doc.go b/service/emr/doc.go index 34b7e50162e..f055c7347b2 100644 --- a/service/emr/doc.go +++ b/service/emr/doc.go @@ -1,7 +1,7 @@ // Code generated by smithy-go-codegen DO NOT EDIT. // Package emr provides the API client, operations, and parameter types for Amazon -// Elastic MapReduce. +// EMR. // // Amazon EMR is a web service that makes it easier to process large amounts of // data efficiently. Amazon EMR uses Hadoop processing combined with several Amazon diff --git a/service/emr/serializers.go b/service/emr/serializers.go index f446287176a..3f68afcfb96 100644 --- a/service/emr/serializers.go +++ b/service/emr/serializers.go @@ -2848,6 +2848,11 @@ func awsAwsjson11_serializeDocumentInstanceGroupConfig(v *types.InstanceGroupCon } } + if v.CustomAmiId != nil { + ok := object.Key("CustomAmiId") + ok.String(*v.CustomAmiId) + } + if v.EbsConfiguration != nil { ok := object.Key("EbsConfiguration") if err := awsAwsjson11_serializeDocumentEbsConfiguration(v.EbsConfiguration, ok); err != nil { @@ -3029,6 +3034,11 @@ func awsAwsjson11_serializeDocumentInstanceTypeConfig(v *types.InstanceTypeConfi } } + if v.CustomAmiId != nil { + ok := object.Key("CustomAmiId") + ok.String(*v.CustomAmiId) + } + if v.EbsConfiguration != nil { ok := object.Key("EbsConfiguration") if err := awsAwsjson11_serializeDocumentEbsConfiguration(v.EbsConfiguration, ok); err != nil { diff --git a/service/emr/types/types.go b/service/emr/types/types.go index bce2b773d30..a3ed826c3a5 100644 --- a/service/emr/types/types.go +++ 
b/service/emr/types/types.go @@ -393,17 +393,15 @@ type Cluster struct { // perform EMR cluster actions on the cluster that their IAM policies allow. When // false, only the IAM principal that created the cluster and the account root user // can perform EMR actions, regardless of IAM permissions policies attached to - // other IAM principals. The default value is false if a value is not provided when - // creating a cluster using the EMR API RunJobFlow command or the CLI - // create-cluster + // other IAM principals. The default value is true if a value is not provided when + // creating a cluster using the EMR API RunJobFlow command, the CLI create-cluster // (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) - // command. The default value is true when a cluster is created using the - // Management Console. IAM principals that are allowed to perform actions on the - // cluster can use the SetVisibleToAllUsers action to change the value on a running - // cluster. For more information, see Understanding the EMR Cluster - // VisibleToAllUsers Setting + // command, or the Management Console. IAM principals that are allowed to perform + // actions on the cluster can use the SetVisibleToAllUsers action to change the + // value on a running cluster. For more information, see Understanding the EMR + // Cluster VisibleToAllUsers Setting // (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) - // in the Amazon EMR Management Guide. + // in the Amazon EMRManagement Guide. VisibleToAllUsers bool noSmithyDocumentSerde @@ -1087,6 +1085,9 @@ type InstanceGroup struct { // instance group. ConfigurationsVersion int64 + // The custom AMI ID to use for the provisioned instance group. + CustomAmiId *string + // The EBS block devices that are mapped to this instance group. 
EbsBlockDevices []EbsBlockDevice @@ -1168,6 +1169,9 @@ type InstanceGroupConfig struct { // instance group (master, core, and task). Configurations []Configuration + // The custom AMI ID to use for the provisioned instance group. + CustomAmiId *string + // EBS configurations that will be attached to each EC2 instance in the instance // group. EbsConfiguration *EbsConfiguration @@ -1225,6 +1229,9 @@ type InstanceGroupDetail struct { // to set the amount equal to the On-Demand price, or specify an amount in USD. BidPrice *string + // The custom AMI ID to use for the provisioned instance group. + CustomAmiId *string + // The date/time the instance group was terminated. EndDateTime *time.Time @@ -1404,6 +1411,9 @@ type InstanceTypeConfig struct { // cluster. Configurations []Configuration + // The custom AMI ID to use for the instance type. + CustomAmiId *string + // The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each // instance as defined by InstanceType. EbsConfiguration *EbsConfiguration @@ -1435,6 +1445,9 @@ type InstanceTypeSpecification struct { // Amazon EMR. Configurations []Configuration + // The custom AMI ID to use for the instance type. + CustomAmiId *string + // The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each // instance as defined by InstanceType. EbsBlockDevices []EbsBlockDevice @@ -1533,16 +1546,15 @@ type JobFlowDetail struct { // perform EMR cluster actions that their IAM policies allow. When false, only the // IAM principal that created the cluster and the account root user can perform EMR // actions, regardless of IAM permissions policies attached to other IAM - // principals. The default value is false if a value is not provided when creating - // a cluster using the EMR API RunJobFlow command or the CLI create-cluster + // principals. 
The default value is true if a value is not provided when creating a
+	// cluster using the EMR API RunJobFlow command, the CLI create-cluster
 	// (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html)
-	// command. The default value is true when a cluster is created using the
-	// Management Console. IAM principals that are authorized to perform actions on the
-	// cluster can use the SetVisibleToAllUsers action to change the value on a running
-	// cluster. For more information, see Understanding the EMR Cluster
-	// VisibleToAllUsers Setting
+	// command, or the Management Console. IAM principals that are authorized to
+	// perform actions on the cluster can use the SetVisibleToAllUsers action to change
+	// the value on a running cluster. For more information, see Understanding the EMR
+	// Cluster VisibleToAllUsers Setting
 	// (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users)
-	// in the Amazon EMR Management Guide.
+	// in the Amazon EMR Management Guide.
 	VisibleToAllUsers bool
 
 	noSmithyDocumentSerde
diff --git a/service/iotsitewise/api_op_GetInterpolatedAssetPropertyValues.go b/service/iotsitewise/api_op_GetInterpolatedAssetPropertyValues.go
index 25e138cd280..1fe9b91976a 100644
--- a/service/iotsitewise/api_op_GetInterpolatedAssetPropertyValues.go
+++ b/service/iotsitewise/api_op_GetInterpolatedAssetPropertyValues.go
@@ -66,7 +66,17 @@ type GetInterpolatedAssetPropertyValuesInput struct {
 	// This member is required.
 	StartTimeInSeconds *int64
 
-	// The interpolation type. Valid values: LINEAR_INTERPOLATION
+	// The interpolation type. Valid values: LINEAR_INTERPOLATION | LOCF_INTERPOLATION
+	// For the LOCF_INTERPOLATION interpolation, if no data point is found for an
+	// interval, IoT SiteWise returns the same interpolated value calculated for the
+	// previous interval and carries forward this interpolated value until a new data
+	// point is found. 
For example, you can get the interpolated temperature values for
+	// a wind turbine every 24 hours over a duration of 7 days. If the
+	// LOCF_INTERPOLATION interpolation starts on July 1, 2021, at 9 AM, IoT SiteWise
+	// uses the data points from July 1, 2021, at 9 AM to July 2, 2021, at 9 AM to
+	// compute the first interpolated value. If no data point is found after 9 AM on
+	// July 2, 2021, IoT SiteWise uses the same interpolated value for the rest of the
+	// days.
 	//
 	// This member is required.
 	Type *string
@@ -77,6 +87,29 @@
 	// The nanosecond offset converted from endTimeInSeconds.
 	EndTimeOffsetInNanos *int32
 
+	// The query interval for the window in seconds. IoT SiteWise computes each
+	// interpolated value by using data points from the timestamp of each interval
+	// minus the window to the timestamp of each interval plus the window. If not
+	// specified, the window is between the start time minus the interval and the end
+	// time plus the interval.
+	//
+	// * If you specify a value for the
+	// intervalWindowInSeconds parameter, the type parameter must be
+	// LINEAR_INTERPOLATION.
+	//
+	// * If no data point is found during the specified query
+	// window, IoT SiteWise won't return an interpolated value for the interval. This
+	// indicates that there's a gap in the ingested data points.
+	//
+	// For example, you can
+	// get the interpolated temperature values for a wind turbine every 24 hours over a
+	// duration of 7 days. If the interpolation starts on July 1, 2021, at 9 AM with a
+	// window of 2 hours, IoT SiteWise uses the data points from 7 AM (9 AM - 2 hours)
+	// to 11 AM (9 AM + 2 hours) on July 2, 2021 to compute the first interpolated
+	// value, uses the data points from 7 AM (9 AM - 2 hours) to 11 AM (9 AM + 2 hours)
+	// on July 3, 2021 to compute the second interpolated value, and so on.
+ IntervalWindowInSeconds *int64 + // The maximum number of results to return for each paginated request. If not // specified, the default value is 10. MaxResults *int32 diff --git a/service/iotsitewise/serializers.go b/service/iotsitewise/serializers.go index 62b912d96c0..a5387f4cc72 100644 --- a/service/iotsitewise/serializers.go +++ b/service/iotsitewise/serializers.go @@ -2527,6 +2527,10 @@ func awsRestjson1_serializeOpHttpBindingsGetInterpolatedAssetPropertyValuesInput encoder.SetQuery("intervalInSeconds").Long(*v.IntervalInSeconds) } + if v.IntervalWindowInSeconds != nil { + encoder.SetQuery("intervalWindowInSeconds").Long(*v.IntervalWindowInSeconds) + } + if v.MaxResults != nil { encoder.SetQuery("maxResults").Integer(*v.MaxResults) } diff --git a/service/iotsitewise/types/types.go b/service/iotsitewise/types/types.go index 5b436ddfe09..f67436e6da4 100644 --- a/service/iotsitewise/types/types.go +++ b/service/iotsitewise/types/types.go @@ -1534,40 +1534,39 @@ type TumblingWindow struct { // for interval, IoT SiteWise aggregates data in one of the following ways: // // * If - // you create the metric before or at 6:00 p.m. (UTC), you get the first - // aggregation result at 6 p.m. (UTC) on the day when you create the metric. + // you create the metric before or at 6:00 PM (UTC), you get the first aggregation + // result at 6 PM (UTC) on the day when you create the metric. // - // * If - // you create the metric after 6:00 p.m. (UTC), you get the first aggregation - // result at 6 p.m. (UTC) the next day. + // * If you create the + // metric after 6:00 PM (UTC), you get the first aggregation result at 6 PM (UTC) + // the next day. // - // * The ISO 8601 format. For example, if you - // specify PT18H for offset and 1d for interval, IoT SiteWise aggregates data in - // one of the following ways: + // * The ISO 8601 format. 
For example, if you specify PT18H for + // offset and 1d for interval, IoT SiteWise aggregates data in one of the following + // ways: // - // * If you create the metric before or at 6:00 p.m. - // (UTC), you get the first aggregation result at 6 p.m. (UTC) on the day when you - // create the metric. + // * If you create the metric before or at 6:00 PM (UTC), you get the first + // aggregation result at 6 PM (UTC) on the day when you create the metric. // - // * If you create the metric after 6:00 p.m. (UTC), you get - // the first aggregation result at 6 p.m. (UTC) the next day. + // * If + // you create the metric after 6:00 PM (UTC), you get the first aggregation result + // at 6 PM (UTC) the next day. // - // * The 24-hour clock. - // For example, if you specify 00:03:00 for offset and 5m for interval, and you - // create the metric at 2 p.m. (UTC), you get the first aggregation result at 2:03 - // p.m. (UTC). You get the second aggregation result at 2:08 p.m. (UTC). + // * The 24-hour clock. For example, if you specify + // 00:03:00 for offset and 5m for interval, and you create the metric at 2 PM + // (UTC), you get the first aggregation result at 2:03 PM (UTC). You get the second + // aggregation result at 2:08 PM (UTC). // - // * The - // offset time zone. For example, if you specify 2021-07-23T18:00-08 for offset and - // 1d for interval, IoT SiteWise aggregates data in one of the following ways: + // * The offset time zone. For example, if + // you specify 2021-07-23T18:00-08 for offset and 1d for interval, IoT SiteWise + // aggregates data in one of the following ways: // - // * - // If you create the metric before or at 6:00 p.m. (PST), you get the first - // aggregation result at 6 p.m. (PST) on the day when you create the metric. + // * If you create the metric before + // or at 6:00 PM (PST), you get the first aggregation result at 6 PM (PST) on the + // day when you create the metric. // - // * If - // you create the metric after 6:00 p.m. 
(PST), you get the first aggregation - // result at 6 p.m. (PST) the next day. + // * If you create the metric after 6:00 PM (PST), + // you get the first aggregation result at 6 PM (PST) the next day. Offset *string noSmithyDocumentSerde diff --git a/service/lambda/api_op_ListFunctions.go b/service/lambda/api_op_ListFunctions.go index 25e611dbb4c..ef42164073c 100644 --- a/service/lambda/api_op_ListFunctions.go +++ b/service/lambda/api_op_ListFunctions.go @@ -43,10 +43,10 @@ type ListFunctionsInput struct { // the next page of results. Marker *string - // For Lambda@Edge functions, the Region of the master function. For example, - // us-east-1 filters the list of functions to only include Lambda@Edge functions - // replicated from a master function in US East (N. Virginia). If specified, you - // must set FunctionVersion to ALL. + // For Lambda@Edge functions, the Amazon Web Services Region of the master + // function. For example, us-east-1 filters the list of functions to only include + // Lambda@Edge functions replicated from a master function in US East (N. + // Virginia). If specified, you must set FunctionVersion to ALL. MasterRegion *string // The maximum number of functions to return in the response. 
Note that diff --git a/service/lambda/types/enums.go b/service/lambda/types/enums.go index 6421a4be813..03830fc83db 100644 --- a/service/lambda/types/enums.go +++ b/service/lambda/types/enums.go @@ -237,6 +237,7 @@ const ( RuntimePython36 Runtime = "python3.6" RuntimePython37 Runtime = "python3.7" RuntimePython38 Runtime = "python3.8" + RuntimePython39 Runtime = "python3.9" RuntimeDotnetcore10 Runtime = "dotnetcore1.0" RuntimeDotnetcore20 Runtime = "dotnetcore2.0" RuntimeDotnetcore21 Runtime = "dotnetcore2.1" @@ -268,6 +269,7 @@ func (Runtime) Values() []Runtime { "python3.6", "python3.7", "python3.8", + "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", diff --git a/service/licensemanager/types/enums.go b/service/licensemanager/types/enums.go index 00315e6f77b..843105fe0c9 100644 --- a/service/licensemanager/types/enums.go +++ b/service/licensemanager/types/enums.go @@ -35,6 +35,7 @@ type CheckoutType string // Enum values for CheckoutType const ( CheckoutTypeProvisional CheckoutType = "PROVISIONAL" + CheckoutTypePerpetual CheckoutType = "PERPETUAL" ) // Values returns all known values for CheckoutType. Note that this can be expanded @@ -43,6 +44,7 @@ const ( func (CheckoutType) Values() []CheckoutType { return []CheckoutType{ "PROVISIONAL", + "PERPETUAL", } } diff --git a/service/memorydb/LICENSE.txt b/service/memorydb/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/service/memorydb/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/service/memorydb/api_client.go b/service/memorydb/api_client.go new file mode 100644 index 00000000000..cfcfae1cb72 --- /dev/null +++ b/service/memorydb/api_client.go @@ -0,0 +1,261 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package memorydb + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "time" +) + +const ServiceID = "MemoryDB" +const ServiceAPIVersion = "2021-01-01" + +// Client provides the API client to make operations call for Amazon MemoryDB. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The endpoint options to be used when attempting to resolve an endpoint. 
+ EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. + Retryer aws.Retryer + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + if o.HTTPClient != nil { + return + } + o.HTTPClient = awshttp.NewBuildableClient() +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + o.Retryer = retry.NewStandard() +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "memorydb", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func 
addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/service/memorydb/api_op_BatchUpdateCluster.go b/service/memorydb/api_op_BatchUpdateCluster.go new file mode 100644 index 00000000000..1cb123a8d64 --- /dev/null +++ b/service/memorydb/api_op_BatchUpdateCluster.go @@ -0,0 +1,129 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Apply the service update to a list of clusters supplied. For more information on +// service updates and applying them, see Applying the service updates +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/managing-updates.html#applying-updates). +func (c *Client) BatchUpdateCluster(ctx context.Context, params *BatchUpdateClusterInput, optFns ...func(*Options)) (*BatchUpdateClusterOutput, error) { + if params == nil { + params = &BatchUpdateClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchUpdateCluster", params, optFns, c.addOperationBatchUpdateClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchUpdateClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type BatchUpdateClusterInput struct { + + // The cluster names to apply the updates. + // + // This member is required. 
+ ClusterNames []string + + // The unique ID of the service update + ServiceUpdate *types.ServiceUpdateRequest + + noSmithyDocumentSerde +} + +type BatchUpdateClusterOutput struct { + + // The list of clusters that have been updated. + ProcessedClusters []types.Cluster + + // The list of clusters where updates have not been applied. + UnprocessedClusters []types.UnprocessedCluster + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchUpdateClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchUpdateCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchUpdateCluster{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchUpdateClusterValidationMiddleware(stack); err != nil { + return 
err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchUpdateCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchUpdateCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "BatchUpdateCluster", + } +} diff --git a/service/memorydb/api_op_CopySnapshot.go b/service/memorydb/api_op_CopySnapshot.go new file mode 100644 index 00000000000..f263b9aa56b --- /dev/null +++ b/service/memorydb/api_op_CopySnapshot.go @@ -0,0 +1,144 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Makes a copy of an existing snapshot. +func (c *Client) CopySnapshot(ctx context.Context, params *CopySnapshotInput, optFns ...func(*Options)) (*CopySnapshotOutput, error) { + if params == nil { + params = &CopySnapshotInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CopySnapshot", params, optFns, c.addOperationCopySnapshotMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CopySnapshotOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CopySnapshotInput struct { + + // The name of an existing snapshot from which to make a copy. + // + // This member is required. + SourceSnapshotName *string + + // A name for the snapshot copy. 
MemoryDB does not permit overwriting a snapshot, + // therefore this name must be unique within its context - MemoryDB or an Amazon S3 + // bucket if exporting. + // + // This member is required. + TargetSnapshotName *string + + // The ID of the KMS key used to encrypt the target snapshot. + KmsKeyId *string + + // A list of tags to be added to this resource. A tag is a key-value pair. A tag + // key must be accompanied by a tag value, although null is accepted. + Tags []types.Tag + + // The Amazon S3 bucket to which the snapshot is exported. This parameter is used + // only when exporting a snapshot for external access. When using this parameter to + // export a snapshot, be sure MemoryDB has the needed permissions to this S3 + // bucket. For more information, see Step 2: Grant MemoryDB Access to Your Amazon + // S3 Bucket + // (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/snapshots-exporting.html). + TargetBucket *string + + noSmithyDocumentSerde +} + +type CopySnapshotOutput struct { + + // Represents a copy of an entire cluster as of the time when the snapshot was + // taken. + Snapshot *types.Snapshot + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCopySnapshotMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCopySnapshot{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCopySnapshot{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCopySnapshotValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopySnapshot(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opCopySnapshot(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "CopySnapshot", + } +} diff --git a/service/memorydb/api_op_CreateACL.go b/service/memorydb/api_op_CreateACL.go new file mode 100644 index 00000000000..5822aa53ca0 --- /dev/null +++ b/service/memorydb/api_op_CreateACL.go @@ -0,0 +1,130 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates an Access Control List. For more information, see Authenticating users +// with Access Contol Lists (ACLs) +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/Clusters.ACLs.html). +func (c *Client) CreateACL(ctx context.Context, params *CreateACLInput, optFns ...func(*Options)) (*CreateACLOutput, error) { + if params == nil { + params = &CreateACLInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateACL", params, optFns, c.addOperationCreateACLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateACLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateACLInput struct { + + // The name of the Access Control List. + // + // This member is required. + ACLName *string + + // A list of tags to be added to this resource. A tag is a key-value pair. A tag + // key must be accompanied by a tag value, although null is accepted. + Tags []types.Tag + + // The list of users that belong to the Access Control List. + UserNames []string + + noSmithyDocumentSerde +} + +type CreateACLOutput struct { + + // The newly-created Access Control List. 
+ ACL *types.ACL + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateACLMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateACL{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateACL{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateACLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateACL(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil 
+} + +func newServiceMetadataMiddleware_opCreateACL(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "CreateACL", + } +} diff --git a/service/memorydb/api_op_CreateCluster.go b/service/memorydb/api_op_CreateCluster.go new file mode 100644 index 00000000000..63c7d960453 --- /dev/null +++ b/service/memorydb/api_op_CreateCluster.go @@ -0,0 +1,206 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a cluster. All nodes in the cluster run the same protocol-compliant +// engine software. +func (c *Client) CreateCluster(ctx context.Context, params *CreateClusterInput, optFns ...func(*Options)) (*CreateClusterOutput, error) { + if params == nil { + params = &CreateClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateCluster", params, optFns, c.addOperationCreateClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateClusterInput struct { + + // The name of the Access Control List to associate with the cluster. + // + // This member is required. + ACLName *string + + // The name of the cluster. This value must be unique as it also serves as the + // cluster identifier. + // + // This member is required. + ClusterName *string + + // The compute and memory capacity of the nodes in the cluster. + // + // This member is required. + NodeType *string + + // When set to true, the cluster will automatically receive minor engine version + // upgrades after launch. 
+ AutoMinorVersionUpgrade *bool + + // An optional description of the cluster. + Description *string + + // The version number of the Redis engine to be used for the cluster. + EngineVersion *string + + // The ID of the KMS key used to encrypt the cluster. + KmsKeyId *string + + // Specifies the weekly time range during which maintenance on the cluster is + // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H + // Clock UTC). The minimum maintenance window is a 60 minute period. + MaintenanceWindow *string + + // The number of replicas to apply to each shard. The limit is 5. + NumReplicasPerShard *int32 + + // The number of shards the cluster will contain. Clusters can have up to 500 + // shards, with your data partitioned across the shards. For example, you can + // choose to configure a 500 node cluster that ranges between 83 shards (one + // primary and 5 replicas per shard) and 500 shards (single primary and no + // replicas). Make sure there are enough available IP addresses to accommodate the + // increase. Common pitfalls include the subnets in the subnet group have too small + // a CIDR range or the subnets are shared and heavily used by other clusters. + NumShards *int32 + + // The name of the parameter group associated with the cluster. + ParameterGroupName *string + + // The port number on which each of the nodes accepts connections. + Port *int32 + + // A list of security group names to associate with this cluster. + SecurityGroupIds []string + + // A list of Amazon Resource Names (ARN) that uniquely identify the RDB snapshot + // files stored in Amazon S3. The snapshot files are used to populate the new + // cluster. The Amazon S3 object name in the ARN cannot contain any commas. + SnapshotArns []string + + // The name of a snapshot from which to restore data into the new cluster. The + // snapshot status changes to restoring while the new cluster is being created. 
+ SnapshotName *string + + // The number of days for which MemoryDB retains automatic snapshots before + // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + // that was taken today is retained for 5 days before being deleted. + SnapshotRetentionLimit *int32 + + // The daily time range (in UTC) during which MemoryDB begins taking a daily + // snapshot of your shard. Example: 05:00-09:00 If you do not specify this + // parameter, MemoryDB automatically chooses an appropriate time range. + SnapshotWindow *string + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) + // topic to which notifications are sent. + SnsTopicArn *string + + // The name of the subnet group to be used for the cluster. + SubnetGroupName *string + + // A flag to enable in-transit encryption on the cluster. + TLSEnabled *bool + + // A list of tags to be added to this resource. Tags are comma-separated key,value + // pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as shown + // following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateClusterOutput struct { + + // The newly-created cluster. + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateCluster{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opCreateCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "CreateCluster", + } +} diff --git a/service/memorydb/api_op_CreateParameterGroup.go b/service/memorydb/api_op_CreateParameterGroup.go new file mode 100644 index 00000000000..a7072316b63 --- /dev/null +++ b/service/memorydb/api_op_CreateParameterGroup.go @@ -0,0 +1,137 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new MemoryDB parameter group. A parameter group is a collection of +// parameters and their values that are applied to all of the nodes in any cluster. +// For more information, see Configuring engine parameters using parameter groups +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/ParameterGroups.html). +func (c *Client) CreateParameterGroup(ctx context.Context, params *CreateParameterGroupInput, optFns ...func(*Options)) (*CreateParameterGroupOutput, error) { + if params == nil { + params = &CreateParameterGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateParameterGroup", params, optFns, c.addOperationCreateParameterGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateParameterGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateParameterGroupInput struct { + + // The name of the parameter group family that the parameter group can be used + // with. + // + // This member is required. + Family *string + + // The name of the parameter group. + // + // This member is required. 
+ ParameterGroupName *string + + // An optional description of the parameter group. + Description *string + + // A list of tags to be added to this resource. A tag is a key-value pair. A tag + // key must be accompanied by a tag value, although null is accepted. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateParameterGroupOutput struct { + + // The newly-created parameter group. + ParameterGroup *types.ParameterGroup + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateParameterGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateParameterGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateParameterGroup{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
addOpCreateParameterGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateParameterGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateParameterGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "CreateParameterGroup", + } +} diff --git a/service/memorydb/api_op_CreateSnapshot.go b/service/memorydb/api_op_CreateSnapshot.go new file mode 100644 index 00000000000..246eaaa477d --- /dev/null +++ b/service/memorydb/api_op_CreateSnapshot.go @@ -0,0 +1,133 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a copy of an entire cluster at a specific moment in time. +func (c *Client) CreateSnapshot(ctx context.Context, params *CreateSnapshotInput, optFns ...func(*Options)) (*CreateSnapshotOutput, error) { + if params == nil { + params = &CreateSnapshotInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateSnapshot", params, optFns, c.addOperationCreateSnapshotMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateSnapshotOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateSnapshotInput struct { + + // The snapshot is created from this cluster. 
+ // + // This member is required. + ClusterName *string + + // A name for the snapshot being created. + // + // This member is required. + SnapshotName *string + + // The ID of the KMS key used to encrypt the snapshot. + KmsKeyId *string + + // A list of tags to be added to this resource. A tag is a key-value pair. A tag + // key must be accompanied by a tag value, although null is accepted. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateSnapshotOutput struct { + + // The newly-created snapshot. + Snapshot *types.Snapshot + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateSnapshotMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateSnapshot{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateSnapshot{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateSnapshotValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSnapshot(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateSnapshot(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "CreateSnapshot", + } +} diff --git a/service/memorydb/api_op_CreateSubnetGroup.go b/service/memorydb/api_op_CreateSubnetGroup.go new file mode 100644 index 00000000000..647c818cd3a --- /dev/null +++ b/service/memorydb/api_op_CreateSubnetGroup.go @@ -0,0 +1,139 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a subnet group. A subnet group is a collection of subnets (typically +// private) that you can designate for your clusters running in an Amazon Virtual +// Private Cloud (VPC) environment. When you create a cluster in an Amazon VPC, you +// must specify a subnet group. MemoryDB uses that subnet group to choose a subnet +// and IP addresses within that subnet to associate with your nodes. For more +// information, see Subnets and subnet groups +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/SubnetGroups.html). 
+func (c *Client) CreateSubnetGroup(ctx context.Context, params *CreateSubnetGroupInput, optFns ...func(*Options)) (*CreateSubnetGroupOutput, error) { + if params == nil { + params = &CreateSubnetGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateSubnetGroup", params, optFns, c.addOperationCreateSubnetGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateSubnetGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateSubnetGroupInput struct { + + // The name of the subnet group. + // + // This member is required. + SubnetGroupName *string + + // A list of VPC subnet IDs for the subnet group. + // + // This member is required. + SubnetIds []string + + // A description for the subnet group. + Description *string + + // A list of tags to be added to this resource. A tag is a key-value pair. A tag + // key must be accompanied by a tag value, although null is accepted. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateSubnetGroupOutput struct { + + // The newly-created subnet group + SubnetGroup *types.SubnetGroup + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateSubnetGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateSubnetGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateSubnetGroup{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateSubnetGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSubnetGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opCreateSubnetGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "CreateSubnetGroup", + } +} diff --git a/service/memorydb/api_op_CreateUser.go b/service/memorydb/api_op_CreateUser.go new file mode 100644 index 00000000000..6a170762750 --- /dev/null +++ b/service/memorydb/api_op_CreateUser.go @@ -0,0 +1,139 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a MemoryDB user. For more information, see Authenticating users with +// Access Contol Lists (ACLs) +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/Clusters.ACLs.html). +func (c *Client) CreateUser(ctx context.Context, params *CreateUserInput, optFns ...func(*Options)) (*CreateUserOutput, error) { + if params == nil { + params = &CreateUserInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateUser", params, optFns, c.addOperationCreateUserMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateUserOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateUserInput struct { + + // Access permissions string used for this user. + // + // This member is required. + AccessString *string + + // Denotes the user's authentication properties, such as whether it requires a + // password to authenticate. + // + // This member is required. + AuthenticationMode *types.AuthenticationMode + + // The name of the user. This value must be unique as it also serves as the user + // identifier. + // + // This member is required. 
+ UserName *string + + // A list of tags to be added to this resource. A tag is a key-value pair. A tag + // key must be accompanied by a tag value, although null is accepted. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateUserOutput struct { + + // The newly-created user. + User *types.User + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateUserMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateUser{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateUser{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateUserValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateUser(options.Region), 
middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateUser(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "CreateUser", + } +} diff --git a/service/memorydb/api_op_DeleteACL.go b/service/memorydb/api_op_DeleteACL.go new file mode 100644 index 00000000000..12dc1b36b31 --- /dev/null +++ b/service/memorydb/api_op_DeleteACL.go @@ -0,0 +1,124 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes an Access Control List. The ACL must first be disassociated from the +// cluster before it can be deleted. For more information, see Authenticating users +// with Access Contol Lists (ACLs) +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/Clusters.ACLs.html). +func (c *Client) DeleteACL(ctx context.Context, params *DeleteACLInput, optFns ...func(*Options)) (*DeleteACLOutput, error) { + if params == nil { + params = &DeleteACLInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteACL", params, optFns, c.addOperationDeleteACLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteACLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteACLInput struct { + + // The name of the Access Control List to delete + // + // This member is required. 
+ ACLName *string + + noSmithyDocumentSerde +} + +type DeleteACLOutput struct { + + // The Access Control List object that has been deleted. + ACL *types.ACL + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteACLMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteACL{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteACL{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteACLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteACL(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteACL(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DeleteACL", + } +} diff --git a/service/memorydb/api_op_DeleteCluster.go b/service/memorydb/api_op_DeleteCluster.go new file mode 100644 index 00000000000..9d614900cea --- /dev/null +++ b/service/memorydb/api_op_DeleteCluster.go @@ -0,0 +1,126 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a cluster. It also deletes all associated nodes and node endpoints +func (c *Client) DeleteCluster(ctx context.Context, params *DeleteClusterInput, optFns ...func(*Options)) (*DeleteClusterOutput, error) { + if params == nil { + params = &DeleteClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteCluster", params, optFns, c.addOperationDeleteClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteClusterInput struct { + + // The name of the cluster to be deleted + // + // This member is required. + ClusterName *string + + // The user-supplied name of a final cluster snapshot. This is the unique name that + // identifies the snapshot. MemoryDB creates the snapshot, and then deletes the + // cluster immediately afterward. 
+ FinalSnapshotName *string + + noSmithyDocumentSerde +} + +type DeleteClusterOutput struct { + + // The cluster object that has been deleted + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteCluster{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if 
err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DeleteCluster", + } +} diff --git a/service/memorydb/api_op_DeleteParameterGroup.go b/service/memorydb/api_op_DeleteParameterGroup.go new file mode 100644 index 00000000000..5fe0ffe4003 --- /dev/null +++ b/service/memorydb/api_op_DeleteParameterGroup.go @@ -0,0 +1,123 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified parameter group. You cannot delete a parameter group if it +// is associated with any clusters. You cannot delete the default parameter groups +// in your account. +func (c *Client) DeleteParameterGroup(ctx context.Context, params *DeleteParameterGroupInput, optFns ...func(*Options)) (*DeleteParameterGroupOutput, error) { + if params == nil { + params = &DeleteParameterGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteParameterGroup", params, optFns, c.addOperationDeleteParameterGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteParameterGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteParameterGroupInput struct { + + // The name of the parameter group to delete. + // + // This member is required. 
+ ParameterGroupName *string + + noSmithyDocumentSerde +} + +type DeleteParameterGroupOutput struct { + + // The parameter group that has been deleted. + ParameterGroup *types.ParameterGroup + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteParameterGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteParameterGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteParameterGroup{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteParameterGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteParameterGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteParameterGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DeleteParameterGroup", + } +} diff --git a/service/memorydb/api_op_DeleteSnapshot.go b/service/memorydb/api_op_DeleteSnapshot.go new file mode 100644 index 00000000000..58ebfdcf253 --- /dev/null +++ b/service/memorydb/api_op_DeleteSnapshot.go @@ -0,0 +1,123 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes an existing snapshot. When you receive a successful response from this +// operation, MemoryDB immediately begins deleting the snapshot; you cannot cancel +// or revert this operation. +func (c *Client) DeleteSnapshot(ctx context.Context, params *DeleteSnapshotInput, optFns ...func(*Options)) (*DeleteSnapshotOutput, error) { + if params == nil { + params = &DeleteSnapshotInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteSnapshot", params, optFns, c.addOperationDeleteSnapshotMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteSnapshotOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteSnapshotInput struct { + + // The name of the snapshot to delete + // + // This member is required. 
+ SnapshotName *string + + noSmithyDocumentSerde +} + +type DeleteSnapshotOutput struct { + + // The snapshot object that has been deleted. + Snapshot *types.Snapshot + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteSnapshotMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteSnapshot{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteSnapshot{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteSnapshotValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteSnapshot(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } 
+ if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteSnapshot(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DeleteSnapshot", + } +} diff --git a/service/memorydb/api_op_DeleteSubnetGroup.go b/service/memorydb/api_op_DeleteSubnetGroup.go new file mode 100644 index 00000000000..409746eedb4 --- /dev/null +++ b/service/memorydb/api_op_DeleteSubnetGroup.go @@ -0,0 +1,122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a subnet group. You cannot delete a default subnet group or one that is +// associated with any clusters. +func (c *Client) DeleteSubnetGroup(ctx context.Context, params *DeleteSubnetGroupInput, optFns ...func(*Options)) (*DeleteSubnetGroupOutput, error) { + if params == nil { + params = &DeleteSubnetGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteSubnetGroup", params, optFns, c.addOperationDeleteSubnetGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteSubnetGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteSubnetGroupInput struct { + + // The name of the subnet group to delete + // + // This member is required. + SubnetGroupName *string + + noSmithyDocumentSerde +} + +type DeleteSubnetGroupOutput struct { + + // The subnet group object that has been deleted. 
+ SubnetGroup *types.SubnetGroup + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteSubnetGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteSubnetGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteSubnetGroup{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteSubnetGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteSubnetGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = 
addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteSubnetGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DeleteSubnetGroup", + } +} diff --git a/service/memorydb/api_op_DeleteUser.go b/service/memorydb/api_op_DeleteUser.go new file mode 100644 index 00000000000..148f5190c9d --- /dev/null +++ b/service/memorydb/api_op_DeleteUser.go @@ -0,0 +1,122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a user. The user will be removed from all ACLs and in turn removed from +// all clusters. +func (c *Client) DeleteUser(ctx context.Context, params *DeleteUserInput, optFns ...func(*Options)) (*DeleteUserOutput, error) { + if params == nil { + params = &DeleteUserInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteUser", params, optFns, c.addOperationDeleteUserMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteUserOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteUserInput struct { + + // The name of the user to delete + // + // This member is required. + UserName *string + + noSmithyDocumentSerde +} + +type DeleteUserOutput struct { + + // The user object that has been deleted. + User *types.User + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteUserMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteUser{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteUser{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteUserValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteUser(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteUser(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DeleteUser", + } +} diff --git a/service/memorydb/api_op_DescribeACLs.go b/service/memorydb/api_op_DescribeACLs.go new file mode 100644 index 00000000000..1f43e463de0 --- /dev/null +++ b/service/memorydb/api_op_DescribeACLs.go @@ -0,0 +1,134 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of ACLs +func (c *Client) DescribeACLs(ctx context.Context, params *DescribeACLsInput, optFns ...func(*Options)) (*DescribeACLsOutput, error) { + if params == nil { + params = &DescribeACLsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeACLs", params, optFns, c.addOperationDescribeACLsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeACLsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeACLsInput struct { + + // The name of the ACL + ACLName *string + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. 
+ NextToken *string + + noSmithyDocumentSerde +} + +type DescribeACLsOutput struct { + + // The list of ACLs + ACLs []types.ACL + + // If nextToken is returned, there are more results available. The value of + // nextToken is a unique pagination token for each page. Make the call again using + // the returned token to retrieve the next page. Keep all other arguments + // unchanged. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeACLsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeACLs{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeACLs{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeACLs(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeACLs(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeACLs", + } +} diff --git a/service/memorydb/api_op_DescribeClusters.go b/service/memorydb/api_op_DescribeClusters.go new file mode 100644 index 00000000000..2a576894fa3 --- /dev/null +++ b/service/memorydb/api_op_DescribeClusters.go @@ -0,0 +1,140 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about all provisioned clusters if no cluster identifier is +// specified, or about a specific cluster if a cluster name is supplied. 
+func (c *Client) DescribeClusters(ctx context.Context, params *DescribeClustersInput, optFns ...func(*Options)) (*DescribeClustersOutput, error) { + if params == nil { + params = &DescribeClustersInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeClusters", params, optFns, c.addOperationDescribeClustersMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeClustersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeClustersInput struct { + + // The name of the cluster + ClusterName *string + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // An optional flag that can be included in the request to retrieve information + // about the individual shard(s). + ShowShardDetails *bool + + noSmithyDocumentSerde +} + +type DescribeClustersOutput struct { + + // A list of clusters + Clusters []types.Cluster + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeClustersMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeClusters{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeClusters{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeClusters(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeClusters(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeClusters", + } +} diff --git a/service/memorydb/api_op_DescribeEngineVersions.go b/service/memorydb/api_op_DescribeEngineVersions.go new file mode 100644 index 00000000000..71174200e55 --- /dev/null +++ b/service/memorydb/api_op_DescribeEngineVersions.go @@ -0,0 +1,143 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of the available Redis engine versions. +func (c *Client) DescribeEngineVersions(ctx context.Context, params *DescribeEngineVersionsInput, optFns ...func(*Options)) (*DescribeEngineVersionsOutput, error) { + if params == nil { + params = &DescribeEngineVersionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeEngineVersions", params, optFns, c.addOperationDescribeEngineVersionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeEngineVersionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeEngineVersionsInput struct { + + // If true, specifies that only the default version of the specified engine or + // engine and major version combination is to be returned. + DefaultOnly bool + + // The Redis engine version + EngineVersion *string + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. 
If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // The name of a specific parameter group family to return details for. + ParameterGroupFamily *string + + noSmithyDocumentSerde +} + +type DescribeEngineVersionsOutput struct { + + // A list of engine version details. Each element in the list contains detailed + // information about one engine version. + EngineVersions []types.EngineVersionInfo + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeEngineVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeEngineVersions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeEngineVersions{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeEngineVersions(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeEngineVersions(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeEngineVersions", + } +} diff --git a/service/memorydb/api_op_DescribeEvents.go b/service/memorydb/api_op_DescribeEvents.go new file mode 100644 index 00000000000..5a6fed16bdc --- /dev/null +++ b/service/memorydb/api_op_DescribeEvents.go @@ -0,0 +1,157 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns events related to clusters, security groups, and parameter groups. You +// can obtain events specific to a particular cluster, security group, or parameter +// group by providing the name as a parameter. By default, only the events +// occurring within the last hour are returned; however, you can retrieve up to 14 +// days' worth of events if necessary. +func (c *Client) DescribeEvents(ctx context.Context, params *DescribeEventsInput, optFns ...func(*Options)) (*DescribeEventsOutput, error) { + if params == nil { + params = &DescribeEventsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeEvents", params, optFns, c.addOperationDescribeEventsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeEventsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeEventsInput struct { + + // The number of minutes worth of events to retrieve. + Duration *int32 + + // The end of the time interval for which to retrieve events, specified in ISO 8601 + // format. Example: 2017-03-30T07:03:49.555Z + EndTime *time.Time + + // The maximum number of records to include in the response. 
If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // The identifier of the event source for which events are returned. If not + // specified, all sources are included in the response. + SourceName *string + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + SourceType types.SourceType + + // The beginning of the time interval to retrieve events for, specified in ISO 8601 + // format. Example: 2017-03-30T07:03:49.555Z + StartTime *time.Time + + noSmithyDocumentSerde +} + +type DescribeEventsOutput struct { + + // A list of events. Each element in the list contains detailed information about + // one event. + Events []types.Event + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeEventsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeEvents{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeEvents{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeEvents(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeEvents(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeEvents", + } +} diff --git a/service/memorydb/api_op_DescribeParameterGroups.go b/service/memorydb/api_op_DescribeParameterGroups.go new file mode 100644 index 00000000000..fcf44fc43fe --- /dev/null +++ b/service/memorydb/api_op_DescribeParameterGroups.go @@ -0,0 +1,137 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of parameter group descriptions. If a parameter group name is +// specified, the list contains only the descriptions for that group. +func (c *Client) DescribeParameterGroups(ctx context.Context, params *DescribeParameterGroupsInput, optFns ...func(*Options)) (*DescribeParameterGroupsOutput, error) { + if params == nil { + params = &DescribeParameterGroupsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeParameterGroups", params, optFns, c.addOperationDescribeParameterGroupsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeParameterGroupsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeParameterGroupsInput struct { + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. 
Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // The name of a specific parameter group to return details for. + ParameterGroupName *string + + noSmithyDocumentSerde +} + +type DescribeParameterGroupsOutput struct { + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // A list of parameter groups. Each element in the list contains detailed + // information about one parameter group. + ParameterGroups []types.ParameterGroup + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeParameterGroupsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeParameterGroups{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeParameterGroups{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = 
awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeParameterGroups(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeParameterGroups(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeParameterGroups", + } +} diff --git a/service/memorydb/api_op_DescribeParameters.go b/service/memorydb/api_op_DescribeParameters.go new file mode 100644 index 00000000000..09ed94e8f18 --- /dev/null +++ b/service/memorydb/api_op_DescribeParameters.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the detailed parameter list for a particular parameter group. 
+func (c *Client) DescribeParameters(ctx context.Context, params *DescribeParametersInput, optFns ...func(*Options)) (*DescribeParametersOutput, error) { + if params == nil { + params = &DescribeParametersInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeParameters", params, optFns, c.addOperationDescribeParametersMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeParametersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeParametersInput struct { + + // he name of a specific parameter group to return details for. + // + // This member is required. + ParameterGroupName *string + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + noSmithyDocumentSerde +} + +type DescribeParametersOutput struct { + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // A list of parameters specific to a particular parameter group. Each element in + // the list contains detailed information about one parameter. + Parameters []types.Parameter + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeParametersMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeParameters{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeParameters{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeParametersValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeParameters(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opDescribeParameters(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeParameters", + } +} diff --git a/service/memorydb/api_op_DescribeServiceUpdates.go b/service/memorydb/api_op_DescribeServiceUpdates.go new file mode 100644 index 00000000000..18e9dcacc8b --- /dev/null +++ b/service/memorydb/api_op_DescribeServiceUpdates.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns details of the service updates +func (c *Client) DescribeServiceUpdates(ctx context.Context, params *DescribeServiceUpdatesInput, optFns ...func(*Options)) (*DescribeServiceUpdatesOutput, error) { + if params == nil { + params = &DescribeServiceUpdatesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeServiceUpdates", params, optFns, c.addOperationDescribeServiceUpdatesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeServiceUpdatesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeServiceUpdatesInput struct { + + // The list of cluster names to identify service updates to apply + ClusterNames []string + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. 
+ // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // The unique ID of the service update to describe. + ServiceUpdateName *string + + // The status(es) of the service updates to filter on + Status []types.ServiceUpdateStatus + + noSmithyDocumentSerde +} + +type DescribeServiceUpdatesOutput struct { + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // A list of service updates + ServiceUpdates []types.ServiceUpdate + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeServiceUpdatesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeServiceUpdates{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeServiceUpdates{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); 
err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeServiceUpdates(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeServiceUpdates(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeServiceUpdates", + } +} diff --git a/service/memorydb/api_op_DescribeSnapshots.go b/service/memorydb/api_op_DescribeSnapshots.go new file mode 100644 index 00000000000..d9ac18fe41f --- /dev/null +++ b/service/memorydb/api_op_DescribeSnapshots.go @@ -0,0 +1,152 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about cluster snapshots. By default, DescribeSnapshots lists +// all of your snapshots; it can optionally describe a single snapshot, or just the +// snapshots associated with a particular cluster. 
+func (c *Client) DescribeSnapshots(ctx context.Context, params *DescribeSnapshotsInput, optFns ...func(*Options)) (*DescribeSnapshotsOutput, error) { + if params == nil { + params = &DescribeSnapshotsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeSnapshots", params, optFns, c.addOperationDescribeSnapshotsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeSnapshotsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeSnapshotsInput struct { + + // A user-supplied cluster identifier. If this parameter is specified, only + // snapshots associated with that specific cluster are described. + ClusterName *string + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // A Boolean value which if true, the shard configuration is included in the + // snapshot description. + ShowDetail *bool + + // A user-supplied name of the snapshot. If this parameter is specified, only this + // named snapshot is described. + SnapshotName *string + + // If set to system, the output shows snapshots that were automatically created by + // MemoryDB. If set to user the output shows snapshots that were manually created. + // If omitted, the output shows both automatically and manually created snapshots. 
+ Source *string + + noSmithyDocumentSerde +} + +type DescribeSnapshotsOutput struct { + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // A list of snapshots. Each item in the list contains detailed information about + // one snapshot. + Snapshots []types.Snapshot + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeSnapshotsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeSnapshots{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeSnapshots{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = 
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSnapshots(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeSnapshots(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeSnapshots", + } +} diff --git a/service/memorydb/api_op_DescribeSubnetGroups.go b/service/memorydb/api_op_DescribeSubnetGroups.go new file mode 100644 index 00000000000..510a54702a2 --- /dev/null +++ b/service/memorydb/api_op_DescribeSubnetGroups.go @@ -0,0 +1,137 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of subnet group descriptions. If a subnet group name is +// specified, the list contains only the description of that group. 
+func (c *Client) DescribeSubnetGroups(ctx context.Context, params *DescribeSubnetGroupsInput, optFns ...func(*Options)) (*DescribeSubnetGroupsOutput, error) { + if params == nil { + params = &DescribeSubnetGroupsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeSubnetGroups", params, optFns, c.addOperationDescribeSubnetGroupsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeSubnetGroupsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeSubnetGroupsInput struct { + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // The name of the subnet group to return details for. + SubnetGroupName *string + + noSmithyDocumentSerde +} + +type DescribeSubnetGroupsOutput struct { + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // A list of subnet groups. Each element in the list contains detailed information + // about one group. + SubnetGroups []types.SubnetGroup + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeSubnetGroupsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeSubnetGroups{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeSubnetGroups{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSubnetGroups(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeSubnetGroups(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeSubnetGroups", + } +} diff --git a/service/memorydb/api_op_DescribeUsers.go b/service/memorydb/api_op_DescribeUsers.go new file mode 100644 index 00000000000..25efb77d575 --- /dev/null +++ b/service/memorydb/api_op_DescribeUsers.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of users. +func (c *Client) DescribeUsers(ctx context.Context, params *DescribeUsersInput, optFns ...func(*Options)) (*DescribeUsersOutput, error) { + if params == nil { + params = &DescribeUsersInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeUsers", params, optFns, c.addOperationDescribeUsersMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeUsersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeUsersInput struct { + + // Filter to determine the list of users to return. + Filters []types.Filter + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxResults value, a token is included in the response so that + // the remaining results can be retrieved. + MaxResults *int32 + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. 
+ NextToken *string + + // The name of the user + UserName *string + + noSmithyDocumentSerde +} + +type DescribeUsersOutput struct { + + // An optional argument to pass in case the total number of records exceeds the + // value of MaxResults. If nextToken is returned, there are more results available. + // The value of nextToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextToken *string + + // A list of users. + Users []types.User + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeUsersMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeUsers{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeUsers{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeUsersValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeUsers(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeUsers(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "DescribeUsers", + } +} diff --git a/service/memorydb/api_op_FailoverShard.go b/service/memorydb/api_op_FailoverShard.go new file mode 100644 index 00000000000..5ffd2b21508 --- /dev/null +++ b/service/memorydb/api_op_FailoverShard.go @@ -0,0 +1,126 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Used to failover a shard +func (c *Client) FailoverShard(ctx context.Context, params *FailoverShardInput, optFns ...func(*Options)) (*FailoverShardOutput, error) { + if params == nil { + params = &FailoverShardInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "FailoverShard", params, optFns, c.addOperationFailoverShardMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*FailoverShardOutput) + out.ResultMetadata = metadata + return out, nil +} + +type FailoverShardInput struct { + + // The cluster being failed over + // + // This member is required. + ClusterName *string + + // The name of the shard + // + // This member is required. + ShardName *string + + noSmithyDocumentSerde +} + +type FailoverShardOutput struct { + + // The cluster being failed over + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationFailoverShardMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpFailoverShard{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpFailoverShard{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpFailoverShardValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opFailoverShard(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opFailoverShard(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "FailoverShard", + } +} diff --git a/service/memorydb/api_op_ListAllowedNodeTypeUpdates.go b/service/memorydb/api_op_ListAllowedNodeTypeUpdates.go new file mode 100644 index 00000000000..250e66c6e78 --- /dev/null +++ b/service/memorydb/api_op_ListAllowedNodeTypeUpdates.go @@ -0,0 +1,128 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all available node types that you can scale to from your cluster's current +// node type. When you use the UpdateCluster operation to scale your cluster, the +// value of the NodeType parameter must be one of the node types returned by this +// operation. +func (c *Client) ListAllowedNodeTypeUpdates(ctx context.Context, params *ListAllowedNodeTypeUpdatesInput, optFns ...func(*Options)) (*ListAllowedNodeTypeUpdatesOutput, error) { + if params == nil { + params = &ListAllowedNodeTypeUpdatesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAllowedNodeTypeUpdates", params, optFns, c.addOperationListAllowedNodeTypeUpdatesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAllowedNodeTypeUpdatesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAllowedNodeTypeUpdatesInput struct { + + // The name of the cluster you want to scale. MemoryDB uses the cluster name to + // identify the current node type being used by this cluster, and from that to + // create a list of node types you can scale up to. + // + // This member is required. 
+ ClusterName *string + + noSmithyDocumentSerde +} + +type ListAllowedNodeTypeUpdatesOutput struct { + + // A list node types which you can use to scale down your cluster. + ScaleDownNodeTypes []string + + // A list node types which you can use to scale up your cluster. + ScaleUpNodeTypes []string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAllowedNodeTypeUpdatesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListAllowedNodeTypeUpdates{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListAllowedNodeTypeUpdates{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListAllowedNodeTypeUpdatesValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListAllowedNodeTypeUpdates(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListAllowedNodeTypeUpdates(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "ListAllowedNodeTypeUpdates", + } +} diff --git a/service/memorydb/api_op_ListTags.go b/service/memorydb/api_op_ListTags.go new file mode 100644 index 00000000000..9451baac64c --- /dev/null +++ b/service/memorydb/api_op_ListTags.go @@ -0,0 +1,126 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all tags currently on a named resource. A tag is a key-value pair where +// the key and value are case-sensitive. You can use tags to categorize and track +// your MemoryDB resources. 
For more information, see Tagging your MemoryDB +// resources +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/Tagging-Resources.html) +func (c *Client) ListTags(ctx context.Context, params *ListTagsInput, optFns ...func(*Options)) (*ListTagsOutput, error) { + if params == nil { + params = &ListTagsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTags", params, optFns, c.addOperationListTagsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTagsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTagsInput struct { + + // The Amazon Resource Name (ARN) of the resource for which you want the list of + // tags + // + // This member is required. + ResourceArn *string + + noSmithyDocumentSerde +} + +type ListTagsOutput struct { + + // A list of tags as key-value pairs. + TagList []types.Tag + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTags{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTags{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = 
awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListTagsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTags(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListTags(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "ListTags", + } +} diff --git a/service/memorydb/api_op_ResetParameterGroup.go b/service/memorydb/api_op_ResetParameterGroup.go new file mode 100644 index 00000000000..dc0370c61a0 --- /dev/null +++ b/service/memorydb/api_op_ResetParameterGroup.go @@ -0,0 +1,134 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Modifies the parameters of a parameter group to the engine or system default +// value. You can reset specific parameters by submitting a list of parameter +// names. 
To reset the entire parameter group, specify the AllParameters and +// ParameterGroupName parameters. +func (c *Client) ResetParameterGroup(ctx context.Context, params *ResetParameterGroupInput, optFns ...func(*Options)) (*ResetParameterGroupOutput, error) { + if params == nil { + params = &ResetParameterGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ResetParameterGroup", params, optFns, c.addOperationResetParameterGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ResetParameterGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ResetParameterGroupInput struct { + + // The name of the parameter group to reset. + // + // This member is required. + ParameterGroupName *string + + // If true, all parameters in the parameter group are reset to their default + // values. If false, only the parameters listed by ParameterNames are reset to + // their default values. + AllParameters bool + + // An array of parameter names to reset to their default values. If AllParameters + // is true, do not use ParameterNames. If AllParameters is false, you must specify + // the name of at least one parameter to reset. + ParameterNames []string + + noSmithyDocumentSerde +} + +type ResetParameterGroupOutput struct { + + // The parameter group being reset. + ParameterGroup *types.ParameterGroup + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationResetParameterGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpResetParameterGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpResetParameterGroup{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpResetParameterGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opResetParameterGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opResetParameterGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "ResetParameterGroup", + } +} diff --git a/service/memorydb/api_op_TagResource.go b/service/memorydb/api_op_TagResource.go new file mode 100644 index 00000000000..441107f1759 --- /dev/null +++ b/service/memorydb/api_op_TagResource.go @@ -0,0 +1,138 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// A tag is a key-value pair where the key and value are case-sensitive. You can +// use tags to categorize and track all your MemoryDB resources. When you add or +// remove tags on clusters, those actions will be replicated to all nodes in the +// cluster. For more information, see Resource-level permissions +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/IAM.ResourceLevelPermissions.html). +// For example, you can use cost-allocation tags to your MemoryDB resources, Amazon +// generates a cost allocation report as a comma-separated value (CSV) file with +// your usage and costs aggregated by your tags. You can apply tags that represent +// business categories (such as cost centers, application names, or owners) to +// organize your costs across multiple services. For more information, see Using +// Cost Allocation Tags +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/Tagging.html). 
+func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // The Amazon Resource Name (ARN) of the resource to which the tags are to be added + // + // This member is required. + ResourceArn *string + + // A list of tags to be added to this resource. A tag is a key-value pair. A tag + // key must be accompanied by a tag value, although null is accepted. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + + // A list of tags as key-value pairs. + TagList []types.Tag + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region 
string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "TagResource", + } +} diff --git a/service/memorydb/api_op_UntagResource.go b/service/memorydb/api_op_UntagResource.go new file mode 100644 index 00000000000..67924400ffa --- /dev/null +++ b/service/memorydb/api_op_UntagResource.go @@ -0,0 +1,127 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Use this operation to remove tags on a resource +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // The Amazon Resource Name (ARN) of the resource to which the tags are to be + // removed + // + // This member is required. + ResourceArn *string + + // The list of keys of the tags that are to be removed + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + + // The list of tags removed + TagList []types.Tag + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "UntagResource", + } +} diff --git a/service/memorydb/api_op_UpdateACL.go b/service/memorydb/api_op_UpdateACL.go new file mode 100644 index 00000000000..4c034279017 --- /dev/null +++ b/service/memorydb/api_op_UpdateACL.go @@ -0,0 +1,127 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Changes the list of users that belong to the Access Control List. +func (c *Client) UpdateACL(ctx context.Context, params *UpdateACLInput, optFns ...func(*Options)) (*UpdateACLOutput, error) { + if params == nil { + params = &UpdateACLInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateACL", params, optFns, c.addOperationUpdateACLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateACLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateACLInput struct { + + // The name of the Access Control List + // + // This member is required. + ACLName *string + + // The list of users to add to the Access Control List + UserNamesToAdd []string + + // The list of users to remove from the Access Control List + UserNamesToRemove []string + + noSmithyDocumentSerde +} + +type UpdateACLOutput struct { + + // The updated Access Control List + ACL *types.ACL + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateACLMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateACL{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateACL{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateACLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateACL(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateACL(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "UpdateACL", + } +} diff --git a/service/memorydb/api_op_UpdateCluster.go b/service/memorydb/api_op_UpdateCluster.go new file mode 100644 index 00000000000..02d6baf7be8 --- /dev/null +++ b/service/memorydb/api_op_UpdateCluster.go @@ -0,0 +1,169 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Modifies the settings for a cluster. You can use this operation to change one or +// more cluster configuration settings by specifying the settings and the new +// values. +func (c *Client) UpdateCluster(ctx context.Context, params *UpdateClusterInput, optFns ...func(*Options)) (*UpdateClusterOutput, error) { + if params == nil { + params = &UpdateClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateCluster", params, optFns, c.addOperationUpdateClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateClusterInput struct { + + // The name of the cluster to update + // + // This member is required. + ClusterName *string + + // The Access Control List that is associated with the cluster + ACLName *string + + // The description of the cluster to update + Description *string + + // The upgraded version of the engine to be run on the nodes. You can upgrade to a + // newer engine version, but you cannot downgrade to an earlier engine version. 
If + // you want to use an earlier engine version, you must delete the existing cluster + // and create it anew with the earlier engine version. + EngineVersion *string + + // The maintenance window to update + MaintenanceWindow *string + + // A valid node type that you want to scale this cluster up or down to. + NodeType *string + + // The name of the parameter group to update + ParameterGroupName *string + + // The number of replicas that will reside in each shard + ReplicaConfiguration *types.ReplicaConfigurationRequest + + // The SecurityGroupIds to update + SecurityGroupIds []string + + // The number of shards in the cluster + ShardConfiguration *types.ShardConfigurationRequest + + // The number of days for which MemoryDB retains automatic cluster snapshots before + // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + // that was taken today is retained for 5 days before being deleted. + SnapshotRetentionLimit *int32 + + // The daily time range (in UTC) during which MemoryDB begins taking a daily + // snapshot of your cluster. + SnapshotWindow *string + + // The SNS topic ARN to update + SnsTopicArn *string + + // The status of the Amazon SNS notification topic. Notifications are sent only if + // the status is active. + SnsTopicStatus *string + + noSmithyDocumentSerde +} + +type UpdateClusterOutput struct { + + // The updated cluster + Cluster *types.Cluster + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateCluster{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opUpdateCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "UpdateCluster", + } +} diff --git a/service/memorydb/api_op_UpdateParameterGroup.go b/service/memorydb/api_op_UpdateParameterGroup.go new file mode 100644 index 00000000000..727b34f6276 --- /dev/null +++ b/service/memorydb/api_op_UpdateParameterGroup.go @@ -0,0 +1,129 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the parameters of a parameter group. You can modify up to 20 parameters +// in a single request by submitting a list parameter name and value pairs. +func (c *Client) UpdateParameterGroup(ctx context.Context, params *UpdateParameterGroupInput, optFns ...func(*Options)) (*UpdateParameterGroupOutput, error) { + if params == nil { + params = &UpdateParameterGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateParameterGroup", params, optFns, c.addOperationUpdateParameterGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateParameterGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateParameterGroupInput struct { + + // The name of the parameter group to update. + // + // This member is required. + ParameterGroupName *string + + // An array of parameter names and values for the parameter update. You must supply + // at least one parameter name and value; subsequent arguments are optional. A + // maximum of 20 parameters may be updated per request. + // + // This member is required. 
+ ParameterNameValues []types.ParameterNameValue + + noSmithyDocumentSerde +} + +type UpdateParameterGroupOutput struct { + + // The updated parameter group + ParameterGroup *types.ParameterGroup + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateParameterGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateParameterGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateParameterGroup{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateParameterGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateParameterGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateParameterGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "UpdateParameterGroup", + } +} diff --git a/service/memorydb/api_op_UpdateSubnetGroup.go b/service/memorydb/api_op_UpdateSubnetGroup.go new file mode 100644 index 00000000000..83c533ff785 --- /dev/null +++ b/service/memorydb/api_op_UpdateSubnetGroup.go @@ -0,0 +1,128 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates a subnet group. For more information, see Updating a subnet group +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/SubnetGroups.Modifying.html) +func (c *Client) UpdateSubnetGroup(ctx context.Context, params *UpdateSubnetGroupInput, optFns ...func(*Options)) (*UpdateSubnetGroupOutput, error) { + if params == nil { + params = &UpdateSubnetGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateSubnetGroup", params, optFns, c.addOperationUpdateSubnetGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateSubnetGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateSubnetGroupInput struct { + + // The name of the subnet group + // + // This member is required. 
+ SubnetGroupName *string + + // A description of the subnet group + Description *string + + // The EC2 subnet IDs for the subnet group. + SubnetIds []string + + noSmithyDocumentSerde +} + +type UpdateSubnetGroupOutput struct { + + // The updated subnet group + SubnetGroup *types.SubnetGroup + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateSubnetGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateSubnetGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateSubnetGroup{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateSubnetGroupValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateSubnetGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateSubnetGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "UpdateSubnetGroup", + } +} diff --git a/service/memorydb/api_op_UpdateUser.go b/service/memorydb/api_op_UpdateUser.go new file mode 100644 index 00000000000..d047889516f --- /dev/null +++ b/service/memorydb/api_op_UpdateUser.go @@ -0,0 +1,128 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Changes user password(s) and/or access string. +func (c *Client) UpdateUser(ctx context.Context, params *UpdateUserInput, optFns ...func(*Options)) (*UpdateUserOutput, error) { + if params == nil { + params = &UpdateUserInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateUser", params, optFns, c.addOperationUpdateUserMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateUserOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateUserInput struct { + + // The name of the user + // + // This member is required. + UserName *string + + // Access permissions string used for this user. 
+ AccessString *string + + // Denotes the user's authentication properties, such as whether it requires a + // password to authenticate. + AuthenticationMode *types.AuthenticationMode + + noSmithyDocumentSerde +} + +type UpdateUserOutput struct { + + // The updated user + User *types.User + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateUserMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateUser{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateUser{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateUserValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateUser(options.Region), middleware.Before); err != 
nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateUser(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "memorydb", + OperationName: "UpdateUser", + } +} diff --git a/service/memorydb/deserializers.go b/service/memorydb/deserializers.go new file mode 100644 index 00000000000..1318ef5e3b4 --- /dev/null +++ b/service/memorydb/deserializers.go @@ -0,0 +1,12229 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "math" + "strings" +) + +type awsAwsjson11_deserializeOpBatchUpdateCluster struct { +} + +func (*awsAwsjson11_deserializeOpBatchUpdateCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchUpdateCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if 
response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchUpdateCluster(response, &metadata) + } + output := &BatchUpdateClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchUpdateClusterOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchUpdateCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + 
Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ServiceUpdateNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceUpdateNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCopySnapshot struct { +} + +func (*awsAwsjson11_deserializeOpCopySnapshot) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCopySnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCopySnapshot(response, &metadata) + } + output := &CopySnapshotOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCopySnapshotOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCopySnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("InvalidSnapshotStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidSnapshotStateFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotAlreadyExistsFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("SnapshotNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotQuotaExceededFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotQuotaExceededFault(response, errorBody) + + case strings.EqualFold("TagQuotaPerResourceExceeded", errorCode): + return awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateACL struct { +} + +func (*awsAwsjson11_deserializeOpCreateACL) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateACL) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateACL(response, &metadata) + } + output := 
&CreateACLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateACLOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateACL(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = 
restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLAlreadyExistsFault", errorCode): + return awsAwsjson11_deserializeErrorACLAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("ACLQuotaExceededFault", errorCode): + return awsAwsjson11_deserializeErrorACLQuotaExceededFault(response, errorBody) + + case strings.EqualFold("DefaultUserRequired", errorCode): + return awsAwsjson11_deserializeErrorDefaultUserRequired(response, errorBody) + + case strings.EqualFold("DuplicateUserNameFault", errorCode): + return awsAwsjson11_deserializeErrorDuplicateUserNameFault(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("TagQuotaPerResourceExceeded", errorCode): + return awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response, errorBody) + + case strings.EqualFold("UserNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorUserNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateCluster struct { +} + +func (*awsAwsjson11_deserializeOpCreateCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode 
< 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateCluster(response, &metadata) + } + output := &CreateClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateClusterOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode 
response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorACLNotFoundFault(response, errorBody) + + case strings.EqualFold("ClusterAlreadyExistsFault", errorCode): + return awsAwsjson11_deserializeErrorClusterAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("ClusterQuotaForCustomerExceededFault", errorCode): + return awsAwsjson11_deserializeErrorClusterQuotaForCustomerExceededFault(response, errorBody) + + case strings.EqualFold("InsufficientClusterCapacityFault", errorCode): + return awsAwsjson11_deserializeErrorInsufficientClusterCapacityFault(response, errorBody) + + case strings.EqualFold("InvalidACLStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidACLStateFault(response, errorBody) + + case strings.EqualFold("InvalidCredentialsException", errorCode): + return awsAwsjson11_deserializeErrorInvalidCredentialsException(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("InvalidVPCNetworkStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidVPCNetworkStateFault(response, errorBody) + + case strings.EqualFold("NodeQuotaForClusterExceededFault", errorCode): + return awsAwsjson11_deserializeErrorNodeQuotaForClusterExceededFault(response, errorBody) + + case strings.EqualFold("NodeQuotaForCustomerExceededFault", errorCode): + return 
awsAwsjson11_deserializeErrorNodeQuotaForCustomerExceededFault(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("ShardsPerClusterQuotaExceededFault", errorCode): + return awsAwsjson11_deserializeErrorShardsPerClusterQuotaExceededFault(response, errorBody) + + case strings.EqualFold("SubnetGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("TagQuotaPerResourceExceeded", errorCode): + return awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateParameterGroup struct { +} + +func (*awsAwsjson11_deserializeOpCreateParameterGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateParameterGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateParameterGroup(response, &metadata) + } + output := &CreateParameterGroupOutput{} + out.Result = output + + var buff [1024]byte + 
ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateParameterGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateParameterGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 
0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterGroupStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterGroupStateFault(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ParameterGroupAlreadyExistsFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("ParameterGroupQuotaExceededFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupQuotaExceededFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("TagQuotaPerResourceExceeded", errorCode): + return awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateSnapshot struct { +} + +func (*awsAwsjson11_deserializeOpCreateSnapshot) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown 
transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateSnapshot(response, &metadata) + } + output := &CreateSnapshotOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateSnapshotOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidClusterStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotAlreadyExistsFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("SnapshotQuotaExceededFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotQuotaExceededFault(response, errorBody) + + case strings.EqualFold("TagQuotaPerResourceExceeded", errorCode): + return awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateSubnetGroup struct { +} + +func (*awsAwsjson11_deserializeOpCreateSubnetGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateSubnetGroup) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateSubnetGroup(response, &metadata) + } + output := &CreateSubnetGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateSubnetGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateSubnetGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := 
response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidSubnet", errorCode): + return awsAwsjson11_deserializeErrorInvalidSubnet(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SubnetGroupAlreadyExistsFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("SubnetGroupQuotaExceededFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupQuotaExceededFault(response, errorBody) + + case strings.EqualFold("SubnetNotAllowedFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetNotAllowedFault(response, errorBody) + + case strings.EqualFold("SubnetQuotaExceededFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetQuotaExceededFault(response, errorBody) + + case strings.EqualFold("TagQuotaPerResourceExceeded", errorCode): + return awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateUser 
struct { +} + +func (*awsAwsjson11_deserializeOpCreateUser) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateUser) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateUser(response, &metadata) + } + output := &CreateUserOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateUserOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateUser(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy 
error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DuplicateUserNameFault", errorCode): + return awsAwsjson11_deserializeErrorDuplicateUserNameFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("TagQuotaPerResourceExceeded", errorCode): + return awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response, errorBody) + + case strings.EqualFold("UserAlreadyExistsFault", errorCode): + return awsAwsjson11_deserializeErrorUserAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("UserQuotaExceededFault", errorCode): + return awsAwsjson11_deserializeErrorUserQuotaExceededFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsAwsjson11_deserializeOpDeleteACL struct { +} + +func (*awsAwsjson11_deserializeOpDeleteACL) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteACL) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteACL(response, &metadata) + } + output := &DeleteACLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteACLOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteACL(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorACLNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidACLStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidACLStateFault(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteCluster struct { +} + +func (*awsAwsjson11_deserializeOpDeleteCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return 
out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteCluster(response, &metadata) + } + output := &DeleteClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteClusterOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidClusterStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotAlreadyExistsFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotAlreadyExistsFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteParameterGroup struct { +} + +func (*awsAwsjson11_deserializeOpDeleteParameterGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteParameterGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, 
err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteParameterGroup(response, &metadata) + } + output := &DeleteParameterGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteParameterGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteParameterGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + 
ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterGroupStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterGroupStateFault(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteSnapshot struct { +} + +func (*awsAwsjson11_deserializeOpDeleteSnapshot) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = 
next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteSnapshot(response, &metadata) + } + output := &DeleteSnapshotOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteSnapshotOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("InvalidSnapshotStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidSnapshotStateFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteSubnetGroup struct { +} + +func (*awsAwsjson11_deserializeOpDeleteSubnetGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteSubnetGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, 
err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteSubnetGroup(response, &metadata) + } + output := &DeleteSubnetGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteSubnetGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteSubnetGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SubnetGroupInUseFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupInUseFault(response, errorBody) + + case strings.EqualFold("SubnetGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteUser struct { +} + +func (*awsAwsjson11_deserializeOpDeleteUser) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteUser) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteUser(response, &metadata) + } + output := &DeleteUserOutput{} + out.Result = output + 
+ var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteUserOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteUser(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) 
!= 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("InvalidUserStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidUserStateFault(response, errorBody) + + case strings.EqualFold("UserNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorUserNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeACLs struct { +} + +func (*awsAwsjson11_deserializeOpDescribeACLs) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeACLs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeACLs(response, &metadata) + } + output := &DescribeACLsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), 
+ } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeACLsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeACLs(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorACLNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: 
errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeClusters struct { +} + +func (*awsAwsjson11_deserializeOpDescribeClusters) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeClusters) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeClusters(response, &metadata) + } + output := &DescribeClustersOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeClustersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeClusters(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + 
if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeEngineVersions struct { +} + +func (*awsAwsjson11_deserializeOpDescribeEngineVersions) ID() string { + return "OperationDeserializer" 
+} + +func (m *awsAwsjson11_deserializeOpDescribeEngineVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeEngineVersions(response, &metadata) + } + output := &DescribeEngineVersionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeEngineVersionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeEngineVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeEvents struct { +} + +func (*awsAwsjson11_deserializeOpDescribeEvents) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeEvents) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, 
err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeEvents(response, &metadata) + } + output := &DescribeEventsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeEventsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeEvents(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeParameterGroups struct { +} + +func (*awsAwsjson11_deserializeOpDescribeParameterGroups) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeParameterGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeParameterGroups(response, 
&metadata) + } + output := &DescribeParameterGroupsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeParameterGroupsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeParameterGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeParameters struct { +} + +func (*awsAwsjson11_deserializeOpDescribeParameters) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeParameters) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeParameters(response, &metadata) + } + output := &DescribeParametersOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeParametersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeParameters(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case 
strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeServiceUpdates struct { +} + +func (*awsAwsjson11_deserializeOpDescribeServiceUpdates) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeServiceUpdates) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeServiceUpdates(response, &metadata) + } + output := &DescribeServiceUpdatesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && 
err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeServiceUpdatesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeServiceUpdates(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + 
case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeSnapshots struct { +} + +func (*awsAwsjson11_deserializeOpDescribeSnapshots) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeSnapshots(response, &metadata) + } + output := &DescribeSnapshotsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeSnapshotsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotNotFoundFault(response, errorBody) + + default: + genericError := 
&smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeSubnetGroups struct { +} + +func (*awsAwsjson11_deserializeOpDescribeSubnetGroups) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeSubnetGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeSubnetGroups(response, &metadata) + } + output := &DescribeSubnetGroupsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeSubnetGroupsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeSubnetGroups(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SubnetGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeUsers struct { +} + +func (*awsAwsjson11_deserializeOpDescribeUsers) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeUsers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, 
metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeUsers(response, &metadata) + } + output := &DescribeUsersOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeUsersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeUsers(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + 
body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("UserNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorUserNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpFailoverShard struct { +} + +func (*awsAwsjson11_deserializeOpFailoverShard) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpFailoverShard) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorFailoverShard(response, &metadata) + } + output := &FailoverShardOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + 
body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentFailoverShardOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorFailoverShard(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case 
strings.EqualFold("APICallRateForCustomerExceededFault", errorCode): + return awsAwsjson11_deserializeErrorAPICallRateForCustomerExceededFault(response, errorBody) + + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidClusterStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidKMSKeyFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidKMSKeyFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ShardNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorShardNotFoundFault(response, errorBody) + + case strings.EqualFold("TestFailoverNotAvailableFault", errorCode): + return awsAwsjson11_deserializeErrorTestFailoverNotAvailableFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListAllowedNodeTypeUpdates struct { +} + +func (*awsAwsjson11_deserializeOpListAllowedNodeTypeUpdates) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListAllowedNodeTypeUpdates) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return 
out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListAllowedNodeTypeUpdates(response, &metadata) + } + output := &ListAllowedNodeTypeUpdatesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListAllowedNodeTypeUpdatesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListAllowedNodeTypeUpdates(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := 
restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListTags struct { +} + +func (*awsAwsjson11_deserializeOpListTags) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsAwsjson11_deserializeOpErrorListTags(response, &metadata) + } + output := &ListTagsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListTagsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorACLNotFoundFault(response, errorBody) + + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidARNFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidARNFault(response, errorBody) + + case strings.EqualFold("InvalidClusterStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidClusterStateFault(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotNotFoundFault(response, errorBody) + + case strings.EqualFold("SubnetGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("UserNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorUserNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpResetParameterGroup struct { +} + +func (*awsAwsjson11_deserializeOpResetParameterGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpResetParameterGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, 
metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorResetParameterGroup(response, &metadata) + } + output := &ResetParameterGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentResetParameterGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorResetParameterGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } 
+ + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterGroupStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterGroupStateFault(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTagResource struct { +} + +func (*awsAwsjson11_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, 
metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorACLNotFoundFault(response, errorBody) + + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidARNFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidARNFault(response, errorBody) + + case strings.EqualFold("InvalidClusterStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidClusterStateFault(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotNotFoundFault(response, errorBody) + + case strings.EqualFold("SubnetGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("TagQuotaPerResourceExceeded", errorCode): + return awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response, errorBody) + + case strings.EqualFold("UserNotFoundFault", errorCode): + return 
awsAwsjson11_deserializeErrorUserNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUntagResource struct { +} + +func (*awsAwsjson11_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func 
awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorACLNotFoundFault(response, errorBody) + + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidARNFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidARNFault(response, errorBody) + + case strings.EqualFold("InvalidClusterStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidClusterStateFault(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode): + return 
awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody) + + case strings.EqualFold("SnapshotNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSnapshotNotFoundFault(response, errorBody) + + case strings.EqualFold("SubnetGroupNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorSubnetGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("TagNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorTagNotFoundFault(response, errorBody) + + case strings.EqualFold("UserNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorUserNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateACL struct { +} + +func (*awsAwsjson11_deserializeOpUpdateACL) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateACL) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateACL(response, &metadata) + } + output := &UpdateACLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateACLOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateACL(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorACLNotFoundFault(response, errorBody) + + case strings.EqualFold("DefaultUserRequired", errorCode): + return awsAwsjson11_deserializeErrorDefaultUserRequired(response, 
errorBody) + + case strings.EqualFold("DuplicateUserNameFault", errorCode): + return awsAwsjson11_deserializeErrorDuplicateUserNameFault(response, errorBody) + + case strings.EqualFold("InvalidACLStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidACLStateFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("UserNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorUserNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateCluster struct { +} + +func (*awsAwsjson11_deserializeOpUpdateCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateCluster(response, &metadata) + } + output := &UpdateClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateClusterOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ACLNotFoundFault", errorCode): + return 
awsAwsjson11_deserializeErrorACLNotFoundFault(response, errorBody) + + case strings.EqualFold("ClusterNotFoundFault", errorCode): + return awsAwsjson11_deserializeErrorClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("ClusterQuotaForCustomerExceededFault", errorCode): + return awsAwsjson11_deserializeErrorClusterQuotaForCustomerExceededFault(response, errorBody) + + case strings.EqualFold("InvalidACLStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidACLStateFault(response, errorBody) + + case strings.EqualFold("InvalidClusterStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidKMSKeyFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidKMSKeyFault(response, errorBody) + + case strings.EqualFold("InvalidNodeStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidNodeStateFault(response, errorBody) + + case strings.EqualFold("InvalidParameterCombinationException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody) + + case strings.EqualFold("InvalidParameterValueException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody) + + case strings.EqualFold("InvalidVPCNetworkStateFault", errorCode): + return awsAwsjson11_deserializeErrorInvalidVPCNetworkStateFault(response, errorBody) + + case strings.EqualFold("NoOperationFault", errorCode): + return awsAwsjson11_deserializeErrorNoOperationFault(response, errorBody) + + case strings.EqualFold("NodeQuotaForClusterExceededFault", errorCode): + return awsAwsjson11_deserializeErrorNodeQuotaForClusterExceededFault(response, errorBody) + + case strings.EqualFold("NodeQuotaForCustomerExceededFault", errorCode): + return awsAwsjson11_deserializeErrorNodeQuotaForCustomerExceededFault(response, errorBody) + + case strings.EqualFold("ParameterGroupNotFoundFault", errorCode): 
		return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody)

	case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode):
		return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody)

	case strings.EqualFold("ShardsPerClusterQuotaExceededFault", errorCode):
		return awsAwsjson11_deserializeErrorShardsPerClusterQuotaExceededFault(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// NOTE(review): this looks like smithy-go generated protocol code — prefer
// regenerating from the model over hand-editing; comments below are for
// review orientation only.

// awsAwsjson11_deserializeOpUpdateParameterGroup is the deserialize
// middleware for the UpdateParameterGroup operation.
type awsAwsjson11_deserializeOpUpdateParameterGroup struct {
}

// ID identifies this middleware within the operation's middleware stack.
func (*awsAwsjson11_deserializeOpUpdateParameterGroup) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize decodes a successful UpdateParameterGroup HTTP response
// into an UpdateParameterGroupOutput, routing non-2xx responses to the
// operation's error deserializer.
func (m *awsAwsjson11_deserializeOpUpdateParameterGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	// Let the remainder of the stack perform the round trip; deserialization
	// happens on the way back out.
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	// Non-2xx status codes are handled by the operation error deserializer.
	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsAwsjson11_deserializeOpErrorUpdateParameterGroup(response, &metadata)
	}
	output := &UpdateParameterGroupOutput{}
	out.Result = output

	// Tee the body through a ring buffer so a snapshot of the most recently
	// read bytes can be attached to any deserialization error below.
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(response.Body, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	// Map the decoded generic JSON shape onto the typed output structure.
	err = awsAwsjson11_deserializeOpDocumentUpdateParameterGroupOutput(&output, shape)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	return out, metadata, err
}

// awsAwsjson11_deserializeOpErrorUpdateParameterGroup maps a non-2xx
// UpdateParameterGroup response to the modeled API error type, falling back
// to a generic API error for unrecognized codes.
func awsAwsjson11_deserializeOpErrorUpdateParameterGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	// Buffer the entire body so the selected error deserializer can re-read it.
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	// The error code may arrive in the X-Amzn-ErrorType header...
	code := response.Header.Get("X-Amzn-ErrorType")
	if len(code) != 0 {
		errorCode = restjson.SanitizeErrorCode(code)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	// ...or in the JSON body; the body value (if present) takes precedence.
	code, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	// Rewind so the per-error deserializer reads the body from the start.
	errorBody.Seek(0, io.SeekStart)
	if len(code) != 0 {
		errorCode = restjson.SanitizeErrorCode(code)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("InvalidParameterCombinationException", errorCode):
		return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody)

	case strings.EqualFold("InvalidParameterGroupStateFault", errorCode):
		return awsAwsjson11_deserializeErrorInvalidParameterGroupStateFault(response, errorBody)

	case strings.EqualFold("InvalidParameterValueException", errorCode):
		return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody)

	case strings.EqualFold("ParameterGroupNotFoundFault", errorCode):
		return awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response, errorBody)

	case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode):
		return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsAwsjson11_deserializeOpUpdateSubnetGroup is the deserialize middleware
// for the UpdateSubnetGroup operation.
type awsAwsjson11_deserializeOpUpdateSubnetGroup struct {
}

// ID identifies this middleware within the operation's middleware stack.
func (*awsAwsjson11_deserializeOpUpdateSubnetGroup) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize decodes a successful UpdateSubnetGroup HTTP response into
// an UpdateSubnetGroupOutput, routing non-2xx responses to the operation's
// error deserializer.
func (m *awsAwsjson11_deserializeOpUpdateSubnetGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsAwsjson11_deserializeOpErrorUpdateSubnetGroup(response, &metadata)
	}
	output := &UpdateSubnetGroupOutput{}
	out.Result = output

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(response.Body, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	err =
awsAwsjson11_deserializeOpDocumentUpdateSubnetGroupOutput(&output, shape)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	return out, metadata, err
}

// awsAwsjson11_deserializeOpErrorUpdateSubnetGroup maps a non-2xx
// UpdateSubnetGroup response to the modeled API error type, falling back to a
// generic API error for unrecognized codes.
func awsAwsjson11_deserializeOpErrorUpdateSubnetGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	// Buffer the entire body so the selected error deserializer can re-read it.
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	// Error code from the header first; the JSON body may override it below.
	code := response.Header.Get("X-Amzn-ErrorType")
	if len(code) != 0 {
		errorCode = restjson.SanitizeErrorCode(code)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	code, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	// Rewind so the per-error deserializer reads the body from the start.
	errorBody.Seek(0, io.SeekStart)
	if len(code) != 0 {
		errorCode = restjson.SanitizeErrorCode(code)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("InvalidSubnet", errorCode):
		return awsAwsjson11_deserializeErrorInvalidSubnet(response, errorBody)

	case strings.EqualFold("ServiceLinkedRoleNotFoundFault", errorCode):
		return awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response, errorBody)

	case strings.EqualFold("SubnetGroupNotFoundFault", errorCode):
		return awsAwsjson11_deserializeErrorSubnetGroupNotFoundFault(response, errorBody)

	case strings.EqualFold("SubnetInUse", errorCode):
		return awsAwsjson11_deserializeErrorSubnetInUse(response, errorBody)

	case strings.EqualFold("SubnetNotAllowedFault", errorCode):
		return awsAwsjson11_deserializeErrorSubnetNotAllowedFault(response, errorBody)

	case strings.EqualFold("SubnetQuotaExceededFault", errorCode):
		return awsAwsjson11_deserializeErrorSubnetQuotaExceededFault(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsAwsjson11_deserializeOpUpdateUser is the deserialize middleware for the
// UpdateUser operation.
type awsAwsjson11_deserializeOpUpdateUser struct {
}

// ID identifies this middleware within the operation's middleware stack.
func (*awsAwsjson11_deserializeOpUpdateUser) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize decodes a successful UpdateUser HTTP response into an
// UpdateUserOutput, routing non-2xx responses to the operation's error
// deserializer.
func (m *awsAwsjson11_deserializeOpUpdateUser) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsAwsjson11_deserializeOpErrorUpdateUser(response, &metadata)
	}
	output := &UpdateUserOutput{}
	out.Result = output

	// Ring buffer retains recent body bytes for error snapshots.
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(response.Body, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	err = awsAwsjson11_deserializeOpDocumentUpdateUserOutput(&output, shape)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	return out, metadata, err
}

// awsAwsjson11_deserializeOpErrorUpdateUser maps a non-2xx UpdateUser
// response to the modeled API error type, falling back to a generic API error
// for unrecognized codes.
func awsAwsjson11_deserializeOpErrorUpdateUser(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	code := response.Header.Get("X-Amzn-ErrorType")
	if len(code) != 0 {
		errorCode = restjson.SanitizeErrorCode(code)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	code, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	if len(code) != 0 {
		errorCode = restjson.SanitizeErrorCode(code)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("InvalidParameterCombinationException", errorCode):
		return awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response, errorBody)

	case strings.EqualFold("InvalidParameterValueException", errorCode):
		return awsAwsjson11_deserializeErrorInvalidParameterValueException(response, errorBody)

	case strings.EqualFold("InvalidUserStateFault", errorCode):
		return awsAwsjson11_deserializeErrorInvalidUserStateFault(response, errorBody)

	case strings.EqualFold("UserNotFoundFault", errorCode):
		return awsAwsjson11_deserializeErrorUserNotFoundFault(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsAwsjson11_deserializeErrorACLAlreadyExistsFault decodes the buffered
// error response body into a *types.ACLAlreadyExistsFault.
func awsAwsjson11_deserializeErrorACLAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.ACLAlreadyExistsFault{}
	err := awsAwsjson11_deserializeDocumentACLAlreadyExistsFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	// Rewind so any later reader sees the body from the start.
	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorACLNotFoundFault decodes the buffered error
// response body into a *types.ACLNotFoundFault.
func awsAwsjson11_deserializeErrorACLNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.ACLNotFoundFault{}
	err :=
awsAwsjson11_deserializeDocumentACLNotFoundFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorACLQuotaExceededFault decodes the buffered
// error response body into a *types.ACLQuotaExceededFault.
func awsAwsjson11_deserializeErrorACLQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	// Tee through a ring buffer so decode failures carry a body snapshot.
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.ACLQuotaExceededFault{}
	err := awsAwsjson11_deserializeDocumentACLQuotaExceededFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	// Rewind so any later reader sees the body from the start.
	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorAPICallRateForCustomerExceededFault decodes the
// buffered error response body into a *types.APICallRateForCustomerExceededFault.
func awsAwsjson11_deserializeErrorAPICallRateForCustomerExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.APICallRateForCustomerExceededFault{}
	err := awsAwsjson11_deserializeDocumentAPICallRateForCustomerExceededFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorClusterAlreadyExistsFault decodes the buffered
// error response body into a *types.ClusterAlreadyExistsFault.
func awsAwsjson11_deserializeErrorClusterAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.ClusterAlreadyExistsFault{}
	err := awsAwsjson11_deserializeDocumentClusterAlreadyExistsFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorClusterNotFoundFault decodes the buffered
// error response body into a *types.ClusterNotFoundFault.
func awsAwsjson11_deserializeErrorClusterNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.ClusterNotFoundFault{}
	err := awsAwsjson11_deserializeDocumentClusterNotFoundFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorClusterQuotaForCustomerExceededFault decodes
// the buffered error response body into a *types.ClusterQuotaForCustomerExceededFault.
func awsAwsjson11_deserializeErrorClusterQuotaForCustomerExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.ClusterQuotaForCustomerExceededFault{}
	err := awsAwsjson11_deserializeDocumentClusterQuotaForCustomerExceededFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorDefaultUserRequired decodes the buffered error
// response body into a *types.DefaultUserRequired.
func awsAwsjson11_deserializeErrorDefaultUserRequired(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.DefaultUserRequired{}
	err := awsAwsjson11_deserializeDocumentDefaultUserRequired(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	// Rewind so any later reader sees the body from the start.
	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorDuplicateUserNameFault decodes the buffered
// error response body into a *types.DuplicateUserNameFault.
func awsAwsjson11_deserializeErrorDuplicateUserNameFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	// Tee through a ring buffer so decode failures carry a body snapshot.
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.DuplicateUserNameFault{}
	err := awsAwsjson11_deserializeDocumentDuplicateUserNameFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorInsufficientClusterCapacityFault decodes the
// buffered error response body into a *types.InsufficientClusterCapacityFault.
func awsAwsjson11_deserializeErrorInsufficientClusterCapacityFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.InsufficientClusterCapacityFault{}
	err := awsAwsjson11_deserializeDocumentInsufficientClusterCapacityFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorInvalidACLStateFault decodes the buffered
// error response body into a *types.InvalidACLStateFault.
func awsAwsjson11_deserializeErrorInvalidACLStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.InvalidACLStateFault{}
	err := awsAwsjson11_deserializeDocumentInvalidACLStateFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorInvalidARNFault decodes the buffered error
// response body into a *types.InvalidARNFault.
func awsAwsjson11_deserializeErrorInvalidARNFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.InvalidARNFault{}
	err := awsAwsjson11_deserializeDocumentInvalidARNFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorInvalidClusterStateFault decodes the buffered
// error response body into a *types.InvalidClusterStateFault.
func awsAwsjson11_deserializeErrorInvalidClusterStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.InvalidClusterStateFault{}
	err := awsAwsjson11_deserializeDocumentInvalidClusterStateFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorInvalidCredentialsException decodes the
// buffered error response body into a *types.InvalidCredentialsException.
func awsAwsjson11_deserializeErrorInvalidCredentialsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.InvalidCredentialsException{}
	err := awsAwsjson11_deserializeDocumentInvalidCredentialsException(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorInvalidKMSKeyFault decodes the buffered error
// response body into a *types.InvalidKMSKeyFault.
func awsAwsjson11_deserializeErrorInvalidKMSKeyFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	output := &types.InvalidKMSKeyFault{}
	err := awsAwsjson11_deserializeDocumentInvalidKMSKeyFault(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	return output
}

// awsAwsjson11_deserializeErrorInvalidNodeStateFault decodes the buffered
// error response body into a *types.InvalidNodeStateFault.
func awsAwsjson11_deserializeErrorInvalidNodeStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err !=
nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidNodeStateFault{} + err := awsAwsjson11_deserializeDocumentInvalidNodeStateFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidParameterCombinationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidParameterCombinationException{} + err := awsAwsjson11_deserializeDocumentInvalidParameterCombinationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidParameterGroupStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidParameterGroupStateFault{} + err := awsAwsjson11_deserializeDocumentInvalidParameterGroupStateFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidParameterValueException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidParameterValueException{} + err := awsAwsjson11_deserializeDocumentInvalidParameterValueException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidSnapshotStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidSnapshotStateFault{} + err := awsAwsjson11_deserializeDocumentInvalidSnapshotStateFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidSubnet(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidSubnet{} + err := awsAwsjson11_deserializeDocumentInvalidSubnet(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidUserStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + 
body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidUserStateFault{} + err := awsAwsjson11_deserializeDocumentInvalidUserStateFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidVPCNetworkStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidVPCNetworkStateFault{} + err := awsAwsjson11_deserializeDocumentInvalidVPCNetworkStateFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorNodeQuotaForClusterExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte 
+ ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.NodeQuotaForClusterExceededFault{} + err := awsAwsjson11_deserializeDocumentNodeQuotaForClusterExceededFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorNodeQuotaForCustomerExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.NodeQuotaForCustomerExceededFault{} + err := awsAwsjson11_deserializeDocumentNodeQuotaForCustomerExceededFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorNoOperationFault(response 
*smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.NoOperationFault{} + err := awsAwsjson11_deserializeDocumentNoOperationFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorParameterGroupAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ParameterGroupAlreadyExistsFault{} + err := awsAwsjson11_deserializeDocumentParameterGroupAlreadyExistsFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func 
awsAwsjson11_deserializeErrorParameterGroupNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ParameterGroupNotFoundFault{} + err := awsAwsjson11_deserializeDocumentParameterGroupNotFoundFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorParameterGroupQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ParameterGroupQuotaExceededFault{} + err := awsAwsjson11_deserializeDocumentParameterGroupQuotaExceededFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + 
return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorServiceLinkedRoleNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ServiceLinkedRoleNotFoundFault{} + err := awsAwsjson11_deserializeDocumentServiceLinkedRoleNotFoundFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorServiceUpdateNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ServiceUpdateNotFoundFault{} + err := awsAwsjson11_deserializeDocumentServiceUpdateNotFoundFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to 
decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorShardNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ShardNotFoundFault{} + err := awsAwsjson11_deserializeDocumentShardNotFoundFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorShardsPerClusterQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ShardsPerClusterQuotaExceededFault{} + err := awsAwsjson11_deserializeDocumentShardsPerClusterQuotaExceededFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSnapshotAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SnapshotAlreadyExistsFault{} + err := awsAwsjson11_deserializeDocumentSnapshotAlreadyExistsFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSnapshotNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SnapshotNotFoundFault{} + err := awsAwsjson11_deserializeDocumentSnapshotNotFoundFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSnapshotQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SnapshotQuotaExceededFault{} + err := awsAwsjson11_deserializeDocumentSnapshotQuotaExceededFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSubnetGroupAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SubnetGroupAlreadyExistsFault{} + err := 
awsAwsjson11_deserializeDocumentSubnetGroupAlreadyExistsFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSubnetGroupInUseFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SubnetGroupInUseFault{} + err := awsAwsjson11_deserializeDocumentSubnetGroupInUseFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSubnetGroupNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := 
&types.SubnetGroupNotFoundFault{} + err := awsAwsjson11_deserializeDocumentSubnetGroupNotFoundFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSubnetGroupQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SubnetGroupQuotaExceededFault{} + err := awsAwsjson11_deserializeDocumentSubnetGroupQuotaExceededFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSubnetInUse(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return err + } + + output := &types.SubnetInUse{} + err := awsAwsjson11_deserializeDocumentSubnetInUse(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSubnetNotAllowedFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SubnetNotAllowedFault{} + err := awsAwsjson11_deserializeDocumentSubnetNotAllowedFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSubnetQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SubnetQuotaExceededFault{} + err := awsAwsjson11_deserializeDocumentSubnetQuotaExceededFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTagNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TagNotFoundFault{} + err := awsAwsjson11_deserializeDocumentTagNotFoundFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTagQuotaPerResourceExceeded(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TagQuotaPerResourceExceeded{} + err := awsAwsjson11_deserializeDocumentTagQuotaPerResourceExceeded(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTestFailoverNotAvailableFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TestFailoverNotAvailableFault{} + err := awsAwsjson11_deserializeDocumentTestFailoverNotAvailableFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUserAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UserAlreadyExistsFault{} + err := awsAwsjson11_deserializeDocumentUserAlreadyExistsFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUserNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UserNotFoundFault{} + err := awsAwsjson11_deserializeDocumentUserNotFoundFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUserQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UserQuotaExceededFault{} + err := awsAwsjson11_deserializeDocumentUserQuotaExceededFault(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeDocumentACL(v **types.ACL, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ACL + if *v == nil { + sv = &types.ACL{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ARN": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ARN = ptr.String(jtv) + } + + case "Clusters": + if err := awsAwsjson11_deserializeDocumentACLClusterNameList(&sv.Clusters, value); err != nil { + return err + } + + case "MinimumEngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.MinimumEngineVersion = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "PendingChanges": + if err := awsAwsjson11_deserializeDocumentACLPendingChanges(&sv.PendingChanges, value); err != nil { + return err + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + case "UserNames": + if err := awsAwsjson11_deserializeDocumentUserNameList(&sv.UserNames, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentACLAlreadyExistsFault(v **types.ACLAlreadyExistsFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ACLAlreadyExistsFault + if *v == nil { + sv = &types.ACLAlreadyExistsFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentACLClusterNameList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentACLList(v *[]types.ACL, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ACL + if *v == nil { + cv = []types.ACL{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ACL + destAddr := &col + if err := awsAwsjson11_deserializeDocumentACL(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentACLNameList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ACLName to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentACLNotFoundFault(v **types.ACLNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ACLNotFoundFault + if *v == nil { + sv = &types.ACLNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentACLPendingChanges(v **types.ACLPendingChanges, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ACLPendingChanges + if *v == nil { + sv = &types.ACLPendingChanges{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "UserNamesToAdd": + if err := awsAwsjson11_deserializeDocumentUserNameList(&sv.UserNamesToAdd, value); err != nil { + return err + } + + case "UserNamesToRemove": + if err := awsAwsjson11_deserializeDocumentUserNameList(&sv.UserNamesToRemove, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentACLQuotaExceededFault(v **types.ACLQuotaExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ACLQuotaExceededFault + if *v == nil { + sv = &types.ACLQuotaExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentACLsUpdateStatus(v **types.ACLsUpdateStatus, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ACLsUpdateStatus + if *v == nil { + sv = &types.ACLsUpdateStatus{} + } else { + sv = *v + } + + for key, value := range shape 
{ + switch key { + case "ACLToApply": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ACLName to be of type string, got %T instead", value) + } + sv.ACLToApply = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAPICallRateForCustomerExceededFault(v **types.APICallRateForCustomerExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.APICallRateForCustomerExceededFault + if *v == nil { + sv = &types.APICallRateForCustomerExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAuthentication(v **types.Authentication, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Authentication + if *v == nil { + sv = &types.Authentication{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "PasswordCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerOptional to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PasswordCount = ptr.Int32(int32(i64)) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok 
{ + return fmt.Errorf("expected AuthenticationType to be of type string, got %T instead", value) + } + sv.Type = types.AuthenticationType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAvailabilityZone(v **types.AvailabilityZone, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AvailabilityZone + if *v == nil { + sv = &types.AvailabilityZone{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCluster(v **types.Cluster, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Cluster + if *v == nil { + sv = &types.Cluster{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ACLName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ACLName to be of type string, got %T instead", value) + } + sv.ACLName = ptr.String(jtv) + } + + case "ARN": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ARN = ptr.String(jtv) + } + + case "AutoMinorVersionUpgrade": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T 
instead", value) + } + sv.AutoMinorVersionUpgrade = ptr.Bool(jtv) + } + + case "AvailabilityMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AZStatus to be of type string, got %T instead", value) + } + sv.AvailabilityMode = types.AZStatus(jtv) + } + + case "ClusterEndpoint": + if err := awsAwsjson11_deserializeDocumentEndpoint(&sv.ClusterEndpoint, value); err != nil { + return err + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "EnginePatchVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.EnginePatchVersion = ptr.String(jtv) + } + + case "EngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.EngineVersion = ptr.String(jtv) + } + + case "KmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + case "MaintenanceWindow": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.MaintenanceWindow = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "NodeType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NodeType = ptr.String(jtv) + } + + case "NumberOfShards": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + 
return fmt.Errorf("expected IntegerOptional to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.NumberOfShards = ptr.Int32(int32(i64)) + } + + case "ParameterGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ParameterGroupName = ptr.String(jtv) + } + + case "ParameterGroupStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ParameterGroupStatus = ptr.String(jtv) + } + + case "PendingUpdates": + if err := awsAwsjson11_deserializeDocumentClusterPendingUpdates(&sv.PendingUpdates, value); err != nil { + return err + } + + case "SecurityGroups": + if err := awsAwsjson11_deserializeDocumentSecurityGroupMembershipList(&sv.SecurityGroups, value); err != nil { + return err + } + + case "Shards": + if err := awsAwsjson11_deserializeDocumentShardList(&sv.Shards, value); err != nil { + return err + } + + case "SnapshotRetentionLimit": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerOptional to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SnapshotRetentionLimit = ptr.Int32(int32(i64)) + } + + case "SnapshotWindow": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SnapshotWindow = ptr.String(jtv) + } + + case "SnsTopicArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SnsTopicArn = ptr.String(jtv) + } + + case "SnsTopicStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + 
sv.SnsTopicStatus = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + case "SubnetGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SubnetGroupName = ptr.String(jtv) + } + + case "TLSEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", value) + } + sv.TLSEnabled = ptr.Bool(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterAlreadyExistsFault(v **types.ClusterAlreadyExistsFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterAlreadyExistsFault + if *v == nil { + sv = &types.ClusterAlreadyExistsFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterConfiguration(v **types.ClusterConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterConfiguration + if *v == nil { + sv = &types.ClusterConfiguration{} + } else { 
+ sv = *v + } + + for key, value := range shape { + switch key { + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "EngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.EngineVersion = ptr.String(jtv) + } + + case "MaintenanceWindow": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.MaintenanceWindow = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "NodeType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NodeType = ptr.String(jtv) + } + + case "NumShards": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerOptional to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.NumShards = ptr.Int32(int32(i64)) + } + + case "ParameterGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ParameterGroupName = ptr.String(jtv) + } + + case "Port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerOptional to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + + case "Shards": + if err := awsAwsjson11_deserializeDocumentShardDetails(&sv.Shards, value); err != nil { + 
return err + } + + case "SnapshotRetentionLimit": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerOptional to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SnapshotRetentionLimit = ptr.Int32(int32(i64)) + } + + case "SnapshotWindow": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SnapshotWindow = ptr.String(jtv) + } + + case "SubnetGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SubnetGroupName = ptr.String(jtv) + } + + case "TopicArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TopicArn = ptr.String(jtv) + } + + case "VpcId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.VpcId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterList(v *[]types.Cluster, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Cluster + if *v == nil { + cv = []types.Cluster{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Cluster + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCluster(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterNotFoundFault(v **types.ClusterNotFoundFault, value interface{}) 
error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterNotFoundFault + if *v == nil { + sv = &types.ClusterNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterPendingUpdates(v **types.ClusterPendingUpdates, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterPendingUpdates + if *v == nil { + sv = &types.ClusterPendingUpdates{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ACLs": + if err := awsAwsjson11_deserializeDocumentACLsUpdateStatus(&sv.ACLs, value); err != nil { + return err + } + + case "Resharding": + if err := awsAwsjson11_deserializeDocumentReshardingStatus(&sv.Resharding, value); err != nil { + return err + } + + case "ServiceUpdates": + if err := awsAwsjson11_deserializeDocumentPendingModifiedServiceUpdateList(&sv.ServiceUpdates, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentClusterQuotaForCustomerExceededFault(v **types.ClusterQuotaForCustomerExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClusterQuotaForCustomerExceededFault + if *v == nil { + sv = &types.ClusterQuotaForCustomerExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDefaultUserRequired(v **types.DefaultUserRequired, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DefaultUserRequired + if *v == nil { + sv = &types.DefaultUserRequired{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDuplicateUserNameFault(v **types.DuplicateUserNameFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DuplicateUserNameFault + if *v == nil { + sv = &types.DuplicateUserNameFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected 
ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEndpoint(v **types.Endpoint, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Endpoint + if *v == nil { + sv = &types.Endpoint{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Address": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Address = ptr.String(jtv) + } + + case "Port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEngineVersionInfo(v **types.EngineVersionInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EngineVersionInfo + if *v == nil { + sv = &types.EngineVersionInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EnginePatchVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.EnginePatchVersion = ptr.String(jtv) + } + + case "EngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected 
String to be of type string, got %T instead", value) + } + sv.EngineVersion = ptr.String(jtv) + } + + case "ParameterGroupFamily": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ParameterGroupFamily = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEngineVersionInfoList(v *[]types.EngineVersionInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.EngineVersionInfo + if *v == nil { + cv = []types.EngineVersionInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.EngineVersionInfo + destAddr := &col + if err := awsAwsjson11_deserializeDocumentEngineVersionInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentEvent(v **types.Event, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Event + if *v == nil { + sv = &types.Event{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Date": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Date = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected TStamp to be a JSON Number, got %T instead", value) + + } + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to 
be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "SourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SourceName = ptr.String(jtv) + } + + case "SourceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SourceType to be of type string, got %T instead", value) + } + sv.SourceType = types.SourceType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEventList(v *[]types.Event, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Event + if *v == nil { + cv = []types.Event{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Event + destAddr := &col + if err := awsAwsjson11_deserializeDocumentEvent(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentInsufficientClusterCapacityFault(v **types.InsufficientClusterCapacityFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InsufficientClusterCapacityFault + if *v == nil { + sv = &types.InsufficientClusterCapacityFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + 
default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidACLStateFault(v **types.InvalidACLStateFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidACLStateFault + if *v == nil { + sv = &types.InvalidACLStateFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidARNFault(v **types.InvalidARNFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidARNFault + if *v == nil { + sv = &types.InvalidARNFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidClusterStateFault(v **types.InvalidClusterStateFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*types.InvalidClusterStateFault + if *v == nil { + sv = &types.InvalidClusterStateFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidCredentialsException(v **types.InvalidCredentialsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidCredentialsException + if *v == nil { + sv = &types.InvalidCredentialsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidKMSKeyFault(v **types.InvalidKMSKeyFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidKMSKeyFault + if *v == nil { + sv = &types.InvalidKMSKeyFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, 
_ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidNodeStateFault(v **types.InvalidNodeStateFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidNodeStateFault + if *v == nil { + sv = &types.InvalidNodeStateFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidParameterCombinationException(v **types.InvalidParameterCombinationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidParameterCombinationException + if *v == nil { + sv = &types.InvalidParameterCombinationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AwsQueryErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidParameterGroupStateFault(v **types.InvalidParameterGroupStateFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidParameterGroupStateFault + if *v == nil { + sv = &types.InvalidParameterGroupStateFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidParameterValueException(v **types.InvalidParameterValueException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidParameterValueException + if *v == nil { + sv = &types.InvalidParameterValueException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AwsQueryErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidSnapshotStateFault(v **types.InvalidSnapshotStateFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidSnapshotStateFault + if *v == nil { + sv = &types.InvalidSnapshotStateFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidSubnet(v **types.InvalidSubnet, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidSubnet + if *v == nil { + sv = &types.InvalidSubnet{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidUserStateFault(v **types.InvalidUserStateFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidUserStateFault + if *v == nil { + sv = &types.InvalidUserStateFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidVPCNetworkStateFault(v **types.InvalidVPCNetworkStateFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if 
value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidVPCNetworkStateFault + if *v == nil { + sv = &types.InvalidVPCNetworkStateFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNode(v **types.Node, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Node + if *v == nil { + sv = &types.Node{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AvailabilityZone": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AvailabilityZone = ptr.String(jtv) + } + + case "CreateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected TStamp to be a JSON Number, got %T instead", value) + + } + } + + case "Endpoint": + if err := awsAwsjson11_deserializeDocumentEndpoint(&sv.Endpoint, value); err != nil { + return err + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok 
{ + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNodeList(v *[]types.Node, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Node + if *v == nil { + cv = []types.Node{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Node + destAddr := &col + if err := awsAwsjson11_deserializeDocumentNode(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNodeQuotaForClusterExceededFault(v **types.NodeQuotaForClusterExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NodeQuotaForClusterExceededFault + if *v == nil { + sv = &types.NodeQuotaForClusterExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNodeQuotaForCustomerExceededFault(v **types.NodeQuotaForCustomerExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NodeQuotaForCustomerExceededFault + if *v == nil { + sv = &types.NodeQuotaForCustomerExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNodeTypeList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNoOperationFault(v **types.NoOperationFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NoOperationFault + if *v == nil { + sv = &types.NoOperationFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeDocumentParameter(v **types.Parameter, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Parameter + if *v == nil { + sv = &types.Parameter{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AllowedValues": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AllowedValues = ptr.String(jtv) + } + + case "DataType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DataType = ptr.String(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "MinimumEngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.MinimumEngineVersion = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentParameterGroup(v **types.ParameterGroup, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + 
shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ParameterGroup + if *v == nil { + sv = &types.ParameterGroup{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ARN": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ARN = ptr.String(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Family": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Family = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentParameterGroupAlreadyExistsFault(v **types.ParameterGroupAlreadyExistsFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ParameterGroupAlreadyExistsFault + if *v == nil { + sv = &types.ParameterGroupAlreadyExistsFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeDocumentParameterGroupList(v *[]types.ParameterGroup, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ParameterGroup + if *v == nil { + cv = []types.ParameterGroup{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ParameterGroup + destAddr := &col + if err := awsAwsjson11_deserializeDocumentParameterGroup(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentParameterGroupNotFoundFault(v **types.ParameterGroupNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ParameterGroupNotFoundFault + if *v == nil { + sv = &types.ParameterGroupNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentParameterGroupQuotaExceededFault(v **types.ParameterGroupQuotaExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ParameterGroupQuotaExceededFault + if *v == nil { + sv = 
&types.ParameterGroupQuotaExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentParametersList(v *[]types.Parameter, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Parameter + if *v == nil { + cv = []types.Parameter{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Parameter + destAddr := &col + if err := awsAwsjson11_deserializeDocumentParameter(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentPendingModifiedServiceUpdate(v **types.PendingModifiedServiceUpdate, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PendingModifiedServiceUpdate + if *v == nil { + sv = &types.PendingModifiedServiceUpdate{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ServiceUpdateName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ServiceUpdateName = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ServiceUpdateStatus to be of type string, got %T 
instead", value) + } + sv.Status = types.ServiceUpdateStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPendingModifiedServiceUpdateList(v *[]types.PendingModifiedServiceUpdate, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PendingModifiedServiceUpdate + if *v == nil { + cv = []types.PendingModifiedServiceUpdate{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PendingModifiedServiceUpdate + destAddr := &col + if err := awsAwsjson11_deserializeDocumentPendingModifiedServiceUpdate(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentReshardingStatus(v **types.ReshardingStatus, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReshardingStatus + if *v == nil { + sv = &types.ReshardingStatus{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SlotMigration": + if err := awsAwsjson11_deserializeDocumentSlotMigration(&sv.SlotMigration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSecurityGroupMembership(v **types.SecurityGroupMembership, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*types.SecurityGroupMembership + if *v == nil { + sv = &types.SecurityGroupMembership{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SecurityGroupId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SecurityGroupId = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSecurityGroupMembershipList(v *[]types.SecurityGroupMembership, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SecurityGroupMembership + if *v == nil { + cv = []types.SecurityGroupMembership{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SecurityGroupMembership + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSecurityGroupMembership(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceLinkedRoleNotFoundFault(v **types.ServiceLinkedRoleNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceLinkedRoleNotFoundFault + if *v == nil { + sv = &types.ServiceLinkedRoleNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + 
jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceUpdate(v **types.ServiceUpdate, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceUpdate + if *v == nil { + sv = &types.ServiceUpdate{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AutoUpdateStartDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.AutoUpdateStartDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected TStamp to be a JSON Number, got %T instead", value) + + } + } + + case "ClusterName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ClusterName = ptr.String(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "NodesUpdated": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NodesUpdated = ptr.String(jtv) + } + + case "ReleaseDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ReleaseDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected TStamp to be a JSON Number, got %T 
instead", value) + + } + } + + case "ServiceUpdateName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ServiceUpdateName = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ServiceUpdateStatus to be of type string, got %T instead", value) + } + sv.Status = types.ServiceUpdateStatus(jtv) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ServiceUpdateType to be of type string, got %T instead", value) + } + sv.Type = types.ServiceUpdateType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceUpdateList(v *[]types.ServiceUpdate, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ServiceUpdate + if *v == nil { + cv = []types.ServiceUpdate{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ServiceUpdate + destAddr := &col + if err := awsAwsjson11_deserializeDocumentServiceUpdate(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentServiceUpdateNotFoundFault(v **types.ServiceUpdateNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceUpdateNotFoundFault + if *v == nil { + sv = &types.ServiceUpdateNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { 
+ case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentShard(v **types.Shard, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Shard + if *v == nil { + sv = &types.Shard{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Nodes": + if err := awsAwsjson11_deserializeDocumentNodeList(&sv.Nodes, value); err != nil { + return err + } + + case "NumberOfNodes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerOptional to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.NumberOfNodes = ptr.Int32(int32(i64)) + } + + case "Slots": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Slots = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentShardConfiguration(v **types.ShardConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if 
value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ShardConfiguration + if *v == nil { + sv = &types.ShardConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ReplicaCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerOptional to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ReplicaCount = ptr.Int32(int32(i64)) + } + + case "Slots": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Slots = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentShardDetail(v **types.ShardDetail, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ShardDetail + if *v == nil { + sv = &types.ShardDetail{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Configuration": + if err := awsAwsjson11_deserializeDocumentShardConfiguration(&sv.Configuration, value); err != nil { + return err + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Size": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Size = ptr.String(jtv) + } + + case "SnapshotCreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := 
jtv.Float64() + if err != nil { + return err + } + sv.SnapshotCreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected TStamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentShardDetails(v *[]types.ShardDetail, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ShardDetail + if *v == nil { + cv = []types.ShardDetail{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ShardDetail + destAddr := &col + if err := awsAwsjson11_deserializeDocumentShardDetail(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentShardList(v *[]types.Shard, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Shard + if *v == nil { + cv = []types.Shard{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Shard + destAddr := &col + if err := awsAwsjson11_deserializeDocumentShard(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentShardNotFoundFault(v **types.ShardNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*types.ShardNotFoundFault + if *v == nil { + sv = &types.ShardNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentShardsPerClusterQuotaExceededFault(v **types.ShardsPerClusterQuotaExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ShardsPerClusterQuotaExceededFault + if *v == nil { + sv = &types.ShardsPerClusterQuotaExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSlotMigration(v **types.SlotMigration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SlotMigration + if *v == nil { + sv = &types.SlotMigration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ProgressPercentage": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ProgressPercentage = f64 + + case string: + var f64 float64 + 
switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.ProgressPercentage = f64 + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnapshot(v **types.Snapshot, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Snapshot + if *v == nil { + sv = &types.Snapshot{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ARN": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ARN = ptr.String(jtv) + } + + case "ClusterConfiguration": + if err := awsAwsjson11_deserializeDocumentClusterConfiguration(&sv.ClusterConfiguration, value); err != nil { + return err + } + + case "KmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Source": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Source = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnapshotAlreadyExistsFault(v **types.SnapshotAlreadyExistsFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnapshotAlreadyExistsFault + if *v == nil { + sv = &types.SnapshotAlreadyExistsFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnapshotList(v *[]types.Snapshot, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Snapshot + if *v == nil { + cv = []types.Snapshot{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Snapshot + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSnapshot(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentSnapshotNotFoundFault(v **types.SnapshotNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type 
%v", value) + } + + var sv *types.SnapshotNotFoundFault + if *v == nil { + sv = &types.SnapshotNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnapshotQuotaExceededFault(v **types.SnapshotQuotaExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnapshotQuotaExceededFault + if *v == nil { + sv = &types.SnapshotQuotaExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnet(v **types.Subnet, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Subnet + if *v == nil { + sv = &types.Subnet{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AvailabilityZone": + if err := awsAwsjson11_deserializeDocumentAvailabilityZone(&sv.AvailabilityZone, value); err != nil { + return err + } + + case "Identifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Identifier = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetGroup(v **types.SubnetGroup, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SubnetGroup + if *v == nil { + sv = &types.SubnetGroup{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ARN": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ARN = ptr.String(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Subnets": + if err := awsAwsjson11_deserializeDocumentSubnetList(&sv.Subnets, value); err != nil { + return err + } + + case "VpcId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.VpcId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetGroupAlreadyExistsFault(v **types.SubnetGroupAlreadyExistsFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type 
%v", value) + } + + var sv *types.SubnetGroupAlreadyExistsFault + if *v == nil { + sv = &types.SubnetGroupAlreadyExistsFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetGroupInUseFault(v **types.SubnetGroupInUseFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SubnetGroupInUseFault + if *v == nil { + sv = &types.SubnetGroupInUseFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetGroupList(v *[]types.SubnetGroup, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SubnetGroup + if *v == nil { + cv = []types.SubnetGroup{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SubnetGroup + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSubnetGroup(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func 
awsAwsjson11_deserializeDocumentSubnetGroupNotFoundFault(v **types.SubnetGroupNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SubnetGroupNotFoundFault + if *v == nil { + sv = &types.SubnetGroupNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetGroupQuotaExceededFault(v **types.SubnetGroupQuotaExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SubnetGroupQuotaExceededFault + if *v == nil { + sv = &types.SubnetGroupQuotaExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetInUse(v **types.SubnetInUse, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SubnetInUse + if *v == 
nil { + sv = &types.SubnetInUse{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetList(v *[]types.Subnet, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Subnet + if *v == nil { + cv = []types.Subnet{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Subnet + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSubnet(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetNotAllowedFault(v **types.SubnetNotAllowedFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SubnetNotAllowedFault + if *v == nil { + sv = &types.SubnetNotAllowedFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSubnetQuotaExceededFault(v **types.SubnetQuotaExceededFault, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SubnetQuotaExceededFault + if *v == nil { + sv = &types.SubnetQuotaExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Key": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Key = ptr.String(jtv) + } + + case "Value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tag + if *v == nil { + cv = []types.Tag{} + } else { + cv = *v + } + + for _, value := range shape { + var col 
types.Tag + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTagNotFoundFault(v **types.TagNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TagNotFoundFault + if *v == nil { + sv = &types.TagNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTagQuotaPerResourceExceeded(v **types.TagQuotaPerResourceExceeded, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TagQuotaPerResourceExceeded + if *v == nil { + sv = &types.TagQuotaPerResourceExceeded{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTestFailoverNotAvailableFault(v **types.TestFailoverNotAvailableFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", 
v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TestFailoverNotAvailableFault + if *v == nil { + sv = &types.TestFailoverNotAvailableFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUnprocessedCluster(v **types.UnprocessedCluster, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnprocessedCluster + if *v == nil { + sv = &types.UnprocessedCluster{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ClusterName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ClusterName = ptr.String(jtv) + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + case "ErrorType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ErrorType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUnprocessedClusterList(v *[]types.UnprocessedCluster, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil 
of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.UnprocessedCluster + if *v == nil { + cv = []types.UnprocessedCluster{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.UnprocessedCluster + destAddr := &col + if err := awsAwsjson11_deserializeDocumentUnprocessedCluster(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUser(v **types.User, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.User + if *v == nil { + sv = &types.User{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AccessString": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AccessString = ptr.String(jtv) + } + + case "ACLNames": + if err := awsAwsjson11_deserializeDocumentACLNameList(&sv.ACLNames, value); err != nil { + return err + } + + case "ARN": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ARN = ptr.String(jtv) + } + + case "Authentication": + if err := awsAwsjson11_deserializeDocumentAuthentication(&sv.Authentication, value); err != nil { + return err + } + + case "MinimumEngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.MinimumEngineVersion = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUserAlreadyExistsFault(v **types.UserAlreadyExistsFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UserAlreadyExistsFault + if *v == nil { + sv = &types.UserAlreadyExistsFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUserList(v *[]types.User, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.User + if *v == nil { + cv = []types.User{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.User + destAddr := &col + if err := awsAwsjson11_deserializeDocumentUser(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUserNameList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value 
== nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserName to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUserNotFoundFault(v **types.UserNotFoundFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UserNotFoundFault + if *v == nil { + sv = &types.UserNotFoundFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUserQuotaExceededFault(v **types.UserQuotaExceededFault, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UserQuotaExceededFault + if *v == nil { + sv = &types.UserQuotaExceededFault{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + 
+ default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchUpdateClusterOutput(v **BatchUpdateClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchUpdateClusterOutput + if *v == nil { + sv = &BatchUpdateClusterOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ProcessedClusters": + if err := awsAwsjson11_deserializeDocumentClusterList(&sv.ProcessedClusters, value); err != nil { + return err + } + + case "UnprocessedClusters": + if err := awsAwsjson11_deserializeDocumentUnprocessedClusterList(&sv.UnprocessedClusters, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCopySnapshotOutput(v **CopySnapshotOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CopySnapshotOutput + if *v == nil { + sv = &CopySnapshotOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Snapshot": + if err := awsAwsjson11_deserializeDocumentSnapshot(&sv.Snapshot, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateACLOutput(v **CreateACLOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*CreateACLOutput + if *v == nil { + sv = &CreateACLOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ACL": + if err := awsAwsjson11_deserializeDocumentACL(&sv.ACL, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateClusterOutput(v **CreateClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateClusterOutput + if *v == nil { + sv = &CreateClusterOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateParameterGroupOutput(v **CreateParameterGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateParameterGroupOutput + if *v == nil { + sv = &CreateParameterGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ParameterGroup": + if err := awsAwsjson11_deserializeDocumentParameterGroup(&sv.ParameterGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateSnapshotOutput(v **CreateSnapshotOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateSnapshotOutput + if *v == nil { + sv = &CreateSnapshotOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Snapshot": + if err := awsAwsjson11_deserializeDocumentSnapshot(&sv.Snapshot, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateSubnetGroupOutput(v **CreateSubnetGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateSubnetGroupOutput + if *v == nil { + sv = &CreateSubnetGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SubnetGroup": + if err := awsAwsjson11_deserializeDocumentSubnetGroup(&sv.SubnetGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateUserOutput(v **CreateUserOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateUserOutput + if *v == nil { + sv = &CreateUserOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "User": + if err := awsAwsjson11_deserializeDocumentUser(&sv.User, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteACLOutput(v **DeleteACLOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", 
v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteACLOutput + if *v == nil { + sv = &DeleteACLOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ACL": + if err := awsAwsjson11_deserializeDocumentACL(&sv.ACL, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteClusterOutput(v **DeleteClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteClusterOutput + if *v == nil { + sv = &DeleteClusterOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteParameterGroupOutput(v **DeleteParameterGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteParameterGroupOutput + if *v == nil { + sv = &DeleteParameterGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ParameterGroup": + if err := awsAwsjson11_deserializeDocumentParameterGroup(&sv.ParameterGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteSnapshotOutput(v **DeleteSnapshotOutput, value 
interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteSnapshotOutput + if *v == nil { + sv = &DeleteSnapshotOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Snapshot": + if err := awsAwsjson11_deserializeDocumentSnapshot(&sv.Snapshot, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteSubnetGroupOutput(v **DeleteSubnetGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteSubnetGroupOutput + if *v == nil { + sv = &DeleteSubnetGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SubnetGroup": + if err := awsAwsjson11_deserializeDocumentSubnetGroup(&sv.SubnetGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteUserOutput(v **DeleteUserOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteUserOutput + if *v == nil { + sv = &DeleteUserOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "User": + if err := awsAwsjson11_deserializeDocumentUser(&sv.User, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeOpDocumentDescribeACLsOutput(v **DescribeACLsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeACLsOutput + if *v == nil { + sv = &DescribeACLsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ACLs": + if err := awsAwsjson11_deserializeDocumentACLList(&sv.ACLs, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeClustersOutput(v **DescribeClustersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeClustersOutput + if *v == nil { + sv = &DescribeClustersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Clusters": + if err := awsAwsjson11_deserializeDocumentClusterList(&sv.Clusters, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeEngineVersionsOutput(v **DescribeEngineVersionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == 
nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeEngineVersionsOutput + if *v == nil { + sv = &DescribeEngineVersionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EngineVersions": + if err := awsAwsjson11_deserializeDocumentEngineVersionInfoList(&sv.EngineVersions, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeEventsOutput(v **DescribeEventsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeEventsOutput + if *v == nil { + sv = &DescribeEventsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Events": + if err := awsAwsjson11_deserializeDocumentEventList(&sv.Events, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeParameterGroupsOutput(v **DescribeParameterGroupsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var 
sv *DescribeParameterGroupsOutput + if *v == nil { + sv = &DescribeParameterGroupsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "ParameterGroups": + if err := awsAwsjson11_deserializeDocumentParameterGroupList(&sv.ParameterGroups, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeParametersOutput(v **DescribeParametersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeParametersOutput + if *v == nil { + sv = &DescribeParametersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "Parameters": + if err := awsAwsjson11_deserializeDocumentParametersList(&sv.Parameters, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeServiceUpdatesOutput(v **DescribeServiceUpdatesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeServiceUpdatesOutput + if *v == nil { + sv = &DescribeServiceUpdatesOutput{} + } else { + sv = *v + } + + for 
key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "ServiceUpdates": + if err := awsAwsjson11_deserializeDocumentServiceUpdateList(&sv.ServiceUpdates, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeSnapshotsOutput(v **DescribeSnapshotsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeSnapshotsOutput + if *v == nil { + sv = &DescribeSnapshotsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "Snapshots": + if err := awsAwsjson11_deserializeDocumentSnapshotList(&sv.Snapshots, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeSubnetGroupsOutput(v **DescribeSubnetGroupsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeSubnetGroupsOutput + if *v == nil { + sv = &DescribeSubnetGroupsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "SubnetGroups": + if err := awsAwsjson11_deserializeDocumentSubnetGroupList(&sv.SubnetGroups, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeUsersOutput(v **DescribeUsersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeUsersOutput + if *v == nil { + sv = &DescribeUsersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "Users": + if err := awsAwsjson11_deserializeDocumentUserList(&sv.Users, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentFailoverShardOutput(v **FailoverShardOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *FailoverShardOutput + if *v == nil { + sv = &FailoverShardOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListAllowedNodeTypeUpdatesOutput(v 
**ListAllowedNodeTypeUpdatesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAllowedNodeTypeUpdatesOutput + if *v == nil { + sv = &ListAllowedNodeTypeUpdatesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ScaleDownNodeTypes": + if err := awsAwsjson11_deserializeDocumentNodeTypeList(&sv.ScaleDownNodeTypes, value); err != nil { + return err + } + + case "ScaleUpNodeTypes": + if err := awsAwsjson11_deserializeDocumentNodeTypeList(&sv.ScaleUpNodeTypes, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTagsOutput(v **ListTagsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagsOutput + if *v == nil { + sv = &ListTagsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TagList": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.TagList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentResetParameterGroupOutput(v **ResetParameterGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ResetParameterGroupOutput + if *v == nil { + sv = &ResetParameterGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape 
{ + switch key { + case "ParameterGroup": + if err := awsAwsjson11_deserializeDocumentParameterGroup(&sv.ParameterGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentTagResourceOutput(v **TagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *TagResourceOutput + if *v == nil { + sv = &TagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TagList": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.TagList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUntagResourceOutput(v **UntagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UntagResourceOutput + if *v == nil { + sv = &UntagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TagList": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.TagList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateACLOutput(v **UpdateACLOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateACLOutput + if *v == nil { + sv = &UpdateACLOutput{} + } 
else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ACL": + if err := awsAwsjson11_deserializeDocumentACL(&sv.ACL, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateClusterOutput(v **UpdateClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateClusterOutput + if *v == nil { + sv = &UpdateClusterOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Cluster": + if err := awsAwsjson11_deserializeDocumentCluster(&sv.Cluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateParameterGroupOutput(v **UpdateParameterGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateParameterGroupOutput + if *v == nil { + sv = &UpdateParameterGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ParameterGroup": + if err := awsAwsjson11_deserializeDocumentParameterGroup(&sv.ParameterGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateSubnetGroupOutput(v **UpdateSubnetGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON 
type %v", value) + } + + var sv *UpdateSubnetGroupOutput + if *v == nil { + sv = &UpdateSubnetGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SubnetGroup": + if err := awsAwsjson11_deserializeDocumentSubnetGroup(&sv.SubnetGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateUserOutput(v **UpdateUserOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateUserOutput + if *v == nil { + sv = &UpdateUserOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "User": + if err := awsAwsjson11_deserializeDocumentUser(&sv.User, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/service/memorydb/doc.go b/service/memorydb/doc.go new file mode 100644 index 00000000000..7163bdb8272 --- /dev/null +++ b/service/memorydb/doc.go @@ -0,0 +1,12 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package memorydb provides the API client, operations, and parameter types for +// Amazon MemoryDB. +// +// Amazon MemoryDB for Redis is a fully managed, Redis-compatible, in-memory +// database that delivers ultra-fast performance and Multi-AZ durability for modern +// applications built using microservices architectures. MemoryDB stores the entire +// database in-memory, enabling low latency and high throughput data access. It is +// compatible with Redis, a popular open source data store, enabling you to +// leverage Redis’ flexible and friendly data structures, APIs, and commands. 
+package memorydb diff --git a/service/memorydb/endpoints.go b/service/memorydb/endpoints.go new file mode 100644 index 00000000000..a256af32720 --- /dev/null +++ b/service/memorydb/endpoints.go @@ -0,0 +1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/memorydb/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. 
By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), m.Options) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "memorydb" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = 
awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolver + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint +// resolution to the awsResolver. If awsResolver returns aws.EndpointNotFoundError +// error, the resolver will use the the provided fallbackResolver for resolution. 
+// awsResolver and fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, fallbackResolver EndpointResolver) EndpointResolver { + return &wrappedEndpointResolver{ + awsResolver: awsResolver, + resolver: fallbackResolver, + } +} diff --git a/service/memorydb/generated.json b/service/memorydb/generated.json new file mode 100644 index 00000000000..9d3a5dd30a7 --- /dev/null +++ b/service/memorydb/generated.json @@ -0,0 +1,59 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_op_BatchUpdateCluster.go", + "api_op_CopySnapshot.go", + "api_op_CreateACL.go", + "api_op_CreateCluster.go", + "api_op_CreateParameterGroup.go", + "api_op_CreateSnapshot.go", + "api_op_CreateSubnetGroup.go", + "api_op_CreateUser.go", + "api_op_DeleteACL.go", + "api_op_DeleteCluster.go", + "api_op_DeleteParameterGroup.go", + "api_op_DeleteSnapshot.go", + "api_op_DeleteSubnetGroup.go", + "api_op_DeleteUser.go", + "api_op_DescribeACLs.go", + "api_op_DescribeClusters.go", + "api_op_DescribeEngineVersions.go", + "api_op_DescribeEvents.go", + "api_op_DescribeParameterGroups.go", + "api_op_DescribeParameters.go", + "api_op_DescribeServiceUpdates.go", + "api_op_DescribeSnapshots.go", + "api_op_DescribeSubnetGroups.go", + "api_op_DescribeUsers.go", + "api_op_FailoverShard.go", + "api_op_ListAllowedNodeTypeUpdates.go", + "api_op_ListTags.go", + "api_op_ResetParameterGroup.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + "api_op_UpdateACL.go", + "api_op_UpdateCluster.go", + "api_op_UpdateParameterGroup.go", + "api_op_UpdateSubnetGroup.go", + "api_op_UpdateUser.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + 
"module": "github.com/aws/aws-sdk-go-v2/service/memorydb", + "unstable": false +} diff --git a/service/memorydb/go.mod b/service/memorydb/go.mod new file mode 100644 index 00000000000..e46d285ae64 --- /dev/null +++ b/service/memorydb/go.mod @@ -0,0 +1,10 @@ +module github.com/aws/aws-sdk-go-v2/service/memorydb + +go 1.15 + +require ( + github.com/aws/aws-sdk-go-v2 v1.8.0 + github.com/aws/smithy-go v1.7.0 +) + +replace github.com/aws/aws-sdk-go-v2 => ../../ diff --git a/service/memorydb/go.sum b/service/memorydb/go.sum new file mode 100644 index 00000000000..1fe5ea17219 --- /dev/null +++ b/service/memorydb/go.sum @@ -0,0 +1,14 @@ +github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/service/memorydb/go_module_metadata.go 
b/service/memorydb/go_module_metadata.go new file mode 100644 index 00000000000..617a5a0a523 --- /dev/null +++ b/service/memorydb/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package memorydb + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "tip" diff --git a/service/memorydb/internal/endpoints/endpoints.go b/service/memorydb/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..1220d9969b1 --- /dev/null +++ b/service/memorydb/internal/endpoints/endpoints.go @@ -0,0 +1,106 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + DisableHTTPS bool +} + +// Resolver MemoryDB endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := endpoints.Options{ + DisableHTTPS: options.DisableHTTPS, + } + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = 
endpoints.Partitions{ + { + ID: "aws", + Defaults: endpoints.Endpoint{ + Hostname: "memory-db.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + }, + { + ID: "aws-cn", + Defaults: endpoints.Endpoint{ + Hostname: "memory-db.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: endpoints.Endpoint{ + Hostname: "memory-db.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: endpoints.Endpoint{ + Hostname: "memory-db.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: endpoints.Endpoint{ + Hostname: "memory-db.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + }, +} diff --git a/service/memorydb/internal/endpoints/endpoints_test.go b/service/memorydb/internal/endpoints/endpoints_test.go new file mode 100644 index 00000000000..08e5da2d833 --- /dev/null +++ b/service/memorydb/internal/endpoints/endpoints_test.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "testing" +) + +func TestRegexCompile(t *testing.T) { + _ = defaultPartitions +} diff --git a/service/memorydb/protocol_test.go b/service/memorydb/protocol_test.go new file mode 100644 index 00000000000..38fc533870d --- /dev/null +++ b/service/memorydb/protocol_test.go @@ -0,0 +1,3 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package memorydb diff --git a/service/memorydb/serializers.go b/service/memorydb/serializers.go new file mode 100644 index 00000000000..cb84f1c0dfb --- /dev/null +++ b/service/memorydb/serializers.go @@ -0,0 +1,2883 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsAwsjson11_serializeOpBatchUpdateCluster struct { +} + +func (*awsAwsjson11_serializeOpBatchUpdateCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchUpdateCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchUpdateClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.BatchUpdateCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentBatchUpdateClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCopySnapshot struct { +} + +func (*awsAwsjson11_serializeOpCopySnapshot) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCopySnapshot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CopySnapshotInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.CopySnapshot") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCopySnapshotInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateACL struct { +} + +func (*awsAwsjson11_serializeOpCreateACL) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateACL) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateACLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.CreateACL") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateACLInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, 
metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateCluster struct { +} + +func (*awsAwsjson11_serializeOpCreateCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.CreateCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateParameterGroup struct { +} + +func 
(*awsAwsjson11_serializeOpCreateParameterGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateParameterGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateParameterGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.CreateParameterGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateParameterGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateSnapshot struct { +} + +func (*awsAwsjson11_serializeOpCreateSnapshot) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateSnapshot) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateSnapshotInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.CreateSnapshot") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateSnapshotInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateSubnetGroup struct { +} + +func (*awsAwsjson11_serializeOpCreateSubnetGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateSubnetGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateSubnetGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.CreateSubnetGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateSubnetGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateUser struct { +} + +func (*awsAwsjson11_serializeOpCreateUser) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateUser) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateUserInput) + _ = input + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.CreateUser") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateUserInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteACL struct { +} + +func (*awsAwsjson11_serializeOpDeleteACL) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteACL) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteACLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, 
request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DeleteACL") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteACLInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteCluster struct { +} + +func (*awsAwsjson11_serializeOpDeleteCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DeleteCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteParameterGroup struct { +} + +func (*awsAwsjson11_serializeOpDeleteParameterGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteParameterGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteParameterGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DeleteParameterGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentDeleteParameterGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteSnapshot struct { +} + +func (*awsAwsjson11_serializeOpDeleteSnapshot) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteSnapshot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteSnapshotInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DeleteSnapshot") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteSnapshotInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteSubnetGroup struct { +} + +func (*awsAwsjson11_serializeOpDeleteSubnetGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteSubnetGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteSubnetGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DeleteSubnetGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteSubnetGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteUser struct { +} + +func (*awsAwsjson11_serializeOpDeleteUser) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteUser) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteUserInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DeleteUser") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteUserInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeACLs 
struct { +} + +func (*awsAwsjson11_serializeOpDescribeACLs) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeACLs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeACLsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeACLs") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeACLsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeClusters struct { +} + +func (*awsAwsjson11_serializeOpDescribeClusters) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeClusters) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeClustersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeClusters") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeClustersInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeEngineVersions struct { +} + +func (*awsAwsjson11_serializeOpDescribeEngineVersions) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeEngineVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := 
in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeEngineVersionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeEngineVersions") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeEngineVersionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeEvents struct { +} + +func (*awsAwsjson11_serializeOpDescribeEvents) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeEvents) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*DescribeEventsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeEvents") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeEventsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeParameterGroups struct { +} + +func (*awsAwsjson11_serializeOpDescribeParameterGroups) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeParameterGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeParameterGroupsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} 
+ } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeParameterGroups") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeParameterGroupsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeParameters struct { +} + +func (*awsAwsjson11_serializeOpDescribeParameters) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeParameters) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeParametersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) 
+ if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeParameters") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeParametersInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeServiceUpdates struct { +} + +func (*awsAwsjson11_serializeOpDescribeServiceUpdates) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeServiceUpdates) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeServiceUpdatesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeServiceUpdates") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeServiceUpdatesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeSnapshots struct { +} + +func (*awsAwsjson11_serializeOpDescribeSnapshots) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeSnapshots) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeSnapshotsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeSnapshots") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentDescribeSnapshotsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeSubnetGroups struct { +} + +func (*awsAwsjson11_serializeOpDescribeSubnetGroups) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeSubnetGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeSubnetGroupsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeSubnetGroups") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeSubnetGroupsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, 
err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeUsers struct { +} + +func (*awsAwsjson11_serializeOpDescribeUsers) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeUsers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeUsersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.DescribeUsers") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeUsersInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err 
!= nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpFailoverShard struct { +} + +func (*awsAwsjson11_serializeOpFailoverShard) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpFailoverShard) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*FailoverShardInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.FailoverShard") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentFailoverShardInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListAllowedNodeTypeUpdates struct { +} + +func 
(*awsAwsjson11_serializeOpListAllowedNodeTypeUpdates) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListAllowedNodeTypeUpdates) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAllowedNodeTypeUpdatesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.ListAllowedNodeTypeUpdates") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListAllowedNodeTypeUpdatesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListTags struct { +} + +func (*awsAwsjson11_serializeOpListTags) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTags) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.ListTags") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTagsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpResetParameterGroup struct { +} + +func (*awsAwsjson11_serializeOpResetParameterGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpResetParameterGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if 
!ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ResetParameterGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.ResetParameterGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentResetParameterGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpTagResource struct { +} + +func (*awsAwsjson11_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, 
metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.TagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUntagResource struct { +} + +func (*awsAwsjson11_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := 
httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.UntagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateACL struct { +} + +func (*awsAwsjson11_serializeOpUpdateACL) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateACL) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateACLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.UpdateACL") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateACLInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateCluster struct { +} + +func (*awsAwsjson11_serializeOpUpdateCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.UpdateCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentUpdateClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateParameterGroup struct { +} + +func (*awsAwsjson11_serializeOpUpdateParameterGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateParameterGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateParameterGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.UpdateParameterGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateParameterGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateSubnetGroup struct { +} + +func (*awsAwsjson11_serializeOpUpdateSubnetGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateSubnetGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateSubnetGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.UpdateSubnetGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateSubnetGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateUser struct { +} + +func (*awsAwsjson11_serializeOpUpdateUser) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateUser) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateUserInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonMemoryDB.UpdateUser") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateUserInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func 
awsAwsjson11_serializeDocumentAuthenticationMode(v *types.AuthenticationMode, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Passwords != nil { + ok := object.Key("Passwords") + if err := awsAwsjson11_serializeDocumentPasswordListInput(v.Passwords, ok); err != nil { + return err + } + } + + if len(v.Type) > 0 { + ok := object.Key("Type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentClusterNameList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentFilter(v *types.Filter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Values != nil { + ok := object.Key("Values") + if err := awsAwsjson11_serializeDocumentFilterValueList(v.Values, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentFilterList(v []types.Filter, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentFilter(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentFilterValueList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentParameterNameList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := 
array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentParameterNameValue(v *types.ParameterNameValue, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ParameterName != nil { + ok := object.Key("ParameterName") + ok.String(*v.ParameterName) + } + + if v.ParameterValue != nil { + ok := object.Key("ParameterValue") + ok.String(*v.ParameterValue) + } + + return nil +} + +func awsAwsjson11_serializeDocumentParameterNameValueList(v []types.ParameterNameValue, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentParameterNameValue(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentPasswordListInput(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentReplicaConfigurationRequest(v *types.ReplicaConfigurationRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ReplicaCount != 0 { + ok := object.Key("ReplicaCount") + ok.Integer(v.ReplicaCount) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSecurityGroupIdsList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentServiceUpdateRequest(v *types.ServiceUpdateRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ServiceUpdateNameToApply != nil { + ok := object.Key("ServiceUpdateNameToApply") + ok.String(*v.ServiceUpdateNameToApply) + } + + return nil +} + +func awsAwsjson11_serializeDocumentServiceUpdateStatusList(v []types.ServiceUpdateStatus, value smithyjson.Value) error { + 
array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentShardConfigurationRequest(v *types.ShardConfigurationRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ShardCount != 0 { + ok := object.Key("ShardCount") + ok.Integer(v.ShardCount) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSnapshotArnsList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentSubnetIdentifierList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Key != nil { + ok := object.Key("Key") + ok.String(*v.Key) + } + + if v.Value != nil { + ok := object.Key("Value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentUserNameListInput(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchUpdateClusterInput(v *BatchUpdateClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClusterNames != nil { + ok := object.Key("ClusterNames") + if err := 
awsAwsjson11_serializeDocumentClusterNameList(v.ClusterNames, ok); err != nil { + return err + } + } + + if v.ServiceUpdate != nil { + ok := object.Key("ServiceUpdate") + if err := awsAwsjson11_serializeDocumentServiceUpdateRequest(v.ServiceUpdate, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCopySnapshotInput(v *CopySnapshotInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KmsKeyId != nil { + ok := object.Key("KmsKeyId") + ok.String(*v.KmsKeyId) + } + + if v.SourceSnapshotName != nil { + ok := object.Key("SourceSnapshotName") + ok.String(*v.SourceSnapshotName) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.TargetBucket != nil { + ok := object.Key("TargetBucket") + ok.String(*v.TargetBucket) + } + + if v.TargetSnapshotName != nil { + ok := object.Key("TargetSnapshotName") + ok.String(*v.TargetSnapshotName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateACLInput(v *CreateACLInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ACLName != nil { + ok := object.Key("ACLName") + ok.String(*v.ACLName) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.UserNames != nil { + ok := object.Key("UserNames") + if err := awsAwsjson11_serializeDocumentUserNameListInput(v.UserNames, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateClusterInput(v *CreateClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ACLName != nil { + ok := object.Key("ACLName") + ok.String(*v.ACLName) + } + + if v.AutoMinorVersionUpgrade != nil { + ok := object.Key("AutoMinorVersionUpgrade") + 
ok.Boolean(*v.AutoMinorVersionUpgrade) + } + + if v.ClusterName != nil { + ok := object.Key("ClusterName") + ok.String(*v.ClusterName) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.EngineVersion != nil { + ok := object.Key("EngineVersion") + ok.String(*v.EngineVersion) + } + + if v.KmsKeyId != nil { + ok := object.Key("KmsKeyId") + ok.String(*v.KmsKeyId) + } + + if v.MaintenanceWindow != nil { + ok := object.Key("MaintenanceWindow") + ok.String(*v.MaintenanceWindow) + } + + if v.NodeType != nil { + ok := object.Key("NodeType") + ok.String(*v.NodeType) + } + + if v.NumReplicasPerShard != nil { + ok := object.Key("NumReplicasPerShard") + ok.Integer(*v.NumReplicasPerShard) + } + + if v.NumShards != nil { + ok := object.Key("NumShards") + ok.Integer(*v.NumShards) + } + + if v.ParameterGroupName != nil { + ok := object.Key("ParameterGroupName") + ok.String(*v.ParameterGroupName) + } + + if v.Port != nil { + ok := object.Key("Port") + ok.Integer(*v.Port) + } + + if v.SecurityGroupIds != nil { + ok := object.Key("SecurityGroupIds") + if err := awsAwsjson11_serializeDocumentSecurityGroupIdsList(v.SecurityGroupIds, ok); err != nil { + return err + } + } + + if v.SnapshotArns != nil { + ok := object.Key("SnapshotArns") + if err := awsAwsjson11_serializeDocumentSnapshotArnsList(v.SnapshotArns, ok); err != nil { + return err + } + } + + if v.SnapshotName != nil { + ok := object.Key("SnapshotName") + ok.String(*v.SnapshotName) + } + + if v.SnapshotRetentionLimit != nil { + ok := object.Key("SnapshotRetentionLimit") + ok.Integer(*v.SnapshotRetentionLimit) + } + + if v.SnapshotWindow != nil { + ok := object.Key("SnapshotWindow") + ok.String(*v.SnapshotWindow) + } + + if v.SnsTopicArn != nil { + ok := object.Key("SnsTopicArn") + ok.String(*v.SnsTopicArn) + } + + if v.SubnetGroupName != nil { + ok := object.Key("SubnetGroupName") + ok.String(*v.SubnetGroupName) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if 
err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.TLSEnabled != nil { + ok := object.Key("TLSEnabled") + ok.Boolean(*v.TLSEnabled) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateParameterGroupInput(v *CreateParameterGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.Family != nil { + ok := object.Key("Family") + ok.String(*v.Family) + } + + if v.ParameterGroupName != nil { + ok := object.Key("ParameterGroupName") + ok.String(*v.ParameterGroupName) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateSnapshotInput(v *CreateSnapshotInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClusterName != nil { + ok := object.Key("ClusterName") + ok.String(*v.ClusterName) + } + + if v.KmsKeyId != nil { + ok := object.Key("KmsKeyId") + ok.String(*v.KmsKeyId) + } + + if v.SnapshotName != nil { + ok := object.Key("SnapshotName") + ok.String(*v.SnapshotName) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateSubnetGroupInput(v *CreateSubnetGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.SubnetGroupName != nil { + ok := object.Key("SubnetGroupName") + ok.String(*v.SubnetGroupName) + } + + if v.SubnetIds != nil { + ok := object.Key("SubnetIds") + if err := awsAwsjson11_serializeDocumentSubnetIdentifierList(v.SubnetIds, ok); err != nil { + return err + 
} + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateUserInput(v *CreateUserInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AccessString != nil { + ok := object.Key("AccessString") + ok.String(*v.AccessString) + } + + if v.AuthenticationMode != nil { + ok := object.Key("AuthenticationMode") + if err := awsAwsjson11_serializeDocumentAuthenticationMode(v.AuthenticationMode, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.UserName != nil { + ok := object.Key("UserName") + ok.String(*v.UserName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteACLInput(v *DeleteACLInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ACLName != nil { + ok := object.Key("ACLName") + ok.String(*v.ACLName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteClusterInput(v *DeleteClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClusterName != nil { + ok := object.Key("ClusterName") + ok.String(*v.ClusterName) + } + + if v.FinalSnapshotName != nil { + ok := object.Key("FinalSnapshotName") + ok.String(*v.FinalSnapshotName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteParameterGroupInput(v *DeleteParameterGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ParameterGroupName != nil { + ok := object.Key("ParameterGroupName") + ok.String(*v.ParameterGroupName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteSnapshotInput(v *DeleteSnapshotInput, value smithyjson.Value) error { + object := value.Object() + defer 
object.Close() + + if v.SnapshotName != nil { + ok := object.Key("SnapshotName") + ok.String(*v.SnapshotName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteSubnetGroupInput(v *DeleteSubnetGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SubnetGroupName != nil { + ok := object.Key("SubnetGroupName") + ok.String(*v.SubnetGroupName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteUserInput(v *DeleteUserInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.UserName != nil { + ok := object.Key("UserName") + ok.String(*v.UserName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeACLsInput(v *DescribeACLsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ACLName != nil { + ok := object.Key("ACLName") + ok.String(*v.ACLName) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeClustersInput(v *DescribeClustersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClusterName != nil { + ok := object.Key("ClusterName") + ok.String(*v.ClusterName) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.ShowShardDetails != nil { + ok := object.Key("ShowShardDetails") + ok.Boolean(*v.ShowShardDetails) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeEngineVersionsInput(v *DescribeEngineVersionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DefaultOnly { + ok := object.Key("DefaultOnly") + ok.Boolean(v.DefaultOnly) + } + + if 
v.EngineVersion != nil { + ok := object.Key("EngineVersion") + ok.String(*v.EngineVersion) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.ParameterGroupFamily != nil { + ok := object.Key("ParameterGroupFamily") + ok.String(*v.ParameterGroupFamily) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeEventsInput(v *DescribeEventsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Duration != nil { + ok := object.Key("Duration") + ok.Integer(*v.Duration) + } + + if v.EndTime != nil { + ok := object.Key("EndTime") + ok.Double(smithytime.FormatEpochSeconds(*v.EndTime)) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.SourceName != nil { + ok := object.Key("SourceName") + ok.String(*v.SourceName) + } + + if len(v.SourceType) > 0 { + ok := object.Key("SourceType") + ok.String(string(v.SourceType)) + } + + if v.StartTime != nil { + ok := object.Key("StartTime") + ok.Double(smithytime.FormatEpochSeconds(*v.StartTime)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeParameterGroupsInput(v *DescribeParameterGroupsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.ParameterGroupName != nil { + ok := object.Key("ParameterGroupName") + ok.String(*v.ParameterGroupName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeParametersInput(v *DescribeParametersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if 
v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.ParameterGroupName != nil { + ok := object.Key("ParameterGroupName") + ok.String(*v.ParameterGroupName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeServiceUpdatesInput(v *DescribeServiceUpdatesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClusterNames != nil { + ok := object.Key("ClusterNames") + if err := awsAwsjson11_serializeDocumentClusterNameList(v.ClusterNames, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.ServiceUpdateName != nil { + ok := object.Key("ServiceUpdateName") + ok.String(*v.ServiceUpdateName) + } + + if v.Status != nil { + ok := object.Key("Status") + if err := awsAwsjson11_serializeDocumentServiceUpdateStatusList(v.Status, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeSnapshotsInput(v *DescribeSnapshotsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClusterName != nil { + ok := object.Key("ClusterName") + ok.String(*v.ClusterName) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.ShowDetail != nil { + ok := object.Key("ShowDetail") + ok.Boolean(*v.ShowDetail) + } + + if v.SnapshotName != nil { + ok := object.Key("SnapshotName") + ok.String(*v.SnapshotName) + } + + if v.Source != nil { + ok := object.Key("Source") + ok.String(*v.Source) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeSubnetGroupsInput(v *DescribeSubnetGroupsInput, 
value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.SubnetGroupName != nil { + ok := object.Key("SubnetGroupName") + ok.String(*v.SubnetGroupName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeUsersInput(v *DescribeUsersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filters != nil { + ok := object.Key("Filters") + if err := awsAwsjson11_serializeDocumentFilterList(v.Filters, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.UserName != nil { + ok := object.Key("UserName") + ok.String(*v.UserName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentFailoverShardInput(v *FailoverShardInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClusterName != nil { + ok := object.Key("ClusterName") + ok.String(*v.ClusterName) + } + + if v.ShardName != nil { + ok := object.Key("ShardName") + ok.String(*v.ShardName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListAllowedNodeTypeUpdatesInput(v *ListAllowedNodeTypeUpdatesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClusterName != nil { + ok := object.Key("ClusterName") + ok.String(*v.ClusterName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTagsInput(v *ListTagsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentResetParameterGroupInput(v *ResetParameterGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AllParameters { + ok := object.Key("AllParameters") + ok.Boolean(v.AllParameters) + } + + if v.ParameterGroupName != nil { + ok := object.Key("ParameterGroupName") + ok.String(*v.ParameterGroupName) + } + + if v.ParameterNames != nil { + ok := object.Key("ParameterNames") + if err := awsAwsjson11_serializeDocumentParameterNameList(v.ParameterNames, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + if v.TagKeys != nil { + ok := object.Key("TagKeys") + if err := awsAwsjson11_serializeDocumentKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateACLInput(v *UpdateACLInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ACLName != nil { + ok := object.Key("ACLName") + ok.String(*v.ACLName) + } + + if v.UserNamesToAdd != nil { + ok := object.Key("UserNamesToAdd") + if err := awsAwsjson11_serializeDocumentUserNameListInput(v.UserNamesToAdd, ok); err != nil { + return err + } + } + + if v.UserNamesToRemove != nil { + ok := object.Key("UserNamesToRemove") + if err := 
awsAwsjson11_serializeDocumentUserNameListInput(v.UserNamesToRemove, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateClusterInput(v *UpdateClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ACLName != nil { + ok := object.Key("ACLName") + ok.String(*v.ACLName) + } + + if v.ClusterName != nil { + ok := object.Key("ClusterName") + ok.String(*v.ClusterName) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.EngineVersion != nil { + ok := object.Key("EngineVersion") + ok.String(*v.EngineVersion) + } + + if v.MaintenanceWindow != nil { + ok := object.Key("MaintenanceWindow") + ok.String(*v.MaintenanceWindow) + } + + if v.NodeType != nil { + ok := object.Key("NodeType") + ok.String(*v.NodeType) + } + + if v.ParameterGroupName != nil { + ok := object.Key("ParameterGroupName") + ok.String(*v.ParameterGroupName) + } + + if v.ReplicaConfiguration != nil { + ok := object.Key("ReplicaConfiguration") + if err := awsAwsjson11_serializeDocumentReplicaConfigurationRequest(v.ReplicaConfiguration, ok); err != nil { + return err + } + } + + if v.SecurityGroupIds != nil { + ok := object.Key("SecurityGroupIds") + if err := awsAwsjson11_serializeDocumentSecurityGroupIdsList(v.SecurityGroupIds, ok); err != nil { + return err + } + } + + if v.ShardConfiguration != nil { + ok := object.Key("ShardConfiguration") + if err := awsAwsjson11_serializeDocumentShardConfigurationRequest(v.ShardConfiguration, ok); err != nil { + return err + } + } + + if v.SnapshotRetentionLimit != nil { + ok := object.Key("SnapshotRetentionLimit") + ok.Integer(*v.SnapshotRetentionLimit) + } + + if v.SnapshotWindow != nil { + ok := object.Key("SnapshotWindow") + ok.String(*v.SnapshotWindow) + } + + if v.SnsTopicArn != nil { + ok := object.Key("SnsTopicArn") + ok.String(*v.SnsTopicArn) + } + + if v.SnsTopicStatus != nil { + ok := 
object.Key("SnsTopicStatus") + ok.String(*v.SnsTopicStatus) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateParameterGroupInput(v *UpdateParameterGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ParameterGroupName != nil { + ok := object.Key("ParameterGroupName") + ok.String(*v.ParameterGroupName) + } + + if v.ParameterNameValues != nil { + ok := object.Key("ParameterNameValues") + if err := awsAwsjson11_serializeDocumentParameterNameValueList(v.ParameterNameValues, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateSubnetGroupInput(v *UpdateSubnetGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.SubnetGroupName != nil { + ok := object.Key("SubnetGroupName") + ok.String(*v.SubnetGroupName) + } + + if v.SubnetIds != nil { + ok := object.Key("SubnetIds") + if err := awsAwsjson11_serializeDocumentSubnetIdentifierList(v.SubnetIds, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateUserInput(v *UpdateUserInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AccessString != nil { + ok := object.Key("AccessString") + ok.String(*v.AccessString) + } + + if v.AuthenticationMode != nil { + ok := object.Key("AuthenticationMode") + if err := awsAwsjson11_serializeDocumentAuthenticationMode(v.AuthenticationMode, ok); err != nil { + return err + } + } + + if v.UserName != nil { + ok := object.Key("UserName") + ok.String(*v.UserName) + } + + return nil +} diff --git a/service/memorydb/types/enums.go b/service/memorydb/types/enums.go new file mode 100644 index 00000000000..ea6ac901445 --- /dev/null +++ b/service/memorydb/types/enums.go @@ -0,0 +1,119 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package types + +type AuthenticationType string + +// Enum values for AuthenticationType +const ( + AuthenticationTypePassword AuthenticationType = "password" + AuthenticationTypeNoPassword AuthenticationType = "no-password" +) + +// Values returns all known values for AuthenticationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (AuthenticationType) Values() []AuthenticationType { + return []AuthenticationType{ + "password", + "no-password", + } +} + +type AZStatus string + +// Enum values for AZStatus +const ( + AZStatusSingleAZ AZStatus = "singleaz" + AZStatusMultiAZ AZStatus = "multiaz" +) + +// Values returns all known values for AZStatus. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (AZStatus) Values() []AZStatus { + return []AZStatus{ + "singleaz", + "multiaz", + } +} + +type InputAuthenticationType string + +// Enum values for InputAuthenticationType +const ( + InputAuthenticationTypePassword InputAuthenticationType = "password" +) + +// Values returns all known values for InputAuthenticationType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (InputAuthenticationType) Values() []InputAuthenticationType { + return []InputAuthenticationType{ + "password", + } +} + +type ServiceUpdateStatus string + +// Enum values for ServiceUpdateStatus +const ( + ServiceUpdateStatusNotApplied ServiceUpdateStatus = "available" + ServiceUpdateStatusInProgress ServiceUpdateStatus = "in-progress" + ServiceUpdateStatusComplete ServiceUpdateStatus = "complete" + ServiceUpdateStatusScheduled ServiceUpdateStatus = "scheduled" +) + +// Values returns all known values for ServiceUpdateStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ServiceUpdateStatus) Values() []ServiceUpdateStatus { + return []ServiceUpdateStatus{ + "available", + "in-progress", + "complete", + "scheduled", + } +} + +type ServiceUpdateType string + +// Enum values for ServiceUpdateType +const ( + ServiceUpdateTypeSecurityUpdate ServiceUpdateType = "security-update" +) + +// Values returns all known values for ServiceUpdateType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ServiceUpdateType) Values() []ServiceUpdateType { + return []ServiceUpdateType{ + "security-update", + } +} + +type SourceType string + +// Enum values for SourceType +const ( + SourceTypeNode SourceType = "node" + SourceTypeParameterGroup SourceType = "parameter-group" + SourceTypeSubnetGroup SourceType = "subnet-group" + SourceTypeCluster SourceType = "cluster" + SourceTypeUser SourceType = "user" + SourceTypeAcl SourceType = "acl" +) + +// Values returns all known values for SourceType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
+func (SourceType) Values() []SourceType { + return []SourceType{ + "node", + "parameter-group", + "subnet-group", + "cluster", + "user", + "acl", + } +} diff --git a/service/memorydb/types/errors.go b/service/memorydb/types/errors.go new file mode 100644 index 00000000000..e62500062d4 --- /dev/null +++ b/service/memorydb/types/errors.go @@ -0,0 +1,966 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// +type ACLAlreadyExistsFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ACLAlreadyExistsFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ACLAlreadyExistsFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ACLAlreadyExistsFault) ErrorCode() string { return "ACLAlreadyExistsFault" } +func (e *ACLAlreadyExistsFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ACLNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ACLNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ACLNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ACLNotFoundFault) ErrorCode() string { return "ACLNotFoundFault" } +func (e *ACLNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ACLQuotaExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ACLQuotaExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ACLQuotaExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ACLQuotaExceededFault) ErrorCode() string { return "ACLQuotaExceededFault" } +func (e *ACLQuotaExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type 
APICallRateForCustomerExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *APICallRateForCustomerExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *APICallRateForCustomerExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *APICallRateForCustomerExceededFault) ErrorCode() string { + return "APICallRateForCustomerExceededFault" +} +func (e *APICallRateForCustomerExceededFault) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// +type ClusterAlreadyExistsFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ClusterAlreadyExistsFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ClusterAlreadyExistsFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ClusterAlreadyExistsFault) ErrorCode() string { return "ClusterAlreadyExistsFault" } +func (e *ClusterAlreadyExistsFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ClusterNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ClusterNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ClusterNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ClusterNotFoundFault) ErrorCode() string { return "ClusterNotFoundFault" } +func (e *ClusterNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ClusterQuotaForCustomerExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ClusterQuotaForCustomerExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ClusterQuotaForCustomerExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} 
+func (e *ClusterQuotaForCustomerExceededFault) ErrorCode() string { + return "ClusterQuotaForCustomerExceededFault" +} +func (e *ClusterQuotaForCustomerExceededFault) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// +type DefaultUserRequired struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *DefaultUserRequired) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DefaultUserRequired) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DefaultUserRequired) ErrorCode() string { return "DefaultUserRequired" } +func (e *DefaultUserRequired) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type DuplicateUserNameFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *DuplicateUserNameFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DuplicateUserNameFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DuplicateUserNameFault) ErrorCode() string { return "DuplicateUserNameFault" } +func (e *DuplicateUserNameFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InsufficientClusterCapacityFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InsufficientClusterCapacityFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InsufficientClusterCapacityFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InsufficientClusterCapacityFault) ErrorCode() string { + return "InsufficientClusterCapacityFault" +} +func (e *InsufficientClusterCapacityFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidACLStateFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidACLStateFault) Error() string { + return fmt.Sprintf("%s: %s", 
e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidACLStateFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidACLStateFault) ErrorCode() string { return "InvalidACLStateFault" } +func (e *InvalidACLStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidARNFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidARNFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidARNFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidARNFault) ErrorCode() string { return "InvalidARNFault" } +func (e *InvalidARNFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidClusterStateFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidClusterStateFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClusterStateFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClusterStateFault) ErrorCode() string { return "InvalidClusterStateFault" } +func (e *InvalidClusterStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidCredentialsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidCredentialsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidCredentialsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidCredentialsException) ErrorCode() string { return "InvalidCredentialsException" } +func (e *InvalidCredentialsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidKMSKeyFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidKMSKeyFault) 
Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidKMSKeyFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidKMSKeyFault) ErrorCode() string { return "InvalidKMSKeyFault" } +func (e *InvalidKMSKeyFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidNodeStateFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidNodeStateFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidNodeStateFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidNodeStateFault) ErrorCode() string { return "InvalidNodeStateFault" } +func (e *InvalidNodeStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidParameterCombinationException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidParameterCombinationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidParameterCombinationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidParameterCombinationException) ErrorCode() string { + return "InvalidParameterCombinationException" +} +func (e *InvalidParameterCombinationException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// +type InvalidParameterGroupStateFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidParameterGroupStateFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidParameterGroupStateFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidParameterGroupStateFault) ErrorCode() string { + return "InvalidParameterGroupStateFault" +} +func (e 
*InvalidParameterGroupStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidParameterValueException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidParameterValueException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidParameterValueException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidParameterValueException) ErrorCode() string { return "InvalidParameterValueException" } +func (e *InvalidParameterValueException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidSnapshotStateFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidSnapshotStateFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidSnapshotStateFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidSnapshotStateFault) ErrorCode() string { return "InvalidSnapshotStateFault" } +func (e *InvalidSnapshotStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidSubnet struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidSubnet) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidSubnet) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidSubnet) ErrorCode() string { return "InvalidSubnet" } +func (e *InvalidSubnet) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidUserStateFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidUserStateFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidUserStateFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e 
*InvalidUserStateFault) ErrorCode() string { return "InvalidUserStateFault" } +func (e *InvalidUserStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type InvalidVPCNetworkStateFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidVPCNetworkStateFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidVPCNetworkStateFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidVPCNetworkStateFault) ErrorCode() string { return "InvalidVPCNetworkStateFault" } +func (e *InvalidVPCNetworkStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type NodeQuotaForClusterExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NodeQuotaForClusterExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NodeQuotaForClusterExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NodeQuotaForClusterExceededFault) ErrorCode() string { + return "NodeQuotaForClusterExceededFault" +} +func (e *NodeQuotaForClusterExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type NodeQuotaForCustomerExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NodeQuotaForCustomerExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NodeQuotaForCustomerExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NodeQuotaForCustomerExceededFault) ErrorCode() string { + return "NodeQuotaForCustomerExceededFault" +} +func (e *NodeQuotaForCustomerExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type NoOperationFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NoOperationFault) 
Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoOperationFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoOperationFault) ErrorCode() string { return "NoOperationFault" } +func (e *NoOperationFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ParameterGroupAlreadyExistsFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ParameterGroupAlreadyExistsFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ParameterGroupAlreadyExistsFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ParameterGroupAlreadyExistsFault) ErrorCode() string { + return "ParameterGroupAlreadyExistsFault" +} +func (e *ParameterGroupAlreadyExistsFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ParameterGroupNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ParameterGroupNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ParameterGroupNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ParameterGroupNotFoundFault) ErrorCode() string { return "ParameterGroupNotFoundFault" } +func (e *ParameterGroupNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ParameterGroupQuotaExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ParameterGroupQuotaExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ParameterGroupQuotaExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ParameterGroupQuotaExceededFault) ErrorCode() string { + return "ParameterGroupQuotaExceededFault" +} +func (e 
*ParameterGroupQuotaExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ServiceLinkedRoleNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ServiceLinkedRoleNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServiceLinkedRoleNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServiceLinkedRoleNotFoundFault) ErrorCode() string { return "ServiceLinkedRoleNotFoundFault" } +func (e *ServiceLinkedRoleNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ServiceUpdateNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ServiceUpdateNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServiceUpdateNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServiceUpdateNotFoundFault) ErrorCode() string { return "ServiceUpdateNotFoundFault" } +func (e *ServiceUpdateNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +type ShardNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ShardNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ShardNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ShardNotFoundFault) ErrorCode() string { return "ShardNotFoundFault" } +func (e *ShardNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type ShardsPerClusterQuotaExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ShardsPerClusterQuotaExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ShardsPerClusterQuotaExceededFault) ErrorMessage() string { + if 
e.Message == nil { + return "" + } + return *e.Message +} +func (e *ShardsPerClusterQuotaExceededFault) ErrorCode() string { + return "ShardsPerClusterQuotaExceededFault" +} +func (e *ShardsPerClusterQuotaExceededFault) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// +type SnapshotAlreadyExistsFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SnapshotAlreadyExistsFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SnapshotAlreadyExistsFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SnapshotAlreadyExistsFault) ErrorCode() string { return "SnapshotAlreadyExistsFault" } +func (e *SnapshotAlreadyExistsFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type SnapshotNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SnapshotNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SnapshotNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SnapshotNotFoundFault) ErrorCode() string { return "SnapshotNotFoundFault" } +func (e *SnapshotNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type SnapshotQuotaExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SnapshotQuotaExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SnapshotQuotaExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SnapshotQuotaExceededFault) ErrorCode() string { return "SnapshotQuotaExceededFault" } +func (e *SnapshotQuotaExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type SubnetGroupAlreadyExistsFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e 
*SubnetGroupAlreadyExistsFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SubnetGroupAlreadyExistsFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SubnetGroupAlreadyExistsFault) ErrorCode() string { return "SubnetGroupAlreadyExistsFault" } +func (e *SubnetGroupAlreadyExistsFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type SubnetGroupInUseFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SubnetGroupInUseFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SubnetGroupInUseFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SubnetGroupInUseFault) ErrorCode() string { return "SubnetGroupInUseFault" } +func (e *SubnetGroupInUseFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type SubnetGroupNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SubnetGroupNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SubnetGroupNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SubnetGroupNotFoundFault) ErrorCode() string { return "SubnetGroupNotFoundFault" } +func (e *SubnetGroupNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type SubnetGroupQuotaExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SubnetGroupQuotaExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SubnetGroupQuotaExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SubnetGroupQuotaExceededFault) ErrorCode() string { return "SubnetGroupQuotaExceededFault" } +func (e *SubnetGroupQuotaExceededFault) ErrorFault() 
smithy.ErrorFault { return smithy.FaultClient } + +// +type SubnetInUse struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SubnetInUse) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SubnetInUse) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SubnetInUse) ErrorCode() string { return "SubnetInUse" } +func (e *SubnetInUse) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type SubnetNotAllowedFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SubnetNotAllowedFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SubnetNotAllowedFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SubnetNotAllowedFault) ErrorCode() string { return "SubnetNotAllowedFault" } +func (e *SubnetNotAllowedFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type SubnetQuotaExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *SubnetQuotaExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SubnetQuotaExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SubnetQuotaExceededFault) ErrorCode() string { return "SubnetQuotaExceededFault" } +func (e *SubnetQuotaExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type TagNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *TagNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TagNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TagNotFoundFault) ErrorCode() string { return "TagNotFoundFault" } +func (e *TagNotFoundFault) ErrorFault() smithy.ErrorFault { return 
smithy.FaultClient } + +// +type TagQuotaPerResourceExceeded struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *TagQuotaPerResourceExceeded) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TagQuotaPerResourceExceeded) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TagQuotaPerResourceExceeded) ErrorCode() string { return "TagQuotaPerResourceExceeded" } +func (e *TagQuotaPerResourceExceeded) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type TestFailoverNotAvailableFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *TestFailoverNotAvailableFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TestFailoverNotAvailableFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TestFailoverNotAvailableFault) ErrorCode() string { return "TestFailoverNotAvailableFault" } +func (e *TestFailoverNotAvailableFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type UserAlreadyExistsFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *UserAlreadyExistsFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UserAlreadyExistsFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UserAlreadyExistsFault) ErrorCode() string { return "UserAlreadyExistsFault" } +func (e *UserAlreadyExistsFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type UserNotFoundFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *UserNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UserNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UserNotFoundFault) ErrorCode() 
string { return "UserNotFoundFault" } +func (e *UserNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// +type UserQuotaExceededFault struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *UserQuotaExceededFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UserQuotaExceededFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UserQuotaExceededFault) ErrorCode() string { return "UserQuotaExceededFault" } +func (e *UserQuotaExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/service/memorydb/types/types.go b/service/memorydb/types/types.go new file mode 100644 index 00000000000..4bce29cf56f --- /dev/null +++ b/service/memorydb/types/types.go @@ -0,0 +1,688 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// An Access Control List. You can authenticate users with Access Contol Lists. +// ACLs enable you to control cluster access by grouping users. These Access +// control lists are designed as a way to organize access to clusters. +type ACL struct { + + // The Amazon Resource Name (ARN) of the ACL + ARN *string + + // A list of clusters associated with the ACL. + Clusters []string + + // The minimum engine version supported for the ACL + MinimumEngineVersion *string + + // The name of the Access Control List + Name *string + + // A list of updates being applied to the ACL. + PendingChanges *ACLPendingChanges + + // Indicates ACL status. Can be "creating", "active", "modifying", "deleting". + Status *string + + // The list of user names that belong to the ACL. + UserNames []string + + noSmithyDocumentSerde +} + +// Returns the updates being applied to the ACL. 
+type ACLPendingChanges struct { + + // A list of users being added to the ACL + UserNamesToAdd []string + + // A list of user names being removed from the ACL + UserNamesToRemove []string + + noSmithyDocumentSerde +} + +// The status of the ACL update +type ACLsUpdateStatus struct { + + // A list of ACLs pending to be applied. + ACLToApply *string + + noSmithyDocumentSerde +} + +// Denotes the user's authentication properties, such as whether it requires a +// password to authenticate. Used in output responses. +type Authentication struct { + + // The number of passwords belonging to the user. The maximum is two. + PasswordCount *int32 + + // Indicates whether the user requires a password to authenticate. + Type AuthenticationType + + noSmithyDocumentSerde +} + +// Denotes the user's authentication properties, such as whether it requires a +// password to authenticate. Used in output responses. +type AuthenticationMode struct { + + // The password(s) used for authentication + Passwords []string + + // Indicates whether the user requires a password to authenticate. All + // newly-created users require a password. + Type InputAuthenticationType + + noSmithyDocumentSerde +} + +// Indicates if the cluster has a Multi-AZ configuration (multiaz) or not +// (singleaz). +type AvailabilityZone struct { + + // The name of the Availability Zone. + Name *string + + noSmithyDocumentSerde +} + +// Contains all of the attributes of a specific cluster. +type Cluster struct { + + // The name of the Access Control List associated with this cluster. + ACLName *string + + // The Amazon Resource Name (ARN) of the cluster. + ARN *string + + // When set to true, the cluster will automatically receive minor engine version + // upgrades after launch. + AutoMinorVersionUpgrade *bool + + // Indicates if the cluster has a Multi-AZ configuration (multiaz) or not + // (singleaz). 
+ AvailabilityMode AZStatus + + // The cluster's configuration endpoint + ClusterEndpoint *Endpoint + + // A description of the cluster + Description *string + + // The Redis engine patch version used by the cluster + EnginePatchVersion *string + + // The Redis engine version used by the cluster + EngineVersion *string + + // The ID of the KMS key used to encrypt the cluster + KmsKeyId *string + + // Specifies the weekly time range during which maintenance on the cluster is + // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H + // Clock UTC). The minimum maintenance window is a 60 minute period. + MaintenanceWindow *string + + // The user-supplied name of the cluster. This identifier is a unique key that + // identifies a cluster. + Name *string + + // The cluster's node type + NodeType *string + + // The number of shards in the cluster + NumberOfShards *int32 + + // The name of the parameter group used by the cluster + ParameterGroupName *string + + // The status of the parameter group used by the cluster, for example 'active' or + // 'applying'. + ParameterGroupStatus *string + + // A group of settings that are currently being applied. + PendingUpdates *ClusterPendingUpdates + + // A list of security groups used by the cluster + SecurityGroups []SecurityGroupMembership + + // A list of shards that are members of the cluster. + Shards []Shard + + // The number of days for which MemoryDB retains automatic snapshots before + // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + // that was taken today is retained for 5 days before being deleted. + SnapshotRetentionLimit *int32 + + // The daily time range (in UTC) during which MemoryDB begins taking a daily + // snapshot of your shard. Example: 05:00-09:00 If you do not specify this + // parameter, MemoryDB automatically chooses an appropriate time range. 
+ SnapshotWindow *string + + // The Amazon Resource Name (ARN) of the SNS notification topic + SnsTopicArn *string + + // The SNS topic must be in Active status to receive notifications + SnsTopicStatus *string + + // The status of the cluster. For example, Available, Updating, Creating. + Status *string + + // The name of the subnet group used by the cluster + SubnetGroupName *string + + // A flag to indicate if In-transit encryption is enabled + TLSEnabled *bool + + noSmithyDocumentSerde +} + +// A list of cluster configuration options. +type ClusterConfiguration struct { + + // The description of the cluster configuration + Description *string + + // The Redis engine version used by the cluster + EngineVersion *string + + // The specified maintenance window for the cluster + MaintenanceWindow *string + + // The name of the cluster + Name *string + + // The node type used for the cluster + NodeType *string + + // The number of shards in the cluster + NumShards *int32 + + // The name of parameter group used by the cluster + ParameterGroupName *string + + // The port used by the cluster + Port *int32 + + // The list of shards in the cluster + Shards []ShardDetail + + // The snapshot retention limit set by the cluster + SnapshotRetentionLimit *int32 + + // The snapshot window set by the cluster + SnapshotWindow *string + + // The name of the subnet group used by the cluster + SubnetGroupName *string + + // The Amazon Resource Name (ARN) of the SNS notification topic for the cluster + TopicArn *string + + // The ID of the VPC the cluster belongs to + VpcId *string + + noSmithyDocumentSerde +} + +// A list of updates being applied to the cluster +type ClusterPendingUpdates struct { + + // A list of ACLs associated with the cluster that are being updated + ACLs *ACLsUpdateStatus + + // The status of an online resharding operation. 
+ Resharding *ReshardingStatus + + // A list of service updates being applied to the cluster + ServiceUpdates []PendingModifiedServiceUpdate + + noSmithyDocumentSerde +} + +// Represents the information required for client programs to connect to the +// cluster and its nodes. +type Endpoint struct { + + // The DNS hostname of the node. + Address *string + + // The port number that the engine is listening on. + Port int32 + + noSmithyDocumentSerde +} + +// Provides details of the Redis engine version +type EngineVersionInfo struct { + + // The patched engine version + EnginePatchVersion *string + + // The engine version + EngineVersion *string + + // Specifies the name of the parameter group family to which the engine default + // parameters apply. + ParameterGroupFamily *string + + noSmithyDocumentSerde +} + +// Represents a single occurrence of something interesting within the system. Some +// examples of events are creating a cluster or adding or removing a node. +type Event struct { + + // The date and time when the event occurred. + Date *time.Time + + // The text of the event. + Message *string + + // The name for the source of the event. For example, if the event occurred at the + // cluster level, the identifier would be the name of the cluster. + SourceName *string + + // Specifies the origin of this event - a cluster, a parameter group, a security + // group, etc. + SourceType SourceType + + noSmithyDocumentSerde +} + +// Used to streamline results of a search based on the property being filtered. +type Filter struct { + + // The property being filtered. For example, UserName. + // + // This member is required. + Name *string + + // The property values to filter on. For example, "user-123". + // + // This member is required. + Values []string + + noSmithyDocumentSerde +} + +// Represents an individual node within a cluster. Each node runs its own instance +// of the cluster's protocol-compliant caching software. 
+type Node struct { + + // The Availability Zone in which the node resides + AvailabilityZone *string + + // The date and time when the node was created. + CreateTime *time.Time + + // The hostname for connecting to this node. + Endpoint *Endpoint + + // The node identifier. A node name is a numeric identifier (0001, 0002, etc.). The + // combination of cluster name, shard name and node name uniquely identifies every + // node used in a customer's Amazon account. + Name *string + + // The status of the service update on the node + Status *string + + noSmithyDocumentSerde +} + +// Describes an individual setting that controls some aspect of MemoryDB behavior. +type Parameter struct { + + // The valid range of values for the parameter. + AllowedValues *string + + // The parameter's data type + DataType *string + + // A description of the parameter + Description *string + + // The earliest engine version to which the parameter can apply. + MinimumEngineVersion *string + + // The name of the parameter + Name *string + + // The value of the parameter + Value *string + + noSmithyDocumentSerde +} + +// Represents the output of a CreateParameterGroup operation. A parameter group +// represents a combination of specific values for the parameters that are passed +// to the engine software during startup. +type ParameterGroup struct { + + // The Amazon Resource Name (ARN) of the parameter group + ARN *string + + // A description of the parameter group + Description *string + + // The name of the parameter group family that this parameter group is compatible + // with. + Family *string + + // The name of the parameter group + Name *string + + noSmithyDocumentSerde +} + +// Describes a name-value pair that is used to update the value of a parameter. 
+type ParameterNameValue struct { + + // The name of the parameter + ParameterName *string + + // The value of the parameter + ParameterValue *string + + noSmithyDocumentSerde +} + +// Update action that has yet to be processed for the corresponding apply/stop +// request +type PendingModifiedServiceUpdate struct { + + // The unique ID of the service update + ServiceUpdateName *string + + // The status of the service update + Status ServiceUpdateStatus + + noSmithyDocumentSerde +} + +// A request to configure the number of replicas in a shard +type ReplicaConfigurationRequest struct { + + // The number of replicas to scale up or down to + ReplicaCount int32 + + noSmithyDocumentSerde +} + +// The status of the online resharding +type ReshardingStatus struct { + + // The status of the online resharding slot migration + SlotMigration *SlotMigration + + noSmithyDocumentSerde +} + +// Represents a single security group and its status. +type SecurityGroupMembership struct { + + // The identifier of the security group. + SecurityGroupId *string + + // The status of the security group membership. The status changes whenever a + // security group is modified, or when the security groups assigned to a cluster + // are modified. + Status *string + + noSmithyDocumentSerde +} + +// An update that you can apply to your MemoryDB clusters. 
+type ServiceUpdate struct { + + // The date at which the service update will be automatically applied + AutoUpdateStartDate *time.Time + + // The name of the cluster to which the service update applies + ClusterName *string + + // Provides details of the service update + Description *string + + // A list of nodes updated by the service update + NodesUpdated *string + + // The date when the service update is initially available + ReleaseDate *time.Time + + // The unique ID of the service update + ServiceUpdateName *string + + // The status of the service update + Status ServiceUpdateStatus + + // Reflects the nature of the service update + Type ServiceUpdateType + + noSmithyDocumentSerde +} + +// A request to apply a service update +type ServiceUpdateRequest struct { + + // The unique ID of the service update + ServiceUpdateNameToApply *string + + noSmithyDocumentSerde +} + +// Represents a collection of nodes in a cluster. One node in the node group is the +// read/write primary node. All the other nodes are read-only Replica nodes. +type Shard struct { + + // The name of the shard + Name *string + + // A list containing information about individual nodes within the shard + Nodes []Node + + // The number of nodes in the shard + NumberOfNodes *int32 + + // The keyspace for this shard. + Slots *string + + // The current state of this replication group - creating, available, modifying, + // deleting. + Status *string + + noSmithyDocumentSerde +} + +// Shard configuration options. Each shard configuration has the following: Slots +// and ReplicaCount. +type ShardConfiguration struct { + + // The number of read replica nodes in this shard. + ReplicaCount *int32 + + // A string that specifies the keyspace for a particular node group. Keyspaces + // range from 0 to 16,383. The string is in the format startkey-endkey. 
+ Slots *string + + noSmithyDocumentSerde +} + +// A request to configure the sharding properties of a cluster +type ShardConfigurationRequest struct { + + // The number of shards in the cluster + ShardCount int32 + + noSmithyDocumentSerde +} + +// Provides details of a shard in a snapshot +type ShardDetail struct { + + // The configuration details of the shard + Configuration *ShardConfiguration + + // The name of the shard + Name *string + + // The size of the shard's snapshot + Size *string + + // The date and time that the shard's snapshot was created + SnapshotCreationTime *time.Time + + noSmithyDocumentSerde +} + +// Represents the progress of an online resharding operation. +type SlotMigration struct { + + // The percentage of the slot migration that is complete. + ProgressPercentage float64 + + noSmithyDocumentSerde +} + +// Represents a copy of an entire cluster as of the time when the snapshot was +// taken. +type Snapshot struct { + + // The ARN (Amazon Resource Name) of the snapshot. + ARN *string + + // The configuration of the cluster from which the snapshot was taken + ClusterConfiguration *ClusterConfiguration + + // The ID of the KMS key used to encrypt the snapshot. + KmsKeyId *string + + // The name of the snapshot + Name *string + + // Indicates whether the snapshot is from an automatic backup (automated) or was + // created manually (manual). + Source *string + + // The status of the snapshot. Valid values: creating | available | restoring | + // copying | deleting. + Status *string + + noSmithyDocumentSerde +} + +// Represents the subnet associated with a cluster. This parameter refers to +// subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with +// MemoryDB. +type Subnet struct { + + // The Availability Zone where the subnet resides + AvailabilityZone *AvailabilityZone + + // The unique identifier for the subnet. 
+ Identifier *string + + noSmithyDocumentSerde +} + +// Represents the output of one of the following operations: +// +// * +// CreateSubnetGroup +// +// * UpdateSubnetGroup +// +// A subnet group is a collection of +// subnets (typically private) that you can designate for your clusters running in +// an Amazon Virtual Private Cloud (VPC) environment. +type SubnetGroup struct { + + // The ARN (Amazon Resource Name) of the subnet group. + ARN *string + + // A description of the subnet group + Description *string + + // The name of the subnet group + Name *string + + // A list of subnets associated with the subnet group. + Subnets []Subnet + + // The Amazon Virtual Private Cloud identifier (VPC ID) of the subnet group. + VpcId *string + + noSmithyDocumentSerde +} + +// A tag that can be added to an MemoryDB resource. Tags are composed of a +// Key/Value pair. You can use tags to categorize and track all your MemoryDB +// resources. When you add or remove tags on clusters, those actions will be +// replicated to all nodes in the cluster. A tag with a null Value is permitted. +// For more information, see Tagging your MemoryDB resources +// (https://docs.aws.amazon.com/AmazonMemoryDB/latest/devguide/Tagging-Resources.html) +type Tag struct { + + // The key for the tag. May not be null. + Key *string + + // The tag's value. May be null. + Value *string + + noSmithyDocumentSerde +} + +// A cluster whose updates have failed +type UnprocessedCluster struct { + + // The name of the cluster + ClusterName *string + + // The error message associated with the update failure + ErrorMessage *string + + // The error type associated with the update failure + ErrorType *string + + noSmithyDocumentSerde +} + +// You create users and assign them specific permissions by using an access string. +// You assign the users to Access Control Lists aligned with a specific role +// (administrators, human resources) that are then deployed to one or more MemoryDB +// clusters. 
+type User struct { + + // The names of the Access Control Lists to which the user belongs + ACLNames []string + + // The Amazon Resource Name (ARN) of the user. + ARN *string + + // Access permissions string used for this user. + AccessString *string + + // Denotes whether the user requires a password to authenticate. + Authentication *Authentication + + // The minimum engine version supported for the user + MinimumEngineVersion *string + + // The name of the user + Name *string + + // Indicates the user status. Can be "active", "modifying" or "deleting". + Status *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/memorydb/validators.go b/service/memorydb/validators.go new file mode 100644 index 00000000000..544497aa099 --- /dev/null +++ b/service/memorydb/validators.go @@ -0,0 +1,1137 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package memorydb + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/memorydb/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpBatchUpdateCluster struct { +} + +func (*validateOpBatchUpdateCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchUpdateCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchUpdateClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchUpdateClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCopySnapshot struct { +} + +func (*validateOpCopySnapshot) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCopySnapshot) HandleInitialize(ctx context.Context, in 
middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CopySnapshotInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCopySnapshotInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateACL struct { +} + +func (*validateOpCreateACL) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateACL) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateACLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateACLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateCluster struct { +} + +func (*validateOpCreateCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateParameterGroup struct { +} + +func (*validateOpCreateParameterGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateParameterGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( 
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateParameterGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateParameterGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateSnapshot struct { +} + +func (*validateOpCreateSnapshot) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateSnapshot) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateSnapshotInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateSnapshotInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateSubnetGroup struct { +} + +func (*validateOpCreateSubnetGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateSubnetGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateSubnetGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateSubnetGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateUser struct { +} + +func (*validateOpCreateUser) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateUser) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, 
metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateUserInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateUserInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteACL struct { +} + +func (*validateOpDeleteACL) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteACL) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteACLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteACLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteCluster struct { +} + +func (*validateOpDeleteCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteParameterGroup struct { +} + +func (*validateOpDeleteParameterGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteParameterGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := 
in.Parameters.(*DeleteParameterGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteParameterGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteSnapshot struct { +} + +func (*validateOpDeleteSnapshot) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteSnapshot) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteSnapshotInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteSnapshotInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteSubnetGroup struct { +} + +func (*validateOpDeleteSubnetGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteSubnetGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteSubnetGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteSubnetGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteUser struct { +} + +func (*validateOpDeleteUser) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteUser) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteUserInput) + 
if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteUserInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeParameters struct { +} + +func (*validateOpDescribeParameters) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeParameters) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeParametersInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeParametersInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeUsers struct { +} + +func (*validateOpDescribeUsers) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeUsers) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeUsersInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeUsersInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpFailoverShard struct { +} + +func (*validateOpFailoverShard) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpFailoverShard) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*FailoverShardInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpFailoverShardInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAllowedNodeTypeUpdates struct { +} + +func (*validateOpListAllowedNodeTypeUpdates) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAllowedNodeTypeUpdates) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAllowedNodeTypeUpdatesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAllowedNodeTypeUpdatesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTags struct { +} + +func (*validateOpListTags) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTags) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpResetParameterGroup struct { +} + +func (*validateOpResetParameterGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpResetParameterGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ResetParameterGroupInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpResetParameterGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateACL struct { +} + +func (*validateOpUpdateACL) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateACL) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateACLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpUpdateACLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateCluster struct { +} + +func (*validateOpUpdateCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateParameterGroup struct { +} + +func (*validateOpUpdateParameterGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateParameterGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateParameterGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateParameterGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateSubnetGroup struct { +} + +func (*validateOpUpdateSubnetGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateSubnetGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateSubnetGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpUpdateSubnetGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateUser struct { +} + +func (*validateOpUpdateUser) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateUser) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateUserInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateUserInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpBatchUpdateClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchUpdateCluster{}, middleware.After) +} + +func addOpCopySnapshotValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCopySnapshot{}, middleware.After) +} + +func addOpCreateACLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateACL{}, middleware.After) +} + +func addOpCreateClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateCluster{}, middleware.After) +} + +func addOpCreateParameterGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateParameterGroup{}, middleware.After) +} + +func addOpCreateSnapshotValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateSnapshot{}, middleware.After) +} + +func addOpCreateSubnetGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateSubnetGroup{}, middleware.After) +} + +func addOpCreateUserValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpCreateUser{}, middleware.After) +} + +func addOpDeleteACLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteACL{}, middleware.After) +} + +func addOpDeleteClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteCluster{}, middleware.After) +} + +func addOpDeleteParameterGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteParameterGroup{}, middleware.After) +} + +func addOpDeleteSnapshotValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteSnapshot{}, middleware.After) +} + +func addOpDeleteSubnetGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteSubnetGroup{}, middleware.After) +} + +func addOpDeleteUserValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteUser{}, middleware.After) +} + +func addOpDescribeParametersValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeParameters{}, middleware.After) +} + +func addOpDescribeUsersValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeUsers{}, middleware.After) +} + +func addOpFailoverShardValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpFailoverShard{}, middleware.After) +} + +func addOpListAllowedNodeTypeUpdatesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAllowedNodeTypeUpdates{}, middleware.After) +} + +func addOpListTagsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTags{}, middleware.After) +} + +func addOpResetParameterGroupValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpResetParameterGroup{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUpdateACLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateACL{}, middleware.After) +} + +func addOpUpdateClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateCluster{}, middleware.After) +} + +func addOpUpdateParameterGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateParameterGroup{}, middleware.After) +} + +func addOpUpdateSubnetGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateSubnetGroup{}, middleware.After) +} + +func addOpUpdateUserValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateUser{}, middleware.After) +} + +func validateFilter(v *types.Filter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Filter"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Values == nil { + invalidParams.Add(smithy.NewErrParamRequired("Values")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateFilterList(v []types.Filter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "FilterList"} + for i := range v { + if err := validateFilter(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateOpBatchUpdateClusterInput(v *BatchUpdateClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchUpdateClusterInput"} + if v.ClusterNames == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClusterNames")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCopySnapshotInput(v *CopySnapshotInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CopySnapshotInput"} + if v.SourceSnapshotName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SourceSnapshotName")) + } + if v.TargetSnapshotName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetSnapshotName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateACLInput(v *CreateACLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateACLInput"} + if v.ACLName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ACLName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateClusterInput(v *CreateClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateClusterInput"} + if v.ClusterName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClusterName")) + } + if v.NodeType == nil { + invalidParams.Add(smithy.NewErrParamRequired("NodeType")) + } + if v.ACLName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ACLName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateParameterGroupInput(v *CreateParameterGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateParameterGroupInput"} + if v.ParameterGroupName == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("ParameterGroupName")) + } + if v.Family == nil { + invalidParams.Add(smithy.NewErrParamRequired("Family")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateSnapshotInput(v *CreateSnapshotInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateSnapshotInput"} + if v.ClusterName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClusterName")) + } + if v.SnapshotName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SnapshotName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateSubnetGroupInput(v *CreateSubnetGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateSubnetGroupInput"} + if v.SubnetGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SubnetGroupName")) + } + if v.SubnetIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("SubnetIds")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateUserInput(v *CreateUserInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateUserInput"} + if v.UserName == nil { + invalidParams.Add(smithy.NewErrParamRequired("UserName")) + } + if v.AuthenticationMode == nil { + invalidParams.Add(smithy.NewErrParamRequired("AuthenticationMode")) + } + if v.AccessString == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessString")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteACLInput(v *DeleteACLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteACLInput"} + if v.ACLName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ACLName")) + } + if invalidParams.Len() > 0 { + 
return invalidParams + } else { + return nil + } +} + +func validateOpDeleteClusterInput(v *DeleteClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteClusterInput"} + if v.ClusterName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClusterName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteParameterGroupInput(v *DeleteParameterGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteParameterGroupInput"} + if v.ParameterGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ParameterGroupName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteSnapshotInput(v *DeleteSnapshotInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteSnapshotInput"} + if v.SnapshotName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SnapshotName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteSubnetGroupInput(v *DeleteSubnetGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteSubnetGroupInput"} + if v.SubnetGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SubnetGroupName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteUserInput(v *DeleteUserInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteUserInput"} + if v.UserName == nil { + invalidParams.Add(smithy.NewErrParamRequired("UserName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeParametersInput(v *DescribeParametersInput) error { + if v == nil { + return nil + } + 
invalidParams := smithy.InvalidParamsError{Context: "DescribeParametersInput"} + if v.ParameterGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ParameterGroupName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeUsersInput(v *DescribeUsersInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeUsersInput"} + if v.Filters != nil { + if err := validateFilterList(v.Filters); err != nil { + invalidParams.AddNested("Filters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpFailoverShardInput(v *FailoverShardInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "FailoverShardInput"} + if v.ClusterName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClusterName")) + } + if v.ShardName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ShardName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAllowedNodeTypeUpdatesInput(v *ListAllowedNodeTypeUpdatesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAllowedNodeTypeUpdatesInput"} + if v.ClusterName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClusterName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsInput(v *ListTagsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpResetParameterGroupInput(v *ResetParameterGroupInput) error { + if v == nil { + return nil + } + 
invalidParams := smithy.InvalidParamsError{Context: "ResetParameterGroupInput"} + if v.ParameterGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ParameterGroupName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateACLInput(v *UpdateACLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateACLInput"} + if v.ACLName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ACLName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateClusterInput(v *UpdateClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateClusterInput"} + if v.ClusterName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClusterName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateParameterGroupInput(v *UpdateParameterGroupInput) error { + if v == nil { + return nil + } + invalidParams := 
smithy.InvalidParamsError{Context: "UpdateParameterGroupInput"} + if v.ParameterGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ParameterGroupName")) + } + if v.ParameterNameValues == nil { + invalidParams.Add(smithy.NewErrParamRequired("ParameterNameValues")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateSubnetGroupInput(v *UpdateSubnetGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateSubnetGroupInput"} + if v.SubnetGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SubnetGroupName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateUserInput(v *UpdateUserInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateUserInput"} + if v.UserName == nil { + invalidParams.Add(smithy.NewErrParamRequired("UserName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/service/quicksight/api_op_CreateAccountCustomization.go b/service/quicksight/api_op_CreateAccountCustomization.go index 670f72d0ac5..d7a260a379a 100644 --- a/service/quicksight/api_op_CreateAccountCustomization.go +++ b/service/quicksight/api_op_CreateAccountCustomization.go @@ -11,11 +11,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates Amazon QuickSight customizations the current Region;. Currently, you can -// add a custom default theme by using the CreateAccountCustomization or -// UpdateAccountCustomization API operation. To further customize QuickSight by -// removing QuickSight sample assets and videos for all new users, see Customizing -// QuickSight +// Creates Amazon QuickSight customizations the current Amazon Web Services +// Region;. 
Currently, you can add a custom default theme by using the +// CreateAccountCustomization or UpdateAccountCustomization API operation. To +// further customize QuickSight by removing QuickSight sample assets and videos for +// all new users, see Customizing QuickSight // (https://docs.aws.amazon.com/quicksight/latest/user/customizing-quicksight.html) // in the Amazon QuickSight User Guide. You can create customizations for your // Amazon Web Services account; or, if you specify a namespace, for a QuickSight @@ -46,12 +46,12 @@ func (c *Client) CreateAccountCustomization(ctx context.Context, params *CreateA type CreateAccountCustomizationInput struct { - // The QuickSight customizations you're adding in the current Region;. You can add - // these to an Amazon Web Services account; and a QuickSight namespace. For - // example, you can add a default theme by setting AccountCustomization to the - // midnight theme: "AccountCustomization": { "DefaultTheme": - // "arn:aws:quicksight::aws:theme/MIDNIGHT" }. Or, you can add a custom theme by - // specifying "AccountCustomization": { "DefaultTheme": + // The QuickSight customizations you're adding in the current Amazon Web Services + // Region;. You can add these to an Amazon Web Services account; and a QuickSight + // namespace. For example, you can add a default theme by setting + // AccountCustomization to the midnight theme: "AccountCustomization": { + // "DefaultTheme": "arn:aws:quicksight::aws:theme/MIDNIGHT" }. Or, you can add a + // custom theme by specifying "AccountCustomization": { "DefaultTheme": // "arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639" // }. // @@ -75,7 +75,8 @@ type CreateAccountCustomizationInput struct { type CreateAccountCustomizationOutput struct { - // The QuickSight customizations you're adding in the current Region;. + // The QuickSight customizations you're adding in the current Amazon Web Services + // Region;. 
AccountCustomization *types.AccountCustomization // The Amazon Resource Name (ARN) for the customization that you created for this diff --git a/service/quicksight/api_op_CreateDashboard.go b/service/quicksight/api_op_CreateDashboard.go index 97ff8ca49cc..5bcde40b8b7 100644 --- a/service/quicksight/api_op_CreateDashboard.go +++ b/service/quicksight/api_op_CreateDashboard.go @@ -56,10 +56,10 @@ type CreateDashboardInput struct { // you need to create a dashboard from an analysis, first convert the analysis to a // template by using the CreateTemplate API operation. For SourceTemplate, specify // the Amazon Resource Name (ARN) of the source template. The SourceTemplateARN can - // contain any Amazon Web Services account; and any QuickSight-supported Region;. - // Use the DataSetReferences entity within SourceTemplate to list the replacement - // datasets for the placeholders listed in the original. The schema in each dataset - // must match its placeholder. + // contain any Amazon Web Services account; and any QuickSight-supported Amazon Web + // Services Region;. Use the DataSetReferences entity within SourceTemplate to list + // the replacement datasets for the placeholders listed in the original. The schema + // in each dataset must match its placeholder. // // This member is required. SourceEntity *types.DashboardSourceEntity diff --git a/service/quicksight/api_op_CreateDataSet.go b/service/quicksight/api_op_CreateDataSet.go index 2fac4c0b21f..bc10e1d4563 100644 --- a/service/quicksight/api_op_CreateDataSet.go +++ b/service/quicksight/api_op_CreateDataSet.go @@ -34,8 +34,8 @@ type CreateDataSetInput struct { // This member is required. AwsAccountId *string - // An ID for the dataset that you want to create. This ID is unique per Region; for - // each Amazon Web Services account;. + // An ID for the dataset that you want to create. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. // // This member is required. 
DataSetId *string @@ -91,8 +91,8 @@ type CreateDataSetOutput struct { // The Amazon Resource Name (ARN) of the dataset. Arn *string - // The ID for the dataset that you want to create. This ID is unique per Region; - // for each Amazon Web Services account;. + // The ID for the dataset that you want to create. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. DataSetId *string // The ARN for the ingestion, which is triggered as a result of dataset creation if diff --git a/service/quicksight/api_op_CreateDataSource.go b/service/quicksight/api_op_CreateDataSource.go index b7f6e276d5f..66293f5068d 100644 --- a/service/quicksight/api_op_CreateDataSource.go +++ b/service/quicksight/api_op_CreateDataSource.go @@ -34,8 +34,8 @@ type CreateDataSourceInput struct { // This member is required. AwsAccountId *string - // An ID for the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // An ID for the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. // // This member is required. DataSourceId *string @@ -87,8 +87,8 @@ type CreateDataSourceOutput struct { // The status of creating the data source. CreationStatus types.ResourceStatus - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. DataSourceId *string // The Amazon Web Services request ID for this operation. diff --git a/service/quicksight/api_op_CreateNamespace.go b/service/quicksight/api_op_CreateNamespace.go index cf60809fd95..8be2e3d298f 100644 --- a/service/quicksight/api_op_CreateNamespace.go +++ b/service/quicksight/api_op_CreateNamespace.go @@ -65,8 +65,9 @@ type CreateNamespaceOutput struct { // The ARN of the QuickSight namespace you created. 
Arn *string - // The Region; that you want to use for the free SPICE capacity for the new - // namespace. This is set to the region that you run CreateNamespace in. + // The Amazon Web Services Region; that you want to use for the free SPICE capacity + // for the new namespace. This is set to the region that you run CreateNamespace + // in. CapacityRegion *string // The status of the creation of the namespace. This is an asynchronous process. A diff --git a/service/quicksight/api_op_CreateTemplate.go b/service/quicksight/api_op_CreateTemplate.go index 9cd1e93778d..85bdd1f69cd 100644 --- a/service/quicksight/api_op_CreateTemplate.go +++ b/service/quicksight/api_op_CreateTemplate.go @@ -49,16 +49,16 @@ type CreateTemplateInput struct { // require an Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of // the source template. For SourceAnalysis, specify the ARN of the source analysis. // The SourceTemplate ARN can contain any Amazon Web Services account; and any - // QuickSight-supported Region;. Use the DataSetReferences entity within - // SourceTemplate or SourceAnalysis to list the replacement datasets for the - // placeholders listed in the original. The schema in each dataset must match its - // placeholder. + // QuickSight-supported Amazon Web Services Region;. Use the DataSetReferences + // entity within SourceTemplate or SourceAnalysis to list the replacement datasets + // for the placeholders listed in the original. The schema in each dataset must + // match its placeholder. // // This member is required. SourceEntity *types.TemplateSourceEntity // An ID for the template that you want to create. This template is unique per - // Region; in each Amazon Web Services account;. + // Amazon Web Services Region; in each Amazon Web Services account;. // // This member is required. 
TemplateId *string diff --git a/service/quicksight/api_op_CreateTheme.go b/service/quicksight/api_op_CreateTheme.go index b33920d45c9..7de7f85cb07 100644 --- a/service/quicksight/api_op_CreateTheme.go +++ b/service/quicksight/api_op_CreateTheme.go @@ -57,8 +57,8 @@ type CreateThemeInput struct { // This member is required. Name *string - // An ID for the theme that you want to create. The theme ID is unique per Region; - // in each Amazon Web Services account;. + // An ID for the theme that you want to create. The theme ID is unique per Amazon + // Web Services Region; in each Amazon Web Services account;. // // This member is required. ThemeId *string diff --git a/service/quicksight/api_op_DeleteAccountCustomization.go b/service/quicksight/api_op_DeleteAccountCustomization.go index 3210a792da4..8adf9a57257 100644 --- a/service/quicksight/api_op_DeleteAccountCustomization.go +++ b/service/quicksight/api_op_DeleteAccountCustomization.go @@ -10,8 +10,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes all Amazon QuickSight customizations in this Region; for the specified -// Amazon Web Services account; and QuickSight namespace. +// Deletes all Amazon QuickSight customizations in this Amazon Web Services Region; +// for the specified Amazon Web Services account; and QuickSight namespace. func (c *Client) DeleteAccountCustomization(ctx context.Context, params *DeleteAccountCustomizationInput, optFns ...func(*Options)) (*DeleteAccountCustomizationOutput, error) { if params == nil { params = &DeleteAccountCustomizationInput{} @@ -30,7 +30,7 @@ func (c *Client) DeleteAccountCustomization(ctx context.Context, params *DeleteA type DeleteAccountCustomizationInput struct { // The ID for the Amazon Web Services account; that you want to delete QuickSight - // customizations from in this Region;. + // customizations from in this Amazon Web Services Region;. // // This member is required. 
AwsAccountId *string diff --git a/service/quicksight/api_op_DeleteDataSet.go b/service/quicksight/api_op_DeleteDataSet.go index d60bab7bdbd..2c3efa16be6 100644 --- a/service/quicksight/api_op_DeleteDataSet.go +++ b/service/quicksight/api_op_DeleteDataSet.go @@ -33,8 +33,8 @@ type DeleteDataSetInput struct { // This member is required. AwsAccountId *string - // The ID for the dataset that you want to create. This ID is unique per Region; - // for each Amazon Web Services account;. + // The ID for the dataset that you want to create. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. // // This member is required. DataSetId *string @@ -47,8 +47,8 @@ type DeleteDataSetOutput struct { // The Amazon Resource Name (ARN) of the dataset. Arn *string - // The ID for the dataset that you want to create. This ID is unique per Region; - // for each Amazon Web Services account;. + // The ID for the dataset that you want to create. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. DataSetId *string // The Amazon Web Services request ID for this operation. diff --git a/service/quicksight/api_op_DeleteDataSource.go b/service/quicksight/api_op_DeleteDataSource.go index 88a4e17722e..34f5f302de9 100644 --- a/service/quicksight/api_op_DeleteDataSource.go +++ b/service/quicksight/api_op_DeleteDataSource.go @@ -34,8 +34,8 @@ type DeleteDataSourceInput struct { // This member is required. AwsAccountId *string - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. // // This member is required. DataSourceId *string @@ -48,8 +48,8 @@ type DeleteDataSourceOutput struct { // The Amazon Resource Name (ARN) of the data source that you deleted. Arn *string - // The ID of the data source. 
This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. DataSourceId *string // The Amazon Web Services request ID for this operation. diff --git a/service/quicksight/api_op_DescribeAccountCustomization.go b/service/quicksight/api_op_DescribeAccountCustomization.go index 2819f2221b1..cb07ddf86e1 100644 --- a/service/quicksight/api_op_DescribeAccountCustomization.go +++ b/service/quicksight/api_op_DescribeAccountCustomization.go @@ -12,28 +12,30 @@ import ( ) // Describes the customizations associated with the provided Amazon Web Services -// account; and Amazon QuickSight namespace in an Region;. The QuickSight console -// evaluates which customizations to apply by running this API operation with the -// Resolved flag included. To determine what customizations display when you run -// this command, it can help to visualize the relationship of the entities -// involved. +// account; and Amazon QuickSight namespace in an Amazon Web Services Region;. The +// QuickSight console evaluates which customizations to apply by running this API +// operation with the Resolved flag included. To determine what customizations +// display when you run this command, it can help to visualize the relationship of +// the entities involved. // -// * Amazon Web Services account; - The Amazon Web Services account; -// exists at the top of the hierarchy. It has the potential to use all of the -// Regions; and AWS Services. When you subscribe to QuickSight, you choose one -// Region; to use as your home Region. That's where your free SPICE capacity is -// located. You can use QuickSight in any supported Region;. +// * Amazon Web Services account; - The Amazon Web Services +// account; exists at the top of the hierarchy. It has the potential to use all of +// the Amazon Web Services Regions; and AWS Services. 
When you subscribe to +// QuickSight, you choose one Amazon Web Services Region; to use as your home +// Region. That's where your free SPICE capacity is located. You can use QuickSight +// in any supported Amazon Web Services Region;. // -// * Region; - In each -// Region; where you sign in to QuickSight at least once, QuickSight acts as a -// separate instance of the same service. If you have a user directory, it resides -// in us-east-1, which is the US East (N. Virginia). Generally speaking, these -// users have access to QuickSight in any Region;, unless they are constrained to a -// namespace. To run the command in a different Region;, you change your Region -// settings. If you're using the AWS CLI, you can use one of the following -// options: +// * Amazon Web Services Region; - +// In each Amazon Web Services Region; where you sign in to QuickSight at least +// once, QuickSight acts as a separate instance of the same service. If you have a +// user directory, it resides in us-east-1, which is the US East (N. Virginia). +// Generally speaking, these users have access to QuickSight in any Amazon Web +// Services Region;, unless they are constrained to a namespace. To run the command +// in a different Amazon Web Services Region;, you change your Region settings. If +// you're using the AWS CLI, you can use one of the following options: // -// * Use command line options +// * Use +// command line options // (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-options.html). // // * @@ -41,8 +43,9 @@ import ( // (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html). // // * -// Run aws configure to change your default Region;. Use Enter to key the same -// settings for your keys. For more information, see Configuring the AWS CLI +// Run aws configure to change your default Amazon Web Services Region;. Use Enter +// to key the same settings for your keys. 
For more information, see Configuring +// the AWS CLI // (https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). // // * @@ -51,14 +54,16 @@ import ( // specific namespace, users and groups must also be part of the same namespace. // People who share a namespace are completely isolated from users and assets in // other namespaces, even if they are in the same Amazon Web Services account; and -// Region;. +// Amazon Web Services Region;. // -// * Applied customizations - Within an Region;, a set of QuickSight -// customizations can apply to an Amazon Web Services account; or to a namespace. -// Settings that you apply to a namespace override settings that you apply to an -// Amazon Web Services account;. All settings are isolated to a single Region;. To -// apply them in other Regions;, run the CreateAccountCustomization command in each -// Region; where you want to apply the same customizations. +// * Applied customizations - Within an Amazon Web +// Services Region;, a set of QuickSight customizations can apply to an Amazon Web +// Services account; or to a namespace. Settings that you apply to a namespace +// override settings that you apply to an Amazon Web Services account;. All +// settings are isolated to a single Amazon Web Services Region;. To apply them in +// other Amazon Web Services Regions;, run the CreateAccountCustomization command +// in each Amazon Web Services Region; where you want to apply the same +// customizations. func (c *Client) DescribeAccountCustomization(ctx context.Context, params *DescribeAccountCustomizationInput, optFns ...func(*Options)) (*DescribeAccountCustomizationOutput, error) { if params == nil { params = &DescribeAccountCustomizationInput{} @@ -98,7 +103,8 @@ type DescribeAccountCustomizationInput struct { type DescribeAccountCustomizationOutput struct { - // The QuickSight customizations that exist in the current Region;. 
+ // The QuickSight customizations that exist in the current Amazon Web Services + // Region;. AccountCustomization *types.AccountCustomization // The Amazon Resource Name (ARN) of the customization that's associated with this diff --git a/service/quicksight/api_op_DescribeAccountSettings.go b/service/quicksight/api_op_DescribeAccountSettings.go index 3e585a9c23b..5c513be4419 100644 --- a/service/quicksight/api_op_DescribeAccountSettings.go +++ b/service/quicksight/api_op_DescribeAccountSettings.go @@ -48,7 +48,7 @@ type DescribeAccountSettingsOutput struct { // QuickSight "account" even though it's technically not an account by itself. // Instead, it's a subscription to the QuickSight service for your Amazon Web // Services account;. The edition that you subscribe to applies to QuickSight in - // every Region; where you use it. + // every Amazon Web Services Region; where you use it. AccountSettings *types.AccountSettings // The Amazon Web Services request ID for this operation. diff --git a/service/quicksight/api_op_DescribeDataSet.go b/service/quicksight/api_op_DescribeDataSet.go index 013cc00da42..43512927e7f 100644 --- a/service/quicksight/api_op_DescribeDataSet.go +++ b/service/quicksight/api_op_DescribeDataSet.go @@ -34,8 +34,8 @@ type DescribeDataSetInput struct { // This member is required. AwsAccountId *string - // The ID for the dataset that you want to create. This ID is unique per Region; - // for each Amazon Web Services account;. + // The ID for the dataset that you want to create. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. // // This member is required. 
DataSetId *string diff --git a/service/quicksight/api_op_DescribeDataSetPermissions.go b/service/quicksight/api_op_DescribeDataSetPermissions.go index e347748cd40..3310e88f412 100644 --- a/service/quicksight/api_op_DescribeDataSetPermissions.go +++ b/service/quicksight/api_op_DescribeDataSetPermissions.go @@ -35,8 +35,8 @@ type DescribeDataSetPermissionsInput struct { // This member is required. AwsAccountId *string - // The ID for the dataset that you want to create. This ID is unique per Region; - // for each Amazon Web Services account;. + // The ID for the dataset that you want to create. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. // // This member is required. DataSetId *string @@ -49,8 +49,8 @@ type DescribeDataSetPermissionsOutput struct { // The Amazon Resource Name (ARN) of the dataset. DataSetArn *string - // The ID for the dataset that you want to create. This ID is unique per Region; - // for each Amazon Web Services account;. + // The ID for the dataset that you want to create. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. DataSetId *string // A list of resource permissions on the dataset. diff --git a/service/quicksight/api_op_DescribeDataSource.go b/service/quicksight/api_op_DescribeDataSource.go index 1e077883a45..ef2635e1217 100644 --- a/service/quicksight/api_op_DescribeDataSource.go +++ b/service/quicksight/api_op_DescribeDataSource.go @@ -34,8 +34,8 @@ type DescribeDataSourceInput struct { // This member is required. AwsAccountId *string - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. // // This member is required. 
DataSourceId *string diff --git a/service/quicksight/api_op_DescribeDataSourcePermissions.go b/service/quicksight/api_op_DescribeDataSourcePermissions.go index f9d3ee8ceb2..0e2a610aa16 100644 --- a/service/quicksight/api_op_DescribeDataSourcePermissions.go +++ b/service/quicksight/api_op_DescribeDataSourcePermissions.go @@ -34,8 +34,8 @@ type DescribeDataSourcePermissionsInput struct { // This member is required. AwsAccountId *string - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. // // This member is required. DataSourceId *string @@ -48,8 +48,8 @@ type DescribeDataSourcePermissionsOutput struct { // The Amazon Resource Name (ARN) of the data source. DataSourceArn *string - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. DataSourceId *string // A list of resource permissions on the data source. diff --git a/service/quicksight/api_op_DescribeNamespace.go b/service/quicksight/api_op_DescribeNamespace.go index 71085e42bff..8a0b026cbc0 100644 --- a/service/quicksight/api_op_DescribeNamespace.go +++ b/service/quicksight/api_op_DescribeNamespace.go @@ -46,10 +46,10 @@ type DescribeNamespaceInput struct { type DescribeNamespaceOutput struct { // The information about the namespace that you're describing. The response - // includes the namespace ARN, name, Region;, creation status, and identity store. - // DescribeNamespace also works for namespaces that are in the process of being - // created. For incomplete namespaces, this API operation lists the namespace error - // types and messages associated with the creation process. 
+ // includes the namespace ARN, name, Amazon Web Services Region;, creation status, + // and identity store. DescribeNamespace also works for namespaces that are in the + // process of being created. For incomplete namespaces, this API operation lists + // the namespace error types and messages associated with the creation process. Namespace *types.NamespaceInfoV2 // The Amazon Web Services request ID for this operation. diff --git a/service/quicksight/api_op_GetDashboardEmbedUrl.go b/service/quicksight/api_op_GetDashboardEmbedUrl.go index 13d46058d65..52acfbbfc77 100644 --- a/service/quicksight/api_op_GetDashboardEmbedUrl.go +++ b/service/quicksight/api_op_GetDashboardEmbedUrl.go @@ -29,9 +29,9 @@ import ( // resulting user session is valid for 10 hours. // // For more information, see -// Embedded Analytics -// (https://docs.aws.amazon.com/quicksight/latest/user/embedded-analytics.html) in -// the Amazon QuickSight User Guide. For more information about the high-level +// Embedding Analytics Using GetDashboardEmbedUrl +// (https://docs.aws.amazon.com/quicksight/latest/user/embedded-analytics-deprecated.html) +// in the Amazon QuickSight User Guide. For more information about the high-level // steps for embedding and for an interactive demo of the ways you can customize // embedding, visit the Amazon QuickSight Developer Portal // (https://docs.aws.amazon.com/quicksight/latest/user/quicksight-dev-portal.html). 
diff --git a/service/quicksight/api_op_GetSessionEmbedUrl.go b/service/quicksight/api_op_GetSessionEmbedUrl.go index d47d0a7485a..1a597fe3700 100644 --- a/service/quicksight/api_op_GetSessionEmbedUrl.go +++ b/service/quicksight/api_op_GetSessionEmbedUrl.go @@ -21,8 +21,8 @@ import ( // information, see the following sections in the Amazon QuickSight User Guide: // // * -// Embedding the Amazon QuickSight Console -// (https://docs.aws.amazon.com/quicksight/latest/user/embedding-the-quicksight-console.html) +// Embedding the Amazon QuickSight Console Using GetSessionEmbedUrl +// (https://docs.aws.amazon.com/quicksight/latest/user/embedded-analytics-full-console-for-authenticated-users-get.html) // // * // Customizing Access to the Amazon QuickSight Console diff --git a/service/quicksight/api_op_ListDataSets.go b/service/quicksight/api_op_ListDataSets.go index ba5cfce07c9..35ab34ece84 100644 --- a/service/quicksight/api_op_ListDataSets.go +++ b/service/quicksight/api_op_ListDataSets.go @@ -13,7 +13,7 @@ import ( ) // Lists all of the datasets belonging to the current Amazon Web Services account; -// in an Region;. The permissions resource is +// in an Amazon Web Services Region;. The permissions resource is // arn:aws:quicksight:region:aws-account-id:dataset/*. func (c *Client) ListDataSets(ctx context.Context, params *ListDataSetsInput, optFns ...func(*Options)) (*ListDataSetsOutput, error) { if params == nil { diff --git a/service/quicksight/api_op_ListDataSources.go b/service/quicksight/api_op_ListDataSources.go index fdf72d1b318..3f99eac5551 100644 --- a/service/quicksight/api_op_ListDataSources.go +++ b/service/quicksight/api_op_ListDataSources.go @@ -12,8 +12,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists data sources in current Region; that belong to this Amazon Web Services -// account;. +// Lists data sources in current Amazon Web Services Region; that belong to this +// Amazon Web Services account;. 
func (c *Client) ListDataSources(ctx context.Context, params *ListDataSourcesInput, optFns ...func(*Options)) (*ListDataSourcesOutput, error) { if params == nil { params = &ListDataSourcesInput{} diff --git a/service/quicksight/api_op_ListNamespaces.go b/service/quicksight/api_op_ListNamespaces.go index de1fc204b14..5b596c92dcb 100644 --- a/service/quicksight/api_op_ListNamespaces.go +++ b/service/quicksight/api_op_ListNamespaces.go @@ -48,8 +48,8 @@ type ListNamespacesInput struct { type ListNamespacesOutput struct { // The information about the namespaces in this Amazon Web Services account;. The - // response includes the namespace ARN, name, Region;, notification email address, - // creation status, and identity store. + // response includes the namespace ARN, name, Amazon Web Services Region;, + // notification email address, creation status, and identity store. Namespaces []types.NamespaceInfoV2 // A pagination token that can be used in a subsequent request. diff --git a/service/quicksight/api_op_UpdateAccountCustomization.go b/service/quicksight/api_op_UpdateAccountCustomization.go index 55ede12548b..10d8049628d 100644 --- a/service/quicksight/api_op_UpdateAccountCustomization.go +++ b/service/quicksight/api_op_UpdateAccountCustomization.go @@ -11,12 +11,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates Amazon QuickSight customizations the current Region;. Currently, the -// only customization you can use is a theme. You can use customizations for your -// Amazon Web Services account; or, if you specify a namespace, for a QuickSight -// namespace instead. Customizations that apply to a namespace override -// customizations that apply to an Amazon Web Services account;. To find out which -// customizations apply, use the DescribeAccountCustomization API operation. +// Updates Amazon QuickSight customizations the current Amazon Web Services +// Region;. Currently, the only customization you can use is a theme. 
You can use +// customizations for your Amazon Web Services account; or, if you specify a +// namespace, for a QuickSight namespace instead. Customizations that apply to a +// namespace override customizations that apply to an Amazon Web Services account;. +// To find out which customizations apply, use the DescribeAccountCustomization API +// operation. func (c *Client) UpdateAccountCustomization(ctx context.Context, params *UpdateAccountCustomizationInput, optFns ...func(*Options)) (*UpdateAccountCustomizationOutput, error) { if params == nil { params = &UpdateAccountCustomizationInput{} @@ -34,7 +35,8 @@ func (c *Client) UpdateAccountCustomization(ctx context.Context, params *UpdateA type UpdateAccountCustomizationInput struct { - // The QuickSight customizations you're updating in the current Region;. + // The QuickSight customizations you're updating in the current Amazon Web Services + // Region;. // // This member is required. AccountCustomization *types.AccountCustomization @@ -53,7 +55,8 @@ type UpdateAccountCustomizationInput struct { type UpdateAccountCustomizationOutput struct { - // The QuickSight customizations you're updating in the current Region;. + // The QuickSight customizations you're updating in the current Amazon Web Services + // Region;. AccountCustomization *types.AccountCustomization // The Amazon Resource Name (ARN) for the updated customization for this Amazon Web diff --git a/service/quicksight/api_op_UpdateDashboard.go b/service/quicksight/api_op_UpdateDashboard.go index a48cc8be3b5..1cead027474 100644 --- a/service/quicksight/api_op_UpdateDashboard.go +++ b/service/quicksight/api_op_UpdateDashboard.go @@ -54,10 +54,10 @@ type UpdateDashboardInput struct { // you need to update a dashboard from an analysis, first convert the analysis to a // template by using the CreateTemplate API operation. For SourceTemplate, specify // the Amazon Resource Name (ARN) of the source template. 
The SourceTemplate ARN - // can contain any Amazon Web Services account; and any QuickSight-supported - // Region;. Use the DataSetReferences entity within SourceTemplate to list the - // replacement datasets for the placeholders listed in the original. The schema in - // each dataset must match its placeholder. + // can contain any Amazon Web Services account; and any QuickSight-supported Amazon + // Web Services Region;. Use the DataSetReferences entity within SourceTemplate to + // list the replacement datasets for the placeholders listed in the original. The + // schema in each dataset must match its placeholder. // // This member is required. SourceEntity *types.DashboardSourceEntity diff --git a/service/quicksight/api_op_UpdateDataSet.go b/service/quicksight/api_op_UpdateDataSet.go index 5d9614b6fc0..bc89049a6fb 100644 --- a/service/quicksight/api_op_UpdateDataSet.go +++ b/service/quicksight/api_op_UpdateDataSet.go @@ -34,8 +34,8 @@ type UpdateDataSetInput struct { // This member is required. AwsAccountId *string - // The ID for the dataset that you want to update. This ID is unique per Region; - // for each Amazon Web Services account;. + // The ID for the dataset that you want to update. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. // // This member is required. DataSetId *string @@ -84,8 +84,8 @@ type UpdateDataSetOutput struct { // The Amazon Resource Name (ARN) of the dataset. Arn *string - // The ID for the dataset that you want to create. This ID is unique per Region; - // for each Amazon Web Services account;. + // The ID for the dataset that you want to create. This ID is unique per Amazon Web + // Services Region; for each Amazon Web Services account;. 
DataSetId *string // The ARN for the ingestion, which is triggered as a result of dataset creation if diff --git a/service/quicksight/api_op_UpdateDataSetPermissions.go b/service/quicksight/api_op_UpdateDataSetPermissions.go index e8695313782..c1424fe9cf1 100644 --- a/service/quicksight/api_op_UpdateDataSetPermissions.go +++ b/service/quicksight/api_op_UpdateDataSetPermissions.go @@ -36,7 +36,7 @@ type UpdateDataSetPermissionsInput struct { AwsAccountId *string // The ID for the dataset whose permissions you want to update. This ID is unique - // per Region; for each Amazon Web Services account;. + // per Amazon Web Services Region; for each Amazon Web Services account;. // // This member is required. DataSetId *string @@ -56,7 +56,7 @@ type UpdateDataSetPermissionsOutput struct { DataSetArn *string // The ID for the dataset whose permissions you want to update. This ID is unique - // per Region; for each Amazon Web Services account;. + // per Amazon Web Services Region; for each Amazon Web Services account;. DataSetId *string // The Amazon Web Services request ID for this operation. diff --git a/service/quicksight/api_op_UpdateDataSource.go b/service/quicksight/api_op_UpdateDataSource.go index 3f3e5638b6e..e06e4d6f3f9 100644 --- a/service/quicksight/api_op_UpdateDataSource.go +++ b/service/quicksight/api_op_UpdateDataSource.go @@ -34,8 +34,8 @@ type UpdateDataSourceInput struct { // This member is required. AwsAccountId *string - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. // // This member is required. DataSourceId *string @@ -68,8 +68,8 @@ type UpdateDataSourceOutput struct { // The Amazon Resource Name (ARN) of the data source. Arn *string - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. 
This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. DataSourceId *string // The Amazon Web Services request ID for this operation. diff --git a/service/quicksight/api_op_UpdateDataSourcePermissions.go b/service/quicksight/api_op_UpdateDataSourcePermissions.go index f7e79b90b47..b5327721d9c 100644 --- a/service/quicksight/api_op_UpdateDataSourcePermissions.go +++ b/service/quicksight/api_op_UpdateDataSourcePermissions.go @@ -34,8 +34,8 @@ type UpdateDataSourcePermissionsInput struct { // This member is required. AwsAccountId *string - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. // // This member is required. DataSourceId *string @@ -54,8 +54,8 @@ type UpdateDataSourcePermissionsOutput struct { // The Amazon Resource Name (ARN) of the data source. DataSourceArn *string - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. DataSourceId *string // The Amazon Web Services request ID for this operation. diff --git a/service/quicksight/api_op_UpdateTemplate.go b/service/quicksight/api_op_UpdateTemplate.go index dad30e0b78a..3388f1f7bed 100644 --- a/service/quicksight/api_op_UpdateTemplate.go +++ b/service/quicksight/api_op_UpdateTemplate.go @@ -42,10 +42,10 @@ type UpdateTemplateInput struct { // require an Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of // the source template. For SourceAnalysis, specify the ARN of the source analysis. // The SourceTemplate ARN can contain any Amazon Web Services account; and any - // QuickSight-supported Region;. 
Use the DataSetReferences entity within - // SourceTemplate or SourceAnalysis to list the replacement datasets for the - // placeholders listed in the original. The schema in each dataset must match its - // placeholder. + // QuickSight-supported Amazon Web Services Region;. Use the DataSetReferences + // entity within SourceTemplate or SourceAnalysis to list the replacement datasets + // for the placeholders listed in the original. The schema in each dataset must + // match its placeholder. // // This member is required. SourceEntity *types.TemplateSourceEntity diff --git a/service/quicksight/doc.go b/service/quicksight/doc.go index e92c6536b85..48ffdd9439c 100644 --- a/service/quicksight/doc.go +++ b/service/quicksight/doc.go @@ -4,8 +4,8 @@ // Amazon QuickSight. // // Amazon QuickSight API Reference Amazon QuickSight is a fully managed, serverless -// business intelligence service for the Cloud that makes it easy to extend data -// and insights to every user in your organization. This API reference contains -// documentation for a programming interface that you can use to manage Amazon -// QuickSight. +// business intelligence service for the Amazon Web Services Cloud that makes it +// easy to extend data and insights to every user in your organization. This API +// reference contains documentation for a programming interface that you can use to +// manage Amazon QuickSight. package quicksight diff --git a/service/quicksight/types/types.go b/service/quicksight/types/types.go index bc3828cf3a1..2b29d6e8265 100644 --- a/service/quicksight/types/types.go +++ b/service/quicksight/types/types.go @@ -8,7 +8,7 @@ import ( ) // The Amazon QuickSight customizations associated with your Amazon Web Services -// account; or a QuickSight namespace in a specific Region;. +// account; or a QuickSight namespace in a specific Amazon Web Services Region;. type AccountCustomization struct { // The default theme for this QuickSight subscription. 
@@ -830,8 +830,8 @@ type DataSource struct { // The time that this data source was created. CreatedTime *time.Time - // The ID of the data source. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the data source. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. DataSourceId *string // The parameters that Amazon QuickSight uses to connect to your underlying source. @@ -1625,7 +1625,7 @@ type NamespaceInfoV2 struct { // The namespace ARN. Arn *string - // The namespace Region;. + // The namespace Amazon Web Services Region;. CapacityRegion *string // The creation status of a namespace that is not yet completely created. @@ -2326,8 +2326,8 @@ type Template struct { // The display name of the template. Name *string - // The ID for the template. This is unique per Region; for each Amazon Web Services - // account;. + // The ID for the template. This is unique per Amazon Web Services Region; for each + // Amazon Web Services account;. TemplateId *string // A structure describing the versions of the template. @@ -2421,8 +2421,8 @@ type TemplateSummary struct { // A display name for the template. Name *string - // The ID of the template. This ID is unique per Region; for each Amazon Web - // Services account;. + // The ID of the template. This ID is unique per Amazon Web Services Region; for + // each Amazon Web Services account;. TemplateId *string noSmithyDocumentSerde @@ -2597,8 +2597,8 @@ type ThemeSummary struct { // the display name for the theme. Name *string - // The ID of the theme. This ID is unique per Region; for each Amazon Web Services - // account;. + // The ID of the theme. This ID is unique per Amazon Web Services Region; for each + // Amazon Web Services account;. 
ThemeId *string noSmithyDocumentSerde diff --git a/service/route53/types/types.go b/service/route53/types/types.go index 2e73527e43a..86fd5f4cbe3 100644 --- a/service/route53/types/types.go +++ b/service/route53/types/types.go @@ -821,7 +821,7 @@ type HealthCheckConfig struct { // If you don't specify a value for // FullyQualifiedDomainName, Route 53 substitutes the value of IPAddress in the // Host header in each of the preceding cases. If you don't specify a value for - // IPAddress : Route 53 sends a DNS request to the domain that you specify for + // IPAddress: Route 53 sends a DNS request to the domain that you specify for // FullyQualifiedDomainName at the interval that you specify for RequestInterval. // Using an IPv4 address that DNS returns, Route 53 then checks the health of the // endpoint. If you don't specify a value for IPAddress, Route 53 uses only IPv4 to diff --git a/service/route53resolver/api_op_CreateResolverQueryLogConfig.go b/service/route53resolver/api_op_CreateResolverQueryLogConfig.go index b139d57b6b0..36eee20ff7b 100644 --- a/service/route53resolver/api_op_CreateResolverQueryLogConfig.go +++ b/service/route53resolver/api_op_CreateResolverQueryLogConfig.go @@ -19,10 +19,10 @@ import ( // AssociateResolverQueryLogConfig. For more information, see // AssociateResolverQueryLogConfig // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverQueryLogConfig.html). -// You can optionally use AWS Resource Access Manager (AWS RAM) to share a query -// logging configuration with other AWS accounts. The other accounts can then -// associate VPCs with the configuration. The query logs that Resolver creates for -// a configuration include all DNS queries that originate in all VPCs that are +// You can optionally use Resource Access Manager (RAM) to share a query logging +// configuration with other Amazon Web Services accounts. The other accounts can +// then associate VPCs with the configuration. 
The query logs that Resolver creates +// for a configuration include all DNS queries that originate in all VPCs that are // associated with the configuration. func (c *Client) CreateResolverQueryLogConfig(ctx context.Context, params *CreateResolverQueryLogConfigInput, optFns ...func(*Options)) (*CreateResolverQueryLogConfigOutput, error) { if params == nil { diff --git a/service/route53resolver/api_op_DeleteResolverQueryLogConfig.go b/service/route53resolver/api_op_DeleteResolverQueryLogConfig.go index 5243d9ad73c..c13d9e56916 100644 --- a/service/route53resolver/api_op_DeleteResolverQueryLogConfig.go +++ b/service/route53resolver/api_op_DeleteResolverQueryLogConfig.go @@ -14,10 +14,10 @@ import ( // Deletes a query logging configuration. When you delete a configuration, Resolver // stops logging DNS queries for all of the Amazon VPCs that are associated with // the configuration. This also applies if the query logging configuration is -// shared with other AWS accounts, and the other accounts have associated VPCs with -// the shared configuration. Before you can delete a query logging configuration, -// you must first disassociate all VPCs from the configuration. See -// DisassociateResolverQueryLogConfig +// shared with other Amazon Web Services accounts, and the other accounts have +// associated VPCs with the shared configuration. Before you can delete a query +// logging configuration, you must first disassociate all VPCs from the +// configuration. See DisassociateResolverQueryLogConfig // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html). 
// If you used Resource Access Manager (RAM) to share a query logging configuration // with other accounts, you must stop sharing the configuration before you can diff --git a/service/route53resolver/api_op_DisassociateResolverQueryLogConfig.go b/service/route53resolver/api_op_DisassociateResolverQueryLogConfig.go index e1299fc096c..aaa768d40dd 100644 --- a/service/route53resolver/api_op_DisassociateResolverQueryLogConfig.go +++ b/service/route53resolver/api_op_DisassociateResolverQueryLogConfig.go @@ -13,9 +13,9 @@ import ( // Disassociates a VPC from a query logging configuration. Before you can delete a // query logging configuration, you must first disassociate all VPCs from the -// configuration. If you used AWS Resource Access Manager (AWS RAM) to share a -// query logging configuration with other accounts, VPCs can be disassociated from -// the configuration in the following ways: +// configuration. If you used Resource Access Manager (RAM) to share a query +// logging configuration with other accounts, VPCs can be disassociated from the +// configuration in the following ways: // // * The accounts that you shared the // configuration with can disassociate VPCs from the configuration. diff --git a/service/route53resolver/api_op_GetFirewallRuleGroupPolicy.go b/service/route53resolver/api_op_GetFirewallRuleGroupPolicy.go index 136e87642e7..d1daab29984 100644 --- a/service/route53resolver/api_op_GetFirewallRuleGroupPolicy.go +++ b/service/route53resolver/api_op_GetFirewallRuleGroupPolicy.go @@ -10,9 +10,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the AWS Identity and Access Management (AWS IAM) policy for sharing the -// specified rule group. You can use the policy to share the rule group using AWS -// Resource Access Manager (AWS RAM). +// Returns the Identity and Access Management (Amazon Web Services IAM) policy for +// sharing the specified rule group. 
You can use the policy to share the rule group +// using Resource Access Manager (RAM). func (c *Client) GetFirewallRuleGroupPolicy(ctx context.Context, params *GetFirewallRuleGroupPolicyInput, optFns ...func(*Options)) (*GetFirewallRuleGroupPolicyOutput, error) { if params == nil { params = &GetFirewallRuleGroupPolicyInput{} @@ -40,9 +40,9 @@ type GetFirewallRuleGroupPolicyInput struct { type GetFirewallRuleGroupPolicyOutput struct { - // The AWS Identity and Access Management (AWS IAM) policy for sharing the - // specified rule group. You can use the policy to share the rule group using AWS - // Resource Access Manager (AWS RAM). + // The Identity and Access Management (Amazon Web Services IAM) policy for sharing + // the specified rule group. You can use the policy to share the rule group using + // Resource Access Manager (RAM). FirewallRuleGroupPolicy *string // Metadata pertaining to the operation's result. diff --git a/service/route53resolver/api_op_GetResolverQueryLogConfigPolicy.go b/service/route53resolver/api_op_GetResolverQueryLogConfigPolicy.go index bed037a54b3..3a5cd96ec71 100644 --- a/service/route53resolver/api_op_GetResolverQueryLogConfigPolicy.go +++ b/service/route53resolver/api_op_GetResolverQueryLogConfigPolicy.go @@ -12,7 +12,7 @@ import ( // Gets information about a query logging policy. A query logging policy specifies // the Resolver query logging operations and resources that you want to allow -// another AWS account to be able to use. +// another Amazon Web Services account to be able to use. 
func (c *Client) GetResolverQueryLogConfigPolicy(ctx context.Context, params *GetResolverQueryLogConfigPolicyInput, optFns ...func(*Options)) (*GetResolverQueryLogConfigPolicyOutput, error) { if params == nil { params = &GetResolverQueryLogConfigPolicyInput{} diff --git a/service/route53resolver/api_op_ListResolverDnssecConfigs.go b/service/route53resolver/api_op_ListResolverDnssecConfigs.go index d7d43f8cf50..1258f6063b1 100644 --- a/service/route53resolver/api_op_ListResolverDnssecConfigs.go +++ b/service/route53resolver/api_op_ListResolverDnssecConfigs.go @@ -13,7 +13,7 @@ import ( ) // Lists the configurations for DNSSEC validation that are associated with the -// current AWS account. +// current Amazon Web Services account. func (c *Client) ListResolverDnssecConfigs(ctx context.Context, params *ListResolverDnssecConfigsInput, optFns ...func(*Options)) (*ListResolverDnssecConfigsOutput, error) { if params == nil { params = &ListResolverDnssecConfigsInput{} @@ -39,11 +39,11 @@ type ListResolverDnssecConfigsInput struct { // for MaxResults, Route 53 returns up to 100 configuration per page. MaxResults *int32 - // (Optional) If the current AWS account has more than MaxResults DNSSEC - // configurations, use NextToken to get the second and subsequent pages of results. - // For the first ListResolverDnssecConfigs request, omit this value. For the second - // and subsequent requests, get the value of NextToken from the previous response - // and specify that value for NextToken in the request. + // (Optional) If the current Amazon Web Services account has more than MaxResults + // DNSSEC configurations, use NextToken to get the second and subsequent pages of + // results. For the first ListResolverDnssecConfigs request, omit this value. For + // the second and subsequent requests, get the value of NextToken from the previous + // response and specify that value for NextToken in the request. 
NextToken *string noSmithyDocumentSerde @@ -52,9 +52,9 @@ type ListResolverDnssecConfigsInput struct { type ListResolverDnssecConfigsOutput struct { // If a response includes the last of the DNSSEC configurations that are associated - // with the current AWS account, NextToken doesn't appear in the response. If a - // response doesn't include the last of the configurations, you can get more - // configurations by submitting another ListResolverDnssecConfigs + // with the current Amazon Web Services account, NextToken doesn't appear in the + // response. If a response doesn't include the last of the configurations, you can + // get more configurations by submitting another ListResolverDnssecConfigs // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_ListResolverDnssecConfigs.html) // request. Get the value of NextToken that Amazon Route 53 returned in the // previous response and include it in NextToken in the next request. @@ -63,7 +63,7 @@ type ListResolverDnssecConfigsOutput struct { // An array that contains one ResolverDnssecConfig // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_ResolverDnssecConfig.html) // element for each configuration for DNSSEC validation that is associated with the - // current AWS account. + // current Amazon Web Services account. ResolverDnssecConfigs []types.ResolverDnssecConfig // Metadata pertaining to the operation's result. diff --git a/service/route53resolver/api_op_ListResolverEndpoints.go b/service/route53resolver/api_op_ListResolverEndpoints.go index a3920f012fd..f0de85304c3 100644 --- a/service/route53resolver/api_op_ListResolverEndpoints.go +++ b/service/route53resolver/api_op_ListResolverEndpoints.go @@ -12,8 +12,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists all the Resolver endpoints that were created using the current AWS -// account. +// Lists all the Resolver endpoints that were created using the current Amazon Web +// Services account. 
func (c *Client) ListResolverEndpoints(ctx context.Context, params *ListResolverEndpointsInput, optFns ...func(*Options)) (*ListResolverEndpointsOutput, error) { if params == nil { params = &ListResolverEndpointsInput{} @@ -61,8 +61,8 @@ type ListResolverEndpointsOutput struct { // the next request, specify the value of NextToken from the previous response. NextToken *string - // The Resolver endpoints that were created by using the current AWS account, and - // that match the specified filters, if any. + // The Resolver endpoints that were created by using the current Amazon Web + // Services account, and that match the specified filters, if any. ResolverEndpoints []types.ResolverEndpoint // Metadata pertaining to the operation's result. diff --git a/service/route53resolver/api_op_ListResolverQueryLogConfigs.go b/service/route53resolver/api_op_ListResolverQueryLogConfigs.go index 3cec1b5b264..ed1d035848b 100644 --- a/service/route53resolver/api_op_ListResolverQueryLogConfigs.go +++ b/service/route53resolver/api_op_ListResolverQueryLogConfigs.go @@ -76,34 +76,35 @@ type ListResolverQueryLogConfigsInput struct { // * Name: The name of the configuration // // * - // OwnerId: The AWS account number of the account that created the configuration + // OwnerId: The Amazon Web Services account number of the account that created the + // configuration // - // * - // ShareStatus: Whether the configuration is shared with other AWS accounts or - // shared with the current account by another AWS account. Sharing is configured - // through AWS Resource Access Manager (AWS RAM). + // * ShareStatus: Whether the configuration is shared with other + // Amazon Web Services accounts or shared with the current account by another + // Amazon Web Services account. Sharing is configured through Resource Access + // Manager (RAM). // - // * Status: The current status of - // the configuration. Valid values include the following: + // * Status: The current status of the configuration. 
Valid values + // include the following: // - // * CREATING: Resolver is - // creating the query logging configuration. + // * CREATING: Resolver is creating the query logging + // configuration. // - // * CREATED: The query logging - // configuration was successfully created. Resolver is logging queries that - // originate in the specified VPC. + // * CREATED: The query logging configuration was successfully + // created. Resolver is logging queries that originate in the specified VPC. // - // * DELETING: Resolver is deleting this query - // logging configuration. + // * + // DELETING: Resolver is deleting this query logging configuration. // - // * FAILED: Resolver either couldn't create or couldn't - // delete the query logging configuration. Here are two common causes: + // * FAILED: + // Resolver either couldn't create or couldn't delete the query logging + // configuration. Here are two common causes: // - // * The - // specified destination (for example, an Amazon S3 bucket) was deleted. + // * The specified destination (for + // example, an Amazon S3 bucket) was deleted. // - // * - // Permissions don't allow sending logs to the destination. + // * Permissions don't allow sending + // logs to the destination. SortBy *string // If you specified a value for SortBy, the order that you want query logging diff --git a/service/route53resolver/api_op_ListResolverRuleAssociations.go b/service/route53resolver/api_op_ListResolverRuleAssociations.go index cb934e0eeeb..e7fcfd3c977 100644 --- a/service/route53resolver/api_op_ListResolverRuleAssociations.go +++ b/service/route53resolver/api_op_ListResolverRuleAssociations.go @@ -13,7 +13,7 @@ import ( ) // Lists the associations that were created between Resolver rules and VPCs using -// the current AWS account. +// the current Amazon Web Services account. 
func (c *Client) ListResolverRuleAssociations(ctx context.Context, params *ListResolverRuleAssociationsInput, optFns ...func(*Options)) (*ListResolverRuleAssociationsOutput, error) { if params == nil { params = &ListResolverRuleAssociationsInput{} @@ -64,7 +64,8 @@ type ListResolverRuleAssociationsOutput struct { NextToken *string // The associations that were created between Resolver rules and VPCs using the - // current AWS account, and that match the specified filters, if any. + // current Amazon Web Services account, and that match the specified filters, if + // any. ResolverRuleAssociations []types.ResolverRuleAssociation // Metadata pertaining to the operation's result. diff --git a/service/route53resolver/api_op_ListResolverRules.go b/service/route53resolver/api_op_ListResolverRules.go index 92f9daa80fd..4e0c7a6d4ef 100644 --- a/service/route53resolver/api_op_ListResolverRules.go +++ b/service/route53resolver/api_op_ListResolverRules.go @@ -12,7 +12,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the Resolver rules that were created using the current AWS account. +// Lists the Resolver rules that were created using the current Amazon Web Services +// account. func (c *Client) ListResolverRules(ctx context.Context, params *ListResolverRulesInput, optFns ...func(*Options)) (*ListResolverRulesOutput, error) { if params == nil { params = &ListResolverRulesInput{} @@ -61,8 +62,8 @@ type ListResolverRulesOutput struct { // the next request, specify the value of NextToken from the previous response. NextToken *string - // The Resolver rules that were created using the current AWS account and that - // match the specified filters, if any. + // The Resolver rules that were created using the current Amazon Web Services + // account and that match the specified filters, if any. ResolverRules []types.ResolverRule // Metadata pertaining to the operation's result. 
diff --git a/service/route53resolver/api_op_PutFirewallRuleGroupPolicy.go b/service/route53resolver/api_op_PutFirewallRuleGroupPolicy.go index 20d48801a47..676bb00fb1c 100644 --- a/service/route53resolver/api_op_PutFirewallRuleGroupPolicy.go +++ b/service/route53resolver/api_op_PutFirewallRuleGroupPolicy.go @@ -10,9 +10,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Attaches an AWS Identity and Access Management (AWS IAM) policy for sharing the -// rule group. You can use the policy to share the rule group using AWS Resource -// Access Manager (AWS RAM). +// Attaches an Identity and Access Management (Amazon Web Services IAM) policy for +// sharing the rule group. You can use the policy to share the rule group using +// Resource Access Manager (RAM). func (c *Client) PutFirewallRuleGroupPolicy(ctx context.Context, params *PutFirewallRuleGroupPolicyInput, optFns ...func(*Options)) (*PutFirewallRuleGroupPolicyOutput, error) { if params == nil { params = &PutFirewallRuleGroupPolicyInput{} @@ -35,8 +35,8 @@ type PutFirewallRuleGroupPolicyInput struct { // This member is required. Arn *string - // The AWS Identity and Access Management (AWS IAM) policy to attach to the rule - // group. + // The Identity and Access Management (Amazon Web Services IAM) policy to attach to + // the rule group. // // This member is required. 
FirewallRuleGroupPolicy *string diff --git a/service/route53resolver/api_op_PutResolverQueryLogConfigPolicy.go b/service/route53resolver/api_op_PutResolverQueryLogConfigPolicy.go index 71cdf9d5c1b..d3416c0452b 100644 --- a/service/route53resolver/api_op_PutResolverQueryLogConfigPolicy.go +++ b/service/route53resolver/api_op_PutResolverQueryLogConfigPolicy.go @@ -10,9 +10,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Specifies an AWS account that you want to share a query logging configuration -// with, the query logging configuration that you want to share, and the operations -// that you want the account to be able to perform on the configuration. +// Specifies an Amazon Web Services account that you want to share a query logging +// configuration with, the query logging configuration that you want to share, and +// the operations that you want the account to be able to perform on the +// configuration. func (c *Client) PutResolverQueryLogConfigPolicy(ctx context.Context, params *PutResolverQueryLogConfigPolicyInput, optFns ...func(*Options)) (*PutResolverQueryLogConfigPolicyOutput, error) { if params == nil { params = &PutResolverQueryLogConfigPolicyInput{} @@ -35,10 +36,10 @@ type PutResolverQueryLogConfigPolicyInput struct { // This member is required. Arn *string - // An AWS Identity and Access Management policy statement that lists the query - // logging configurations that you want to share with another AWS account and the - // operations that you want the account to be able to perform. You can specify the - // following operations in the Actions section of the statement: + // An Identity and Access Management policy statement that lists the query logging + // configurations that you want to share with another Amazon Web Services account + // and the operations that you want the account to be able to perform. 
You can + // specify the following operations in the Actions section of the statement: // // * // route53resolver:AssociateResolverQueryLogConfig diff --git a/service/route53resolver/api_op_PutResolverRulePolicy.go b/service/route53resolver/api_op_PutResolverRulePolicy.go index c6236da7435..8f4a30e9bbe 100644 --- a/service/route53resolver/api_op_PutResolverRulePolicy.go +++ b/service/route53resolver/api_op_PutResolverRulePolicy.go @@ -10,9 +10,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Specifies an AWS rule that you want to share with another account, the account -// that you want to share the rule with, and the operations that you want the -// account to be able to perform on the rule. +// Specifies an Amazon Web Services rule that you want to share with another +// account, the account that you want to share the rule with, and the operations +// that you want the account to be able to perform on the rule. func (c *Client) PutResolverRulePolicy(ctx context.Context, params *PutResolverRulePolicyInput, optFns ...func(*Options)) (*PutResolverRulePolicyOutput, error) { if params == nil { params = &PutResolverRulePolicyInput{} @@ -36,15 +36,15 @@ type PutResolverRulePolicyInput struct { // This member is required. Arn *string - // An AWS Identity and Access Management policy statement that lists the rules that - // you want to share with another AWS account and the operations that you want the - // account to be able to perform. You can specify the following operations in the - // Action section of the statement: - // - // * route53resolver:GetResolverRule + // An Identity and Access Management policy statement that lists the rules that you + // want to share with another Amazon Web Services account and the operations that + // you want the account to be able to perform. 
You can specify the following + // operations in the Action section of the statement: // // * - // route53resolver:AssociateResolverRule + // route53resolver:GetResolverRule + // + // * route53resolver:AssociateResolverRule // // * // route53resolver:DisassociateResolverRule diff --git a/service/route53resolver/doc.go b/service/route53resolver/doc.go index f4778e8369e..6e05b4a4b88 100644 --- a/service/route53resolver/doc.go +++ b/service/route53resolver/doc.go @@ -11,10 +11,10 @@ // between your VPC and your network over a Direct Connect or VPN connection: // Forward DNS queries from resolvers on your network to Route 53 Resolver DNS // resolvers on your network can forward DNS queries to Resolver in a specified -// VPC. This allows your DNS resolvers to easily resolve domain names for AWS -// resources such as EC2 instances or records in a Route 53 private hosted zone. -// For more information, see How DNS Resolvers on Your Network Forward DNS Queries -// to Route 53 Resolver +// VPC. This allows your DNS resolvers to easily resolve domain names for Amazon +// Web Services resources such as EC2 instances or records in a Route 53 private +// hosted zone. For more information, see How DNS Resolvers on Your Network Forward +// DNS Queries to Route 53 Resolver // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-network-to-vpc) // in the Amazon Route 53 Developer Guide. Conditionally forward queries from a VPC // to resolvers on your network You can configure Resolver to forward queries that diff --git a/service/route53resolver/types/types.go b/service/route53resolver/types/types.go index 53fc3d08470..3d24e083421 100644 --- a/service/route53resolver/types/types.go +++ b/service/route53resolver/types/types.go @@ -49,7 +49,7 @@ type Filter struct { // endpoints. If you specify DIRECTION for Name, specify INBOUND or OUTBOUND for // Values. 
// - // * HostVpcId: The ID of the VPC that inbound DNS queries pass through on + // * HostVPCId: The ID of the VPC that inbound DNS queries pass through on // the way from your network to your VPCs in a region, or the VPC that outbound // queries pass through on the way from your VPCs to your network. In a // CreateResolverEndpoint @@ -133,38 +133,39 @@ type Filter struct { // * CreatorRequestId: A unique string that identifies the request that // created the query logging configuration. // - // * Destination: The AWS service that - // you want to forward query logs to. Valid values include the following: + // * Destination: The Amazon Web Services + // service that you want to forward query logs to. Valid values include the + // following: // // * S3 // - // * - // CloudWatchLogs + // * CloudWatchLogs // // * KinesisFirehose // - // * DestinationArn: The ARN of the location - // that Resolver is sending query logs to. This value can be the ARN for an S3 - // bucket, a CloudWatch Logs log group, or a Kinesis Data Firehose delivery - // stream. + // * DestinationArn: The ARN + // of the location that Resolver is sending query logs to. This value can be the + // ARN for an S3 bucket, a CloudWatch Logs log group, or a Kinesis Data Firehose + // delivery stream. // // * Id: The ID of the query logging configuration // - // * Name: The name of - // the query logging configuration + // * Name: The + // name of the query logging configuration // - // * OwnerId: The AWS account ID for the account - // that created the query logging configuration. + // * OwnerId: The Amazon Web Services + // account ID for the account that created the query logging configuration. // - // * ShareStatus: An indication of - // whether the query logging configuration is shared with other AWS accounts, or - // was shared with the current account by another AWS account. Valid values - // include: NOT_SHARED, SHARED_WITH_ME, or SHARED_BY_ME. 
- // - // * Status: The status of - // the query logging configuration. If you specify Status for Name, specify the - // applicable status code for Values: CREATING, CREATED, DELETING, or FAILED. For - // more information, see Status + // * + // ShareStatus: An indication of whether the query logging configuration is shared + // with other Amazon Web Services accounts, or was shared with the current account + // by another Amazon Web Services account. Valid values include: NOT_SHARED, + // SHARED_WITH_ME, or SHARED_BY_ME. + // + // * Status: The status of the query logging + // configuration. If you specify Status for Name, specify the applicable status + // code for Values: CREATING, CREATED, DELETING, or FAILED. For more information, + // see Status // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ResolverQueryLogConfig.html#Route53Resolver-Type-route53resolver_ResolverQueryLogConfig-Status). // // ListResolverQueryLogConfigAssociations @@ -227,8 +228,8 @@ type FirewallConfig struct { // The ID of the firewall configuration. Id *string - // The AWS account ID of the owner of the VPC that this firewall configuration - // applies to. + // The Amazon Web Services account ID of the owner of the VPC that this firewall + // configuration applies to. OwnerId *string // The ID of the VPC that this firewall configuration applies to. @@ -410,17 +411,17 @@ type FirewallRuleGroup struct { // The name of the rule group. Name *string - // The AWS account ID for the account that created the rule group. When a rule - // group is shared with your account, this is the account that has shared the rule - // group with you. + // The Amazon Web Services account ID for the account that created the rule group. + // When a rule group is shared with your account, this is the account that has + // shared the rule group with you. OwnerId *string // The number of rules in the rule group. 
RuleCount *int32 - // Whether the rule group is shared with other AWS accounts, or was shared with the - // current account by another AWS account. Sharing is configured through AWS - // Resource Access Manager (AWS RAM). + // Whether the rule group is shared with other Amazon Web Services accounts, or was + // shared with the current account by another Amazon Web Services account. Sharing + // is configured through Resource Access Manager (RAM). ShareStatus ShareStatus // The status of the domain list. @@ -455,8 +456,8 @@ type FirewallRuleGroupAssociation struct { Id *string // The owner of the association, used only for associations that are not managed by - // you. If you use AWS Firewall Manager to manage your DNS Firewalls, then this - // reports Firewall Manager as the managed owner. + // you. If you use Firewall Manager to manage your DNS Firewalls, then this reports + // Firewall Manager as the managed owner. ManagedOwnerName *string // The date and time that the association was last modified, in Unix time format @@ -507,14 +508,14 @@ type FirewallRuleGroupMetadata struct { // The name of the rule group. Name *string - // The AWS account ID for the account that created the rule group. When a rule - // group is shared with your account, this is the account that has shared the rule - // group with you. + // The Amazon Web Services account ID for the account that created the rule group. + // When a rule group is shared with your account, this is the account that has + // shared the rule group with you. OwnerId *string - // Whether the rule group is shared with other AWS accounts, or was shared with the - // current account by another AWS account. Sharing is configured through AWS - // Resource Access Manager (AWS RAM). + // Whether the rule group is shared with other Amazon Web Services accounts, or was + // shared with the current account by another Amazon Web Services account. Sharing + // is configured through Resource Access Manager (RAM). 
ShareStatus ShareStatus noSmithyDocumentSerde @@ -768,12 +769,14 @@ type ResolverQueryLogConfig struct { // The name of the query logging configuration. Name *string - // The AWS account ID for the account that created the query logging configuration. + // The Amazon Web Services account ID for the account that created the query + // logging configuration. OwnerId *string // An indication of whether the query logging configuration is shared with other - // AWS accounts, or was shared with the current account by another AWS account. - // Sharing is configured through AWS Resource Access Manager (AWS RAM). + // Amazon Web Services accounts, or was shared with the current account by another + // Amazon Web Services account. Sharing is configured through Resource Access + // Manager (RAM). ShareStatus ShareStatus // The status of the specified query logging configuration. Valid values include @@ -909,8 +912,8 @@ type ResolverRule struct { // Resolver rule. Name *string - // When a rule is shared with another AWS account, the account ID of the account - // that the rule is shared with. + // When a rule is shared with another Amazon Web Services account, the account ID + // of the account that the rule is shared with. OwnerId *string // The ID of the endpoint that the rule is associated with. diff --git a/service/s3/api_op_AbortMultipartUpload.go b/service/s3/api_op_AbortMultipartUpload.go index c64275d0931..12f57bbe09e 100644 --- a/service/s3/api_op_AbortMultipartUpload.go +++ b/service/s3/api_op_AbortMultipartUpload.go @@ -66,17 +66,17 @@ type AbortMultipartUploadInput struct { // with an access point, you must direct requests to the access point hostname. The // access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. 
For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // diff --git a/service/s3/api_op_CompleteMultipartUpload.go b/service/s3/api_op_CompleteMultipartUpload.go index e0729ab3fb6..d5af16fad9b 100644 --- a/service/s3/api_op_CompleteMultipartUpload.go +++ b/service/s3/api_op_CompleteMultipartUpload.go @@ -112,7 +112,23 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu type CompleteMultipartUploadInput struct { - // Name of the bucket to which the multipart upload was initiated. + // Name of the bucket to which the multipart upload was initiated. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. // // This member is required. Bucket *string @@ -147,27 +163,28 @@ type CompleteMultipartUploadInput struct { type CompleteMultipartUploadOutput struct { - // The name of the bucket that contains the newly created object. When using this - // action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form + // The name of the bucket that contains the newly created object. Does not return + // the access point ARN or access point alias if used. When using this action with + // an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. 
For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with AWS KMS (SSE-KMS). + // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Entity tag that identifies the newly created object's data. Objects with @@ -192,15 +209,15 @@ type CompleteMultipartUploadOutput struct { // request. RequestCharged types.RequestCharged - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for the object. 
SSEKMSKeyId *string // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an AWS KMS customer master key (CMK) in your initiate - // multipart upload request, the response includes this header. It confirms the - // encryption algorithm that Amazon S3 used to encrypt the object. + // encryption key or an Amazon Web Services KMS customer master key (CMK) in your + // initiate multipart upload request, the response includes this header. It + // confirms the encryption algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption types.ServerSideEncryption // Version ID of the newly created object, in case the bucket has versioning turned diff --git a/service/s3/api_op_CopyObject.go b/service/s3/api_op_CopyObject.go index 4d686840c28..bf282ec0812 100644 --- a/service/s3/api_op_CopyObject.go +++ b/service/s3/api_op_CopyObject.go @@ -94,11 +94,12 @@ import ( // All headers with the x-amz- prefix, including // x-amz-copy-source, must be signed. Server-side encryption When you perform a // CopyObject operation, you can optionally use the appropriate encryption-related -// headers to encrypt the object using server-side encryption with AWS managed -// encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With -// server-side encryption, Amazon S3 encrypts your data as it writes it to disks in -// its data centers and decrypts the data when you access it. For more information -// about server-side encryption, see Using Server-Side Encryption +// headers to encrypt the object using server-side encryption with Amazon Web +// Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided +// encryption key. With server-side encryption, Amazon S3 encrypts your data as it +// writes it to disks in its data centers and decrypts the data when you access it. 
+// For more information about server-side encryption, see Using Server-Side +// Encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). If // a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. // For more information, see Amazon S3 Bucket Keys @@ -106,9 +107,10 @@ import ( // S3 User Guide. Access Control List (ACL)-Specific Request Headers When copying // an object, you can optionally use headers to grant ACL-based permissions. By // default, all objects are private. Only the owner has full access control. When -// adding a new object, you can grant permissions to individual AWS accounts or to -// predefined groups defined by Amazon S3. These permissions are then added to the -// ACL on the object. For more information, see Access Control List (ACL) Overview +// adding a new object, you can grant permissions to individual Amazon Web Services +// accounts or to predefined groups defined by Amazon S3. These permissions are +// then added to the ACL on the object. For more information, see Access Control +// List (ACL) Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing // ACLs Using the REST API // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). @@ -161,17 +163,17 @@ type CopyObjectInput struct { // The name of the destination bucket. When using this action with an access point, // you must direct requests to the access point hostname. The access point hostname // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the AWS SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
For more + // information about access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -197,9 +199,9 @@ type CopyObjectInput struct { // 123456789012 in Region us-west-2, use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. // The value must be URL encoded. Amazon S3 supports copy operations using access - // points only when the source and destination buckets are in the same AWS Region. - // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the - // ARN of the object as accessed in the format + // points only when the source and destination buckets are in the same Amazon Web + // Services Region. Alternatively, for objects accessed through Amazon S3 on + // Outposts, specify the ARN of the object as accessed in the format // arn:aws:s3-outposts:::outpost//object/. 
For example, to copy the object // reports/january.pdf through outpost my-outpost owned by account 123456789012 in // Region us-west-2, use the URL encoding of @@ -343,16 +345,16 @@ type CopyObjectInput struct { // encryption key was transmitted without error. SSECustomerKeyMD5 *string - // Specifies the AWS KMS Encryption Context to use for object encryption. The value - // of this header is a base64-encoded UTF-8 string holding JSON with the encryption - // context key-value pairs. + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL or - // using SigV4. For information about configuring using any of the officially - // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request - // Authentication + // Specifies the Amazon Web Services KMS key ID to use for object encryption. All + // GET and PUT requests for an object protected by Amazon Web Services KMS will + // fail if not made via SSL or using SigV4. For information about configuring using + // any of the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. SSEKMSKeyId *string @@ -390,7 +392,7 @@ type CopyObjectInput struct { type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with AWS KMS (SSE-KMS). + // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Container for all response elements. 
@@ -415,14 +417,14 @@ type CopyObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string - // If present, specifies the AWS KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for the object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_CreateBucket.go b/service/s3/api_op_CreateBucket.go index bfbff554b4e..00e79b1fd86 100644 --- a/service/s3/api_op_CreateBucket.go +++ b/service/s3/api_op_CreateBucket.go @@ -13,10 +13,10 @@ import ( ) // Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 -// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests -// are never allowed to create buckets. By creating the bucket, you become the -// bucket owner. Not every string is an acceptable bucket name. For information -// about bucket naming restrictions, see Bucket naming rules +// and have a valid Amazon Web Services Access Key ID to authenticate requests. +// Anonymous requests are never allowed to create buckets. By creating the bucket, +// you become the bucket owner. Not every string is an acceptable bucket name. 
For +// information about bucket naming restrictions, see Bucket naming rules // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). // If you want to create an Amazon S3 on Outposts bucket, see Create Bucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). @@ -55,46 +55,55 @@ import ( // each grantee as a type=value pair, where the type is one of the following: // // * id -// – if the value specified is the canonical user ID of an AWS account +// – if the value specified is the canonical user ID of an Amazon Web Services +// account // -// * uri – if -// you are granting permissions to a predefined group +// * uri – if you are granting permissions to a predefined group // -// * emailAddress – if the -// value specified is the email address of an AWS account Using email addresses to -// specify a grantee is only supported in the following AWS Regions: +// * +// emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: // -// * US East (N. -// Virginia) +// * US East (N. Virginia) // -// * US West (N. California) +// * US West +// (N. California) // // * US West (Oregon) // -// * Asia Pacific -// (Singapore) +// * Asia Pacific (Singapore) // -// * Asia Pacific (Sydney) +// * Asia Pacific +// (Sydney) // // * Asia Pacific (Tokyo) // -// * Europe -// (Ireland) +// * Europe (Ireland) // -// * South America (São Paulo) +// * South America (São +// Paulo) // -// For a list of all the Amazon S3 -// supported Regions and endpoints, see Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS -// General Reference. 
+// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. // -// For example, the following x-amz-grant-read header grants -// the AWS accounts identified by account IDs permissions to read object data and -// its metadata: x-amz-grant-read: id="11112222333", id="444455556666" +// For example, the following +// x-amz-grant-read header grants the Amazon Web Services accounts identified by +// account IDs permissions to read object data and its metadata: x-amz-grant-read: +// id="11112222333", id="444455556666" // -// You can use -// either a canned ACL or specify access permissions explicitly. You cannot do -// both. The following operations are related to CreateBucket: +// You can use either a canned ACL or specify +// access permissions explicitly. You cannot do both. Permissions If your +// CreateBucket request specifies ACL permissions and the ACL is public-read, +// public-read-write, authenticated-read, or if you specify access permissions +// explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl +// permissions are needed. If the ACL the CreateBucket request is private, only +// s3:CreateBucket permission is needed. If ObjectLockEnabledForBucket is set to +// true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and +// s3:PutBucketVersioning permissions are required. 
The following operations are +// related to CreateBucket: // // * PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) diff --git a/service/s3/api_op_CreateMultipartUpload.go b/service/s3/api_op_CreateMultipartUpload.go index 902933ef0bd..821905530dd 100644 --- a/service/s3/api_op_CreateMultipartUpload.go +++ b/service/s3/api_op_CreateMultipartUpload.go @@ -35,7 +35,8 @@ import ( // initiate a multipart upload, send one or more requests to upload parts, and then // complete the multipart upload process. You sign each request individually. There // is nothing special about signing multipart upload requests. For more information -// about signing, see Authenticating Requests (AWS Signature Version 4) +// about signing, see Authenticating Requests (Amazon Web Services Signature +// Version 4) // (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). // After you initiate a multipart upload and upload one or more parts, to stop // being charged for storing the uploaded parts, you must either complete or abort @@ -44,26 +45,27 @@ import ( // multipart upload. You can optionally request server-side encryption. For // server-side encryption, Amazon S3 encrypts your data as it writes it to disks in // its data centers and decrypts it when you access it. You can provide your own -// encryption key, or use AWS Key Management Service (AWS KMS) customer master keys -// (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own -// encryption key, the request headers you provide in UploadPart +// encryption key, or use Amazon Web Services Key Management Service (Amazon Web +// Services KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. 
+// If you choose to provide your own encryption key, the request headers you +// provide in UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and // UploadPartCopy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload // by using CreateMultipartUpload. To perform a multipart upload with encryption -// using an AWS KMS CMK, the requester must have permission to the kms:Decrypt and -// kms:GenerateDataKey* actions on the key. These permissions are required because -// Amazon S3 must decrypt and read data from the encrypted file parts before it -// completes the multipart upload. For more information, see Multipart upload API -// and permissions +// using an Amazon Web Services KMS CMK, the requester must have permission to the +// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are +// required because Amazon S3 must decrypt and read data from the encrypted file +// parts before it completes the multipart upload. For more information, see +// Multipart upload API and permissions // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// in the Amazon S3 User Guide. If your AWS Identity and Access Management (IAM) -// user or role is in the same AWS account as the AWS KMS CMK, then you must have -// these permissions on the key policy. If your IAM user or role belongs to a -// different account than the key, then you must have the permissions on both the -// key policy and your IAM user or role. For more information, see Protecting Data -// Using Server-Side Encryption +// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user +// or role is in the same Amazon Web Services account as the Amazon Web Services +// KMS CMK, then you must have these permissions on the key policy. 
If your IAM +// user or role belongs to a different account than the key, then you must have the +// permissions on both the key policy and your IAM user or role. For more +// information, see Protecting Data Using Server-Side Encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). // Access Permissions When copying an object, you can optionally specify the // accounts or groups that should be granted specific permissions on the new @@ -87,12 +89,13 @@ import ( // Amazon S3 to encrypt data at rest using server-side encryption. Server-side // encryption is for data encryption at rest. Amazon S3 encrypts your data as it // writes it to disks in its data centers and decrypts it when you access it. The -// option you use depends on whether you want to use AWS managed encryption keys or -// provide your own encryption key. +// option you use depends on whether you want to use Amazon Web Services managed +// encryption keys or provide your own encryption key. // -// * Use encryption keys managed by Amazon S3 or -// customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If -// you want AWS to manage the keys used to encrypt data, specify the following +// * Use encryption keys +// managed by Amazon S3 or customer master keys (CMKs) stored in Amazon Web +// Services Key Management Service (Amazon Web Services KMS) – If you want Amazon +// Web Services to manage the keys used to encrypt data, specify the following // headers in the request. // // * x-amz-server-side-encryption @@ -105,11 +108,13 @@ import ( // // If you specify // x-amz-server-side-encryption:aws:kms, but don't provide -// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK -// in AWS KMS to protect the data. All GET and PUT requests for an object protected -// by AWS KMS fail if you don't make them with SSL or by using SigV4. 
For more -// information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), -// see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS +// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web +// Services managed CMK in Amazon Web Services KMS to protect the data. All GET and +// PUT requests for an object protected by Amazon Web Services KMS fail if you +// don't make them with SSL or by using SigV4. For more information about +// server-side encryption with CMKs stored in Amazon Web Services KMS (SSE-KMS), +// see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web +// Services KMS // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). // // * @@ -126,17 +131,18 @@ import ( // x-amz-server-side-encryption-customer-key-MD5 // // For more information about -// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting -// Data Using Server-Side Encryption with CMKs stored in AWS KMS +// server-side encryption with CMKs stored in Amazon Web Services KMS (SSE-KMS), +// see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web +// Services KMS // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). // // Access-Control-List // (ACL)-Specific Request Headers You also can use the following access // control–related headers with this operation. By default, all objects are // private. Only the owner has full access control. When adding a new object, you -// can grant permissions to individual AWS accounts or to predefined groups defined -// by Amazon S3. These permissions are then added to the access control list (ACL) -// on the object. For more information, see Using ACLs +// can grant permissions to individual Amazon Web Services accounts or to +// predefined groups defined by Amazon S3. These permissions are then added to the +// access control list (ACL) on the object. 
For more information, see Using ACLs // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). With // this operation, you can grant access permissions using one of the following two // methods: @@ -148,9 +154,9 @@ import ( // // * // Specify access permissions explicitly — To explicitly grant access permissions -// to specific AWS accounts or groups, use the following headers. Each header maps -// to specific permissions that Amazon S3 supports in an ACL. For more information, -// see Access Control List (ACL) Overview +// to specific Amazon Web Services accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an ACL. For +// more information, see Access Control List (ACL) Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). In the // header, you specify a list of grantees who get the specific permission. To grant // permissions explicitly, use: @@ -170,45 +176,47 @@ import ( // specify each grantee as a type=value pair, where the type is one of the // following: // -// * id – if the value specified is the canonical user ID of an AWS -// account +// * id – if the value specified is the canonical user ID of an Amazon +// Web Services account // -// * uri – if you are granting permissions to a predefined group +// * uri – if you are granting permissions to a predefined +// group // -// * -// emailAddress – if the value specified is the email address of an AWS account -// Using email addresses to specify a grantee is only supported in the following -// AWS Regions: +// * emailAddress – if the value specified is the email address of an Amazon +// Web Services account Using email addresses to specify a grantee is only +// supported in the following Amazon Web Services Regions: // -// * US East (N. Virginia) +// * US East (N. +// Virginia) // // * US West (N. 
California) // -// * US West -// (Oregon) +// * US West (Oregon) // -// * Asia Pacific (Singapore) +// * Asia Pacific +// (Singapore) // // * Asia Pacific (Sydney) // -// * Asia Pacific -// (Tokyo) +// * Asia Pacific (Tokyo) // -// * Europe (Ireland) +// * Europe +// (Ireland) // // * South America (São Paulo) // -// For a list of all the -// Amazon S3 supported Regions and endpoints, see Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS -// General Reference. +// For a list of all the Amazon S3 +// supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. // -// For example, the following x-amz-grant-read header grants -// the AWS accounts identified by account IDs permissions to read object data and -// its metadata: x-amz-grant-read: id="11112222333", id="444455556666" +// For example, the following +// x-amz-grant-read header grants the Amazon Web Services accounts identified by +// account IDs permissions to read object data and its metadata: x-amz-grant-read: +// id="11112222333", id="444455556666" // -// The -// following operations are related to CreateMultipartUpload: +// The following operations are related to +// CreateMultipartUpload: // // * UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) @@ -249,17 +257,17 @@ type CreateMultipartUploadInput struct { // with an access point, you must direct requests to the access point hostname. The // access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. 
For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -358,16 +366,17 @@ type CreateMultipartUploadInput struct { // encryption key was transmitted without error. SSECustomerKeyMD5 *string - // Specifies the AWS KMS Encryption Context to use for object encryption. The value - // of this header is a base64-encoded UTF-8 string holding JSON with the encryption - // context key-value pairs. + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object - // encryption. All GET and PUT requests for an object protected by AWS KMS will - // fail if not made via SSL or using SigV4. 
For information about configuring using - // any of the officially supported AWS SDKs and AWS CLI, see Specifying the - // Signature Version in Request Authentication + // Specifies the ID of the symmetric customer managed Amazon Web Services KMS CMK + // to use for object encryption. All GET and PUT requests for an object protected + // by Amazon Web Services KMS will fail if not made via SSL or using SigV4. For + // information about configuring using any of the officially supported Amazon Web + // Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version + // in Request Authentication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. SSEKMSKeyId *string @@ -414,27 +423,28 @@ type CreateMultipartUploadOutput struct { // incomplete multipart uploads. AbortRuleId *string - // The name of the bucket to which the multipart upload was initiated. When using - // this action with an access point, you must direct requests to the access point + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. When using this + // action with an access point, you must direct requests to the access point // hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. 
When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with AWS KMS (SSE-KMS). + // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Object key for which the multipart upload was initiated. @@ -453,14 +463,14 @@ type CreateMultipartUploadOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string - // If present, specifies the AWS KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. 
+ // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for the object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_DeleteBucketPolicy.go b/service/s3/api_op_DeleteBucketPolicy.go index 13e47baa5ea..6b2b03b1d09 100644 --- a/service/s3/api_op_DeleteBucketPolicy.go +++ b/service/s3/api_op_DeleteBucketPolicy.go @@ -13,16 +13,17 @@ import ( // This implementation of the DELETE action uses the policy subresource to delete // the policy of a specified bucket. If you are using an identity other than the -// root user of the AWS account that owns the bucket, the calling identity must -// have the DeleteBucketPolicy permissions on the specified bucket and belong to -// the bucket owner's account to use this operation. If you don't have -// DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If -// you have the correct permissions, but you're not using an identity that belongs -// to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error. -// As a security precaution, the root user of the AWS account that owns a bucket -// can always use this operation, even if the policy explicitly denies the root -// user the ability to perform this action. For more information about bucket -// policies, see Using Bucket Policies and UserPolicies +// root user of the Amazon Web Services account that owns the bucket, the calling +// identity must have the DeleteBucketPolicy permissions on the specified bucket +// and belong to the bucket owner's account to use this operation. If you don't +// have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied +// error. 
If you have the correct permissions, but you're not using an identity +// that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not +// Allowed error. As a security precaution, the root user of the Amazon Web +// Services account that owns a bucket can always use this operation, even if the +// policy explicitly denies the root user the ability to perform this action. For +// more information about bucket policies, see Using Bucket Policies and +// UserPolicies // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The // following operations are related to DeleteBucketPolicy // diff --git a/service/s3/api_op_DeleteObject.go b/service/s3/api_op_DeleteObject.go index b8d57c8a99b..c285a32cd07 100644 --- a/service/s3/api_op_DeleteObject.go +++ b/service/s3/api_op_DeleteObject.go @@ -57,17 +57,17 @@ type DeleteObjectInput struct { // an access point, you must direct requests to the access point hostname. The // access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. 
For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // diff --git a/service/s3/api_op_DeleteObjectTagging.go b/service/s3/api_op_DeleteObjectTagging.go index c0aac44b4f7..b952ee9f0cc 100644 --- a/service/s3/api_op_DeleteObjectTagging.go +++ b/service/s3/api_op_DeleteObjectTagging.go @@ -47,17 +47,17 @@ type DeleteObjectTaggingInput struct { // this action with an access point, you must direct requests to the access point // hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. 
For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // diff --git a/service/s3/api_op_DeleteObjects.go b/service/s3/api_op_DeleteObjects.go index eb10d71ef28..45ee168266a 100644 --- a/service/s3/api_op_DeleteObjects.go +++ b/service/s3/api_op_DeleteObjects.go @@ -78,17 +78,17 @@ type DeleteObjectsInput struct { // access point, you must direct requests to the access point hostname. The access // point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. 
For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // diff --git a/service/s3/api_op_GetBucketLocation.go b/service/s3/api_op_GetBucketLocation.go index d2666a0274a..cd896c89c44 100644 --- a/service/s3/api_op_GetBucketLocation.go +++ b/service/s3/api_op_GetBucketLocation.go @@ -23,10 +23,12 @@ import ( // LocationConstraint request parameter in a CreateBucket request. For more // information, see CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). To use -// this implementation of the operation, you must be the bucket owner. The -// following operations are related to GetBucketLocation: +// this implementation of the operation, you must be the bucket owner. To use this +// API against an access point, provide the alias of the access point in place of +// the bucket name. The following operations are related to GetBucketLocation: // -// * GetObject +// * +// GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // // * diff --git a/service/s3/api_op_GetBucketNotificationConfiguration.go b/service/s3/api_op_GetBucketNotificationConfiguration.go index 439952d274d..471b9cce9e4 100644 --- a/service/s3/api_op_GetBucketNotificationConfiguration.go +++ b/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -60,7 +60,7 @@ type GetBucketNotificationConfigurationInput struct { // element is empty, notifications are turned off for the bucket. type GetBucketNotificationConfigurationOutput struct { - // Describes the AWS Lambda functions to invoke and the events for which to invoke + // Describes the Lambda functions to invoke and the events for which to invoke // them. 
LambdaFunctionConfigurations []types.LambdaFunctionConfiguration diff --git a/service/s3/api_op_GetBucketPolicy.go b/service/s3/api_op_GetBucketPolicy.go index e317dc2cc61..ca63f969963 100644 --- a/service/s3/api_op_GetBucketPolicy.go +++ b/service/s3/api_op_GetBucketPolicy.go @@ -12,16 +12,17 @@ import ( ) // Returns the policy of a specified bucket. If you are using an identity other -// than the root user of the AWS account that owns the bucket, the calling identity -// must have the GetBucketPolicy permissions on the specified bucket and belong to -// the bucket owner's account in order to use this operation. If you don't have -// GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you -// have the correct permissions, but you're not using an identity that belongs to -// the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error. As -// a security precaution, the root user of the AWS account that owns a bucket can -// always use this operation, even if the policy explicitly denies the root user -// the ability to perform this action. For more information about bucket policies, -// see Using Bucket Policies and User Policies +// than the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must have the GetBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. As a security precaution, the root user of the Amazon +// Web Services account that owns a bucket can always use this operation, even if +// the policy explicitly denies the root user the ability to perform this action. 
+// For more information about bucket policies, see Using Bucket Policies and User +// Policies // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The // following action is related to GetBucketPolicy: // diff --git a/service/s3/api_op_GetObject.go b/service/s3/api_op_GetObject.go index c3ee47815ed..54222ddcd68 100644 --- a/service/s3/api_op_GetObject.go +++ b/service/s3/api_op_GetObject.go @@ -44,12 +44,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // Encryption request headers, like x-amz-server-side-encryption, should not be // sent for GET requests if your object uses server-side encryption with CMKs -// stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, you’ll -// get an HTTP 400 BadRequest error. If you encrypt an object by using server-side -// encryption with customer-provided encryption keys (SSE-C) when you store the -// object in Amazon S3, then when you GET the object, you must use the following -// headers: +// stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption with +// Amazon S3–managed encryption keys (SSE-S3). If your object does use these types +// of keys, you’ll get an HTTP 400 BadRequest error. If you encrypt an object by +// using server-side encryption with customer-provided encryption keys (SSE-C) when +// you store the object in Amazon S3, then when you GET the object, you must use +// the following headers: // // * x-amz-server-side-encryption-customer-algorithm // @@ -83,10 +83,17 @@ import ( // error. // // Versioning By default, the GET action returns the current version of an -// object. To return a different version, use the versionId subresource. If the -// current version of the object is a delete marker, Amazon S3 behaves as if the -// object was deleted and includes x-amz-delete-marker: true in the response. 
For -// more information about versioning, see PutBucketVersioning +// object. To return a different version, use the versionId subresource. +// +// * You +// need the s3:GetObjectVersion permission to access a specific version of an +// object. +// +// * If the current version of the object is a delete marker, Amazon S3 +// behaves as if the object was deleted and includes x-amz-delete-marker: true in +// the response. +// +// For more information about versioning, see PutBucketVersioning // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). // Overriding Response Header Values There are times when you want to override // certain response header values in a GET response. For example, you might @@ -155,17 +162,17 @@ type GetObjectInput struct { // point, you must direct requests to the access point hostname. The access point // hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. 
For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -266,7 +273,7 @@ type GetObjectOutput struct { Body io.ReadCloser // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with AWS KMS (SSE-KMS). + // with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Specifies caching behavior along the request/reply chain. @@ -357,9 +364,9 @@ type GetObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for the object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_GetObjectAcl.go b/service/s3/api_op_GetObjectAcl.go index 1d2c2f697a2..f7d4f119d42 100644 --- a/service/s3/api_op_GetObjectAcl.go +++ b/service/s3/api_op_GetObjectAcl.go @@ -49,9 +49,9 @@ type GetObjectAclInput struct { // When using this action with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. 
For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // diff --git a/service/s3/api_op_GetObjectLegalHold.go b/service/s3/api_op_GetObjectLegalHold.go index 0d158460933..997da86f726 100644 --- a/service/s3/api_op_GetObjectLegalHold.go +++ b/service/s3/api_op_GetObjectLegalHold.go @@ -36,9 +36,9 @@ type GetObjectLegalHoldInput struct { // retrieve. When using this action with an access point, you must direct requests // to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // diff --git a/service/s3/api_op_GetObjectLockConfiguration.go b/service/s3/api_op_GetObjectLockConfiguration.go index 95ab79768de..6441ac3ea8c 100644 --- a/service/s3/api_op_GetObjectLockConfiguration.go +++ b/service/s3/api_op_GetObjectLockConfiguration.go @@ -37,9 +37,9 @@ type GetObjectLockConfigurationInput struct { // action with an access point, you must direct requests to the access point // hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // diff --git a/service/s3/api_op_GetObjectRetention.go b/service/s3/api_op_GetObjectRetention.go index 2d0ec99ac52..63ebb3e2458 100644 --- a/service/s3/api_op_GetObjectRetention.go +++ b/service/s3/api_op_GetObjectRetention.go @@ -36,9 +36,9 @@ type GetObjectRetentionInput struct { // retrieve. When using this action with an access point, you must direct requests // to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // diff --git a/service/s3/api_op_GetObjectTagging.go b/service/s3/api_op_GetObjectTagging.go index 5eef621867d..fe2a88b3112 100644 --- a/service/s3/api_op_GetObjectTagging.go +++ b/service/s3/api_op_GetObjectTagging.go @@ -51,17 +51,17 @@ type GetObjectTaggingInput struct { // When using this action with an access point, you must direct requests to the // access point hostname. 
The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // diff --git a/service/s3/api_op_HeadBucket.go b/service/s3/api_op_HeadBucket.go index 4310fa9aaae..8b1bc3983ed 100644 --- a/service/s3/api_op_HeadBucket.go +++ b/service/s3/api_op_HeadBucket.go @@ -30,6 +30,14 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
+// To use this API against an access point, you must provide the alias of the +// access point in place of the bucket name or specify the access point ARN. When +// using the access point ARN, you must direct requests to the access point +// hostname. The access point hostname takes the form +// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the +// Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For +// more information see, Using access points +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { if params == nil { params = &HeadBucketInput{} @@ -50,17 +58,17 @@ type HeadBucketInput struct { // The bucket name. When using this action with an access point, you must direct // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. 
For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // diff --git a/service/s3/api_op_HeadObject.go b/service/s3/api_op_HeadObject.go index 03fbc4959df..f133f534e27 100644 --- a/service/s3/api_op_HeadObject.go +++ b/service/s3/api_op_HeadObject.go @@ -45,15 +45,15 @@ import ( // * // Encryption request headers, like x-amz-server-side-encryption, should not be // sent for GET requests if your object uses server-side encryption with CMKs -// stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, you’ll -// get an HTTP 400 BadRequest error. +// stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption with +// Amazon S3–managed encryption keys (SSE-S3). If your object does use these types +// of keys, you’ll get an HTTP 400 BadRequest error. // -// * The last modified property in this case is -// the creation date of the object. +// * The last modified property +// in this case is the creation date of the object. // -// Request headers are limited to 8 KB in size. -// For more information, see Common Request Headers +// Request headers are limited to +// 8 KB in size. For more information, see Common Request Headers // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). // Consider the following when using request headers: // @@ -122,17 +122,17 @@ type HeadObjectInput struct { // access point, you must direct requests to the access point hostname. The access // point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -216,7 +216,7 @@ type HeadObjectOutput struct { ArchiveStatus types.ArchiveStatus // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with AWS KMS (SSE-KMS). + // with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Specifies caching behavior along the request/reply chain. @@ -351,15 +351,16 @@ type HeadObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. 
+ // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for the object. SSEKMSKeyId *string - // If the object is stored using server-side encryption either with an AWS KMS - // customer master key (CMK) or an Amazon S3-managed encryption key, the response - // includes this header with the value of the server-side encryption algorithm used - // when storing this object in Amazon S3 (for example, AES256, aws:kms). + // If the object is stored using server-side encryption either with an Amazon Web + // Services KMS customer master key (CMK) or an Amazon S3-managed encryption key, + // the response includes this header with the value of the server-side encryption + // algorithm used when storing this object in Amazon S3 (for example, AES256, + // aws:kms). ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header diff --git a/service/s3/api_op_ListMultipartUploads.go b/service/s3/api_op_ListMultipartUploads.go index 4ee4c92f836..1e82450e2a1 100644 --- a/service/s3/api_op_ListMultipartUploads.go +++ b/service/s3/api_op_ListMultipartUploads.go @@ -73,17 +73,17 @@ type ListMultipartUploadsInput struct { // this action with an access point, you must direct requests to the access point // hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -141,7 +141,8 @@ type ListMultipartUploadsInput struct { type ListMultipartUploadsOutput struct { - // The name of the bucket to which the multipart upload was initiated. + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. Bucket *string // If you specify a delimiter in the request, then the result returns each distinct diff --git a/service/s3/api_op_ListObjects.go b/service/s3/api_op_ListObjects.go index 31df00bbc1b..2fed3ed9a55 100644 --- a/service/s3/api_op_ListObjects.go +++ b/service/s3/api_op_ListObjects.go @@ -62,17 +62,17 @@ type ListObjectsInput struct { // access point, you must direct requests to the access point hostname. The access // point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. 
For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -94,7 +94,8 @@ type ListObjectsInput struct { // different account, the request will fail with an HTTP 403 (Access Denied) error. ExpectedBucketOwner *string - // Specifies the key to start with when listing objects in a bucket. + // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. Marker can be any key in the bucket. Marker *string // Sets the maximum number of keys returned in the response. 
By default the action diff --git a/service/s3/api_op_ListObjectsV2.go b/service/s3/api_op_ListObjectsV2.go index 7b76c13b6f0..32c5ebde9cc 100644 --- a/service/s3/api_op_ListObjectsV2.go +++ b/service/s3/api_op_ListObjectsV2.go @@ -22,7 +22,7 @@ import ( // objects, see Listing object keys programmatically // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) // To use this operation, you must have READ access to the bucket. To use this -// action in an AWS Identity and Access Management (IAM) policy, you must have +// action in an Identity and Access Management (IAM) policy, you must have // permissions to perform the s3:ListBucket action. The bucket owner has this // permission by default and can grant this permission to others. For more // information about permissions, see Permissions Related to Bucket Subresource @@ -68,17 +68,17 @@ type ListObjectsV2Input struct { // Bucket name to list. When using this action with an access point, you must // direct requests to the access point hostname. The access point hostname takes // the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When - // using this action with an access point through the AWS SDKs, you provide the - // access point ARN in place of the bucket name. For more information about access - // point ARNs, see Using access points + // using this action with an access point through the Amazon Web Services SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -173,17 +173,17 @@ type ListObjectsV2Output struct { // The bucket name. When using this action with an access point, you must direct // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. 
For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. Name *string diff --git a/service/s3/api_op_ListParts.go b/service/s3/api_op_ListParts.go index 0a6a6036c4a..5da2dc65c4e 100644 --- a/service/s3/api_op_ListParts.go +++ b/service/s3/api_op_ListParts.go @@ -71,17 +71,17 @@ type ListPartsInput struct { // action with an access point, you must direct requests to the access point // hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. 
// @@ -138,13 +138,14 @@ type ListPartsOutput struct { // incomplete multipart uploads. AbortRuleId *string - // The name of the bucket to which the multipart upload was initiated. + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. Bucket *string // Container element that identifies who initiated the multipart upload. If the - // initiator is an AWS account, this element provides the same information as the - // Owner element. If the initiator is an IAM User, this element provides the user - // ARN and display name. + // initiator is an Amazon Web Services account, this element provides the same + // information as the Owner element. If the initiator is an IAM User, this element + // provides the user ARN and display name. Initiator *types.Initiator // Indicates whether the returned list of parts is truncated. A true value diff --git a/service/s3/api_op_PutBucketAcl.go b/service/s3/api_op_PutBucketAcl.go index 54876e53171..528fa2e2757 100644 --- a/service/s3/api_op_PutBucketAcl.go +++ b/service/s3/api_op_PutBucketAcl.go @@ -43,74 +43,77 @@ import ( // Specify access permissions explicitly with the x-amz-grant-read, // x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and -// grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If -// you use these ACL-specific headers, you cannot use the x-amz-acl header to set a -// canned ACL. These parameters map to the set of permissions that Amazon S3 -// supports in an ACL. For more information, see Access Control List (ACL) Overview +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use the x-amz-acl +// header to set a canned ACL. 
These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control List +// (ACL) Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify // each grantee as a type=value pair, where the type is one of the following: // // * id -// – if the value specified is the canonical user ID of an AWS account +// – if the value specified is the canonical user ID of an Amazon Web Services +// account // -// * uri – if -// you are granting permissions to a predefined group +// * uri – if you are granting permissions to a predefined group // -// * emailAddress – if the -// value specified is the email address of an AWS account Using email addresses to -// specify a grantee is only supported in the following AWS Regions: +// * +// emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: // -// * US East (N. -// Virginia) +// * US East (N. Virginia) // -// * US West (N. California) +// * US West +// (N. California) // // * US West (Oregon) // -// * Asia Pacific -// (Singapore) +// * Asia Pacific (Singapore) // -// * Asia Pacific (Sydney) +// * Asia Pacific +// (Sydney) // // * Asia Pacific (Tokyo) // -// * Europe -// (Ireland) +// * Europe (Ireland) // -// * South America (São Paulo) +// * South America (São +// Paulo) // -// For a list of all the Amazon S3 -// supported Regions and endpoints, see Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS -// General Reference. +// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. 
// -// For example, the following x-amz-grant-write header grants -// create, overwrite, and delete objects permission to LogDelivery group predefined -// by Amazon S3 and two AWS accounts identified by their email addresses. -// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", -// id="111122223333", id="555566667777" +// For example, the following +// x-amz-grant-write header grants create, overwrite, and delete objects permission +// to LogDelivery group predefined by Amazon S3 and two Amazon Web Services +// accounts identified by their email addresses. x-amz-grant-write: +// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", +// id="555566667777" // -// You can use either a canned ACL or specify -// access permissions explicitly. You cannot do both. Grantee Values You can -// specify the person (grantee) to whom you're assigning access rights (using -// request elements) in the following ways: +// You can use either a canned ACL or specify access permissions +// explicitly. You cannot do both. Grantee Values You can specify the person +// (grantee) to whom you're assigning access rights (using request elements) in the +// following ways: // -// * By the person's ID: -// <>ID<><>GranteesEmail<> DisplayName is optional and ignored in the request +// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is +// optional and ignored in the request // -// * -// By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// * By URI: +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // -// * By -// Email address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the +// * By Email +// address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the // CanonicalUser and, in a response to a GET Object acl request, appears as the // CanonicalUser. 
Using email addresses to specify a grantee is only supported in -// the following AWS Regions: +// the following Amazon Web Services Regions: // // * US East (N. Virginia) // -// * US West (N. -// California) +// * US West +// (N. California) // // * US West (Oregon) // @@ -128,8 +131,8 @@ import ( // // For a list of all the Amazon S3 supported Regions and endpoints, see // Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS -// General Reference. +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. // // Related Resources // @@ -174,8 +177,9 @@ type PutBucketAclInput struct { // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a // message integrity check to verify that the request body was not corrupted in // transit. For more information, go to RFC 1864. - // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the AWS Command - // Line Interface (CLI) or AWS SDKs, this field is calculated automatically. + // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketCors.go b/service/s3/api_op_PutBucketCors.go index 6fd12de15e4..fbe55db92b5 100644 --- a/service/s3/api_op_PutBucketCors.go +++ b/service/s3/api_op_PutBucketCors.go @@ -87,8 +87,9 @@ type PutBucketCorsInput struct { // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a // message integrity check to verify that the request body was not corrupted in // transit. For more information, go to RFC 1864. 
- // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the AWS Command - // Line Interface (CLI) or AWS SDKs, this field is calculated automatically. + // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketEncryption.go b/service/s3/api_op_PutBucketEncryption.go index 9cd445b98dc..b8124cb2091 100644 --- a/service/s3/api_op_PutBucketEncryption.go +++ b/service/s3/api_op_PutBucketEncryption.go @@ -14,15 +14,16 @@ import ( // This action uses the encryption subresource to configure default encryption and // Amazon S3 Bucket Key for an existing bucket. Default encryption for a bucket can -// use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS -// customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, -// you can also configure Amazon S3 Bucket Key. For information about default -// encryption, see Amazon S3 default bucket encryption +// use server-side encryption with Amazon S3-managed keys (SSE-S3) or Amazon Web +// Services KMS customer master keys (SSE-KMS). If you specify default encryption +// using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information +// about default encryption, see Amazon S3 default bucket encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the // Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 // Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in -// the Amazon S3 User Guide. This action requires AWS Signature Version 4. For more -// information, see Authenticating Requests (AWS Signature Version 4) +// the Amazon S3 User Guide. This action requires Amazon Web Services Signature +// Version 4. 
For more information, see Authenticating Requests (Amazon Web +// Services Signature Version 4) // (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). // To use this operation, you must have permissions to perform the // s3:PutEncryptionConfiguration action. The bucket owner has this permission by @@ -58,9 +59,9 @@ func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncry type PutBucketEncryptionInput struct { // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS - // (SSE-KMS). For information about the Amazon S3 default encryption feature, see - // Amazon S3 Default Bucket Encryption + // Amazon S3-managed keys (SSE-S3) or customer master keys stored in Amazon Web + // Services KMS (SSE-KMS). For information about the Amazon S3 default encryption + // feature, see Amazon S3 Default Bucket Encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the // Amazon S3 User Guide. // @@ -73,8 +74,9 @@ type PutBucketEncryptionInput struct { ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration // The base64-encoded 128-bit MD5 digest of the server-side encryption - // configuration. For requests made using the AWS Command Line Interface (CLI) or - // AWS SDKs, this field is calculated automatically. + // configuration. For requests made using the Amazon Web Services Command Line + // Interface (CLI) or Amazon Web Services SDKs, this field is calculated + // automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketInventoryConfiguration.go b/service/s3/api_op_PutBucketInventoryConfiguration.go index 04f96bcb84c..596df6f65dd 100644 --- a/service/s3/api_op_PutBucketInventoryConfiguration.go +++ b/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -18,12 +18,13 @@ import ( // of the objects in the bucket on a daily or weekly basis, and the results are // published to a flat file. The bucket that is inventoried is called the source // bucket, and the bucket where the inventory flat file is stored is called the -// destination bucket. The destination bucket must be in the same AWS Region as the -// source bucket. When you configure an inventory for a source bucket, you specify -// the destination bucket where you want the inventory to be stored, and whether to -// generate the inventory daily or weekly. You can also configure what object -// metadata to include and whether to inventory all object versions or only current -// versions. For more information, see Amazon S3 Inventory +// destination bucket. The destination bucket must be in the same Amazon Web +// Services Region as the source bucket. When you configure an inventory for a +// source bucket, you specify the destination bucket where you want the inventory +// to be stored, and whether to generate the inventory daily or weekly. You can +// also configure what object metadata to include and whether to inventory all +// object versions or only current versions. For more information, see Amazon S3 +// Inventory // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) in the // Amazon S3 User Guide. 
You must create a bucket policy on the destination bucket // to grant permissions to Amazon S3 to write objects to the bucket in the defined diff --git a/service/s3/api_op_PutBucketLifecycleConfiguration.go b/service/s3/api_op_PutBucketLifecycleConfiguration.go index e7a4e952553..2a30a2a7490 100644 --- a/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -47,14 +47,14 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). // Permissions By default, all Amazon S3 resources are private, including buckets, // objects, and related subresources (for example, lifecycle configuration and -// website configuration). Only the resource owner (that is, the AWS account that -// created it) can access the resource. The resource owner can optionally grant -// access permissions to others by writing an access policy. For this operation, a -// user must get the s3:PutLifecycleConfiguration permission. You can also -// explicitly deny permissions. Explicit deny also supersedes any other -// permissions. If you want to block users or accounts from removing or deleting -// objects from your bucket, you must deny them permissions for the following -// actions: +// website configuration). Only the resource owner (that is, the Amazon Web +// Services account that created it) can access the resource. The resource owner +// can optionally grant access permissions to others by writing an access policy. +// For this operation, a user must get the s3:PutLifecycleConfiguration permission. +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. 
If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: // // * s3:DeleteObject // diff --git a/service/s3/api_op_PutBucketLogging.go b/service/s3/api_op_PutBucketLogging.go index 15b7d5d51fb..5a3c9e67241 100644 --- a/service/s3/api_op_PutBucketLogging.go +++ b/service/s3/api_op_PutBucketLogging.go @@ -14,20 +14,20 @@ import ( // Set the logging parameters for a bucket and to specify permissions for who can // view and modify the logging parameters. All logs are saved to buckets in the -// same AWS Region as the source bucket. To set the logging status of a bucket, you -// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL -// to all logs. You use the Grantee request element to grant access to other -// people. The Permissions request element specifies the kind of access the grantee -// has to the logs. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following -// ways: +// same Amazon Web Services Region as the source bucket. To set the logging status +// of a bucket, you must be the bucket owner. The bucket owner is automatically +// granted FULL_CONTROL to all logs. You use the Grantee request element to grant +// access to other people. The Permissions request element specifies the kind of +// access the grantee has to the logs. Grantee Values You can specify the person +// (grantee) to whom you're assigning access rights (using request elements) in the +// following ways: // -// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional -// and ignored in the request. +// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is +// optional and ignored in the request. 
// -// * By Email address: <>Grantees@email.com<> The -// grantee is resolved to the CanonicalUser and, in a response to a GET Object acl -// request, appears as the CanonicalUser. +// * By Email address: +// <>Grantees@email.com<> The grantee is resolved to the CanonicalUser and, in a +// response to a GET Object acl request, appears as the CanonicalUser. // // * By URI: // <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> @@ -86,8 +86,8 @@ type PutBucketLoggingInput struct { BucketLoggingStatus *types.BucketLoggingStatus // The MD5 hash of the PutBucketLogging request body. For requests made using the - // AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated - // automatically. + // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, + // this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketNotificationConfiguration.go b/service/s3/api_op_PutBucketNotificationConfiguration.go index 3821bbc8814..421d1a7b862 100644 --- a/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -25,7 +25,7 @@ import ( // the request body. After Amazon S3 receives this request, it first verifies that // any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue // Service (Amazon SQS) destination exists, and that the bucket owner has -// permission to publish to it by sending a test notification. In the case of AWS +// permission to publish to it by sending a test notification. In the case of // Lambda destinations, Amazon S3 verifies that the Lambda function permissions // grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. 
For // more information, see Configuring Notifications for Amazon S3 Events diff --git a/service/s3/api_op_PutBucketOwnershipControls.go b/service/s3/api_op_PutBucketOwnershipControls.go index 4ae5265846e..91e7e3b882e 100644 --- a/service/s3/api_op_PutBucketOwnershipControls.go +++ b/service/s3/api_op_PutBucketOwnershipControls.go @@ -53,8 +53,8 @@ type PutBucketOwnershipControlsInput struct { OwnershipControls *types.OwnershipControls // The MD5 hash of the OwnershipControls request body. For requests made using the - // AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated - // automatically. + // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, + // this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketPolicy.go b/service/s3/api_op_PutBucketPolicy.go index 57038f3d397..3ec46f7fc7d 100644 --- a/service/s3/api_op_PutBucketPolicy.go +++ b/service/s3/api_op_PutBucketPolicy.go @@ -12,18 +12,18 @@ import ( ) // Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an -// identity other than the root user of the AWS account that owns the bucket, the -// calling identity must have the PutBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account in order to use this operation. -// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. As a security precaution, the root user of the AWS -// account that owns a bucket can always use this operation, even if the policy -// explicitly denies the root user the ability to perform this action. 
For more -// information about bucket policies, see Using Bucket Policies and User Policies -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The -// following operations are related to PutBucketPolicy: +// identity other than the root user of the Amazon Web Services account that owns +// the bucket, the calling identity must have the PutBucketPolicy permissions on +// the specified bucket and belong to the bucket owner's account in order to use +// this operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns +// a 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. As a security precaution, the root user of the +// Amazon Web Services account that owns a bucket can always use this operation, +// even if the policy explicitly denies the root user the ability to perform this +// action. For more information, see Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html). +// The following operations are related to PutBucketPolicy: // // * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) @@ -62,8 +62,9 @@ type PutBucketPolicyInput struct { // to change this bucket policy in the future. ConfirmRemoveSelfBucketAccess bool - // The MD5 hash of the request body. For requests made using the AWS Command Line - // Interface (CLI) or AWS SDKs, this field is calculated automatically. + // The MD5 hash of the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketReplication.go b/service/s3/api_op_PutBucketReplication.go index ffbf29820e6..625abb5f735 100644 --- a/service/s3/api_op_PutBucketReplication.go +++ b/service/s3/api_op_PutBucketReplication.go @@ -15,10 +15,7 @@ import ( // Creates a replication configuration or replaces an existing one. For more // information, see Replication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon -// S3 User Guide. To perform this operation, the user or role performing the action -// must have the iam:PassRole -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) -// permission. Specify the replication configuration in the request body. In the +// S3 User Guide. Specify the replication configuration in the request body. In the // replication configuration, you provide the name of the destination bucket or // buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon // S3 can assume to replicate objects on your behalf, and other relevant @@ -35,26 +32,31 @@ import ( // delete markers differently. For more information, see Backward Compatibility // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). // For information about enabling versioning on a bucket, see Using Versioning -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). By default, a -// resource owner, in this case the AWS account that created the bucket, can -// perform this operation. The resource owner can also grant others permissions to -// perform the operation. For more information about permissions, see Specifying -// Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) and -// Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
-// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't -// replicate objects that are stored at rest using server-side encryption with CMKs -// stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: -// SourceSelectionCriteria, SseKmsEncryptedObjects, Status, +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). Handling +// Replication of Encrypted Objects By default, Amazon S3 doesn't replicate objects +// that are stored at rest using server-side encryption with CMKs stored in Amazon +// Web Services KMS. To replicate Amazon Web Services KMS-encrypted objects, add +// the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, // EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication -// configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS -// KMS +// configuration, see Replicating Objects Created with SSE Using CMKs stored in +// Amazon Web Services KMS // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). // For information on PutBucketReplication errors, see List of replication-related // error codes // (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) -// The following operations are related to PutBucketReplication: +// Permissions To create a PutBucketReplication request, you must have +// s3:PutReplicationConfiguration permissions for the bucket. By default, a +// resource owner, in this case the Amazon Web Services account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. 
For more information about permissions, +// see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) and +// Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// To perform this operation, the user or role performing the action must have the +// iam:PassRole +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. The following operations are related to PutBucketReplication: // // * // GetBucketReplication @@ -94,8 +96,9 @@ type PutBucketReplicationInput struct { // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a // message integrity check to verify that the request body was not corrupted in // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command - // Line Interface (CLI) or AWS SDKs, this field is calculated automatically. + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketRequestPayment.go b/service/s3/api_op_PutBucketRequestPayment.go index c46821befd6..de568c3ace5 100644 --- a/service/s3/api_op_PutBucketRequestPayment.go +++ b/service/s3/api_op_PutBucketRequestPayment.go @@ -55,8 +55,9 @@ type PutBucketRequestPaymentInput struct { // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a // message integrity check to verify that the request body was not corrupted in // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). 
For requests made using the AWS Command - // Line Interface (CLI) or AWS SDKs, this field is calculated automatically. + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketTagging.go b/service/s3/api_op_PutBucketTagging.go index b3b13d86993..36704a9339b 100644 --- a/service/s3/api_op_PutBucketTagging.go +++ b/service/s3/api_op_PutBucketTagging.go @@ -12,13 +12,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the tags for a bucket. Use tags to organize your AWS bill to reflect your -// own cost structure. To do this, sign up to get your AWS account bill with tag -// key values included. Then, to see the cost of combined resources, organize your -// billing information according to resources with the same tag key values. For -// example, you can tag several resources with a specific application name, and -// then organize your billing information to see the total cost of that application -// across several services. For more information, see Cost Allocation and Tagging +// Sets the tags for a bucket. Use tags to organize your Amazon Web Services bill +// to reflect your own cost structure. To do this, sign up to get your Amazon Web +// Services account bill with tag key values included. Then, to see the cost of +// combined resources, organize your billing information according to resources +// with the same tag key values. For example, you can tag several resources with a +// specific application name, and then organize your billing information to see the +// total cost of that application across several services. 
For more information, +// see Cost Allocation and Tagging // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) // and Using Cost Allocation in Amazon S3 Bucket Tags // (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). When @@ -40,7 +41,7 @@ import ( // can occur if the tag did not pass input validation. For information about tag // restrictions, see User-Defined Tag Restrictions // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// and AWS-Generated Cost Allocation Tag Restrictions +// and Amazon Web Services-Generated Cost Allocation Tag Restrictions // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). // // * @@ -99,8 +100,9 @@ type PutBucketTaggingInput struct { // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a // message integrity check to verify that the request body was not corrupted in // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command - // Line Interface (CLI) or AWS SDKs, this field is calculated automatically. + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketVersioning.go b/service/s3/api_op_PutBucketVersioning.go index 26938462bb4..9e4246a13c1 100644 --- a/service/s3/api_op_PutBucketVersioning.go +++ b/service/s3/api_op_PutBucketVersioning.go @@ -73,8 +73,9 @@ type PutBucketVersioningInput struct { // >The base64-encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in // transit. 
For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command - // Line Interface (CLI) or AWS SDKs, this field is calculated automatically. + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutBucketWebsite.go b/service/s3/api_op_PutBucketWebsite.go index cae57672bef..e7207eb80cb 100644 --- a/service/s3/api_op_PutBucketWebsite.go +++ b/service/s3/api_op_PutBucketWebsite.go @@ -113,8 +113,9 @@ type PutBucketWebsiteInput struct { // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a // message integrity check to verify that the request body was not corrupted in // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the AWS Command - // Line Interface (CLI) or AWS SDKs, this field is calculated automatically. + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutObject.go b/service/s3/api_op_PutObject.go index a0b82573b1b..135a77e881a 100644 --- a/service/s3/api_op_PutObject.go +++ b/service/s3/api_op_PutObject.go @@ -26,27 +26,38 @@ import ( // header, Amazon S3 checks the object against the provided MD5 value and, if they // do not match, returns an error. Additionally, you can calculate the MD5 while // putting an object to Amazon S3 and compare the returned ETag to the calculated -// MD5 value. The Content-MD5 header is required for any request to upload an +// MD5 value. 
+// +// * To successfully complete the PutObject request, you must have the +// s3:PutObject in your IAM permissions. +// +// * To successfully change the objects acl +// of your PutObject request, you must have the s3:PutObjectAcl in your IAM +// permissions. +// +// * The Content-MD5 header is required for any request to upload an // object with a retention period configured using Amazon S3 Object Lock. For more // information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in -// the Amazon S3 User Guide. Server-side Encryption You can optionally request +// the Amazon S3 User Guide. +// +// Server-side Encryption You can optionally request // server-side encryption. With server-side encryption, Amazon S3 encrypts your // data as it writes it to disks in its data centers and decrypts the data when you -// access it. You have the option to provide your own encryption key or use AWS -// managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using -// Server-Side Encryption +// access it. You have the option to provide your own encryption key or use Amazon +// Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information, +// see Using Server-Side Encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). -// If you request server-side encryption using AWS Key Management Service -// (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more +// If you request server-side encryption using Amazon Web Services Key Management +// Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more // information, see Amazon S3 Bucket Keys // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon // S3 User Guide. Access Control List (ACL)-Specific Request Headers You can use // headers to grant ACL- based permissions. By default, all objects are private. 
// Only the owner has full access control. When adding a new object, you can grant -// permissions to individual AWS accounts or to predefined groups defined by Amazon -// S3. These permissions are then added to the ACL on the object. For more -// information, see Access Control List (ACL) Overview +// permissions to individual Amazon Web Services accounts or to predefined groups +// defined by Amazon S3. These permissions are then added to the ACL on the object. +// For more information, see Access Control List (ACL) Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing // ACLs Using the REST API // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). @@ -95,17 +106,17 @@ type PutObjectInput struct { // with an access point, you must direct requests to the access point hostname. The // access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. 
For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -236,18 +247,20 @@ type PutObjectInput struct { // encryption key was transmitted without error. SSECustomerKeyMD5 *string - // Specifies the AWS KMS Encryption Context to use for object encryption. The value - // of this header is a base64-encoded UTF-8 string holding JSON with the encryption - // context key-value pairs. + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string // If x-amz-server-side-encryption is present and has the value of aws:kms, this - // header specifies the ID of the AWS Key Management Service (AWS KMS) symmetrical - // customer managed customer master key (CMK) that was used for the object. If you - // specify x-amz-server-side-encryption:aws:kms, but do not provide - // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK - // in AWS to protect the data. If the KMS key does not exist in the same account - // issuing the command, you must use the full ARN and not just the ID. + // header specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetrical customer managed customer master key (CMK) + // that was used for the object. If you specify + // x-amz-server-side-encryption:aws:kms, but do not provide + // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web + // Services managed CMK in Amazon Web Services to protect the data. 
If the KMS key + // does not exist in the same account issuing the command, you must use the full + // ARN and not just the ID. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 @@ -289,7 +302,7 @@ type PutObjectInput struct { type PutObjectOutput struct { // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with AWS KMS (SSE-KMS). + // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Entity tag for the uploaded object. @@ -316,20 +329,21 @@ type PutObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string - // If present, specifies the AWS KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string // If x-amz-server-side-encryption is present and has the value of aws:kms, this - // header specifies the ID of the AWS Key Management Service (AWS KMS) symmetric - // customer managed customer master key (CMK) that was used for the object. + // header specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for the object. SSEKMSKeyId *string - // If you specified server-side encryption either with an AWS KMS customer master - // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response - // includes this header. It confirms the encryption algorithm that Amazon S3 used - // to encrypt the object. 
+ // If you specified server-side encryption either with an Amazon Web Services KMS + // customer master key (CMK) or Amazon S3-managed encryption key in your PUT + // request, the response includes this header. It confirms the encryption algorithm + // that Amazon S3 used to encrypt the object. ServerSideEncryption types.ServerSideEncryption // Version of the object. diff --git a/service/s3/api_op_PutObjectAcl.go b/service/s3/api_op_PutObjectAcl.go index a872b66e75c..a3e1c11acf2 100644 --- a/service/s3/api_op_PutObjectAcl.go +++ b/service/s3/api_op_PutObjectAcl.go @@ -39,59 +39,62 @@ import ( // Specify access permissions explicitly with the x-amz-grant-read, // x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and -// grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If -// you use these ACL-specific headers, you cannot use x-amz-acl header to set a -// canned ACL. These parameters map to the set of permissions that Amazon S3 -// supports in an ACL. For more information, see Access Control List (ACL) Overview +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use x-amz-acl +// header to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control List +// (ACL) Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). 
You specify // each grantee as a type=value pair, where the type is one of the following: // // * id -// – if the value specified is the canonical user ID of an AWS account +// – if the value specified is the canonical user ID of an Amazon Web Services +// account // -// * uri – if -// you are granting permissions to a predefined group +// * uri – if you are granting permissions to a predefined group // -// * emailAddress – if the -// value specified is the email address of an AWS account Using email addresses to -// specify a grantee is only supported in the following AWS Regions: +// * +// emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: // -// * US East (N. -// Virginia) +// * US East (N. Virginia) // -// * US West (N. California) +// * US West +// (N. California) // // * US West (Oregon) // -// * Asia Pacific -// (Singapore) +// * Asia Pacific (Singapore) // -// * Asia Pacific (Sydney) +// * Asia Pacific +// (Sydney) // // * Asia Pacific (Tokyo) // -// * Europe -// (Ireland) +// * Europe (Ireland) // -// * South America (São Paulo) +// * South America (São +// Paulo) // -// For a list of all the Amazon S3 -// supported Regions and endpoints, see Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS -// General Reference. +// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. // -// For example, the following x-amz-grant-read header grants -// list objects permission to the two AWS accounts identified by their email -// addresses. 
x-amz-grant-read: emailAddress="xyz@amazon.com", -// emailAddress="abc@amazon.com" +// For example, the following +// x-amz-grant-read header grants list objects permission to the two Amazon Web +// Services accounts identified by their email addresses. x-amz-grant-read: +// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" // -// You can use either a canned ACL or specify access -// permissions explicitly. You cannot do both. Grantee Values You can specify the -// person (grantee) to whom you're assigning access rights (using request elements) -// in the following ways: +// You can use either +// a canned ACL or specify access permissions explicitly. You cannot do both. +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: // -// * By the person's ID: <>ID<><>GranteesEmail<> -// DisplayName is optional and ignored in the request. +// * By the person's +// ID: <>ID<><>GranteesEmail<> DisplayName is optional and ignored in the +// request. // // * By URI: // <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> @@ -100,12 +103,12 @@ import ( // address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the // CanonicalUser and, in a response to a GET Object acl request, appears as the // CanonicalUser. Using email addresses to specify a grantee is only supported in -// the following AWS Regions: +// the following Amazon Web Services Regions: // // * US East (N. Virginia) // -// * US West (N. -// California) +// * US West +// (N. California) // // * US West (Oregon) // @@ -123,13 +126,13 @@ import ( // // For a list of all the Amazon S3 supported Regions and endpoints, see // Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS -// General Reference. +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. 
// -// Versioning The ACL of an object is set at the object version -// level. By default, PUT sets the ACL of the current version of an object. To set -// the ACL of a different version, use the versionId subresource. Related -// Resources +// Versioning The ACL of an object is set +// at the object version level. By default, PUT sets the ACL of the current version +// of an object. To set the ACL of a different version, use the versionId +// subresource. Related Resources // // * CopyObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) @@ -157,9 +160,9 @@ type PutObjectAclInput struct { // When using this action with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // @@ -170,17 +173,17 @@ type PutObjectAclInput struct { // access point, you must direct requests to the access point hostname. The access // point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -197,8 +200,9 @@ type PutObjectAclInput struct { // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a // message integrity check to verify that the request body was not corrupted in // transit. For more information, go to RFC 1864.> - // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the AWS Command - // Line Interface (CLI) or AWS SDKs, this field is calculated automatically. + // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
If the bucket is owned by a diff --git a/service/s3/api_op_PutObjectLegalHold.go b/service/s3/api_op_PutObjectLegalHold.go index f8e9c1e0576..6afead3b348 100644 --- a/service/s3/api_op_PutObjectLegalHold.go +++ b/service/s3/api_op_PutObjectLegalHold.go @@ -37,9 +37,9 @@ type PutObjectLegalHoldInput struct { // When using this action with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // @@ -51,8 +51,9 @@ type PutObjectLegalHoldInput struct { // This member is required. Key *string - // The MD5 hash for the request body. For requests made using the AWS Command Line - // Interface (CLI) or AWS SDKs, this field is calculated automatically. + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutObjectLockConfiguration.go b/service/s3/api_op_PutObjectLockConfiguration.go index ae27df18032..54f9d7fd3ac 100644 --- a/service/s3/api_op_PutObjectLockConfiguration.go +++ b/service/s3/api_op_PutObjectLockConfiguration.go @@ -26,7 +26,7 @@ import ( // // * You can only enable Object // Lock for new buckets. 
If you want to turn on Object Lock for an existing bucket, -// contact AWS Support. +// contact Amazon Web Services Support. func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { if params == nil { params = &PutObjectLockConfigurationInput{} @@ -49,8 +49,9 @@ type PutObjectLockConfigurationInput struct { // This member is required. Bucket *string - // The MD5 hash for the request body. For requests made using the AWS Command Line - // Interface (CLI) or AWS SDKs, this field is calculated automatically. + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutObjectRetention.go b/service/s3/api_op_PutObjectRetention.go index 694cf53c46a..ebdae5f9e34 100644 --- a/service/s3/api_op_PutObjectRetention.go +++ b/service/s3/api_op_PutObjectRetention.go @@ -14,8 +14,14 @@ import ( // Places an Object Retention configuration on an object. For more information, see // Locking Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This action -// is not supported by Amazon S3 on Outposts. +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). Users or +// accounts require the s3:PutObjectRetention permission in order to place an +// Object Retention configuration on objects. Bypassing a Governance Retention +// configuration requires the s3:BypassGovernanceRetention permission. This action +// is not supported by Amazon S3 on Outposts. Permissions When the Object Lock +// retention mode is set to compliance, you need s3:PutObjectRetention and +// s3:BypassGovernanceRetention permissions. 
For other requests to +// PutObjectRetention, only s3:PutObjectRetention permissions are required. func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { if params == nil { params = &PutObjectRetentionInput{} @@ -37,9 +43,9 @@ type PutObjectRetentionInput struct { // configuration to. When using this action with an access point, you must direct // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // @@ -55,8 +61,9 @@ type PutObjectRetentionInput struct { // Indicates whether this action should bypass Governance-mode restrictions. BypassGovernanceRetention bool - // The MD5 hash for the request body. For requests made using the AWS Command Line - // Interface (CLI) or AWS SDKs, this field is calculated automatically. + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
If the bucket is owned by a diff --git a/service/s3/api_op_PutObjectTagging.go b/service/s3/api_op_PutObjectTagging.go index 5d8befb36fc..733fb4969d4 100644 --- a/service/s3/api_op_PutObjectTagging.go +++ b/service/s3/api_op_PutObjectTagging.go @@ -83,17 +83,17 @@ type PutObjectTaggingInput struct { // point, you must direct requests to the access point hostname. The access point // hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -110,8 +110,9 @@ type PutObjectTaggingInput struct { // This member is required. Tagging *types.Tagging - // The MD5 hash for the request body. 
For requests made using the AWS Command Line - // Interface (CLI) or AWS SDKs, this field is calculated automatically. + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_PutPublicAccessBlock.go b/service/s3/api_op_PutPublicAccessBlock.go index 63d01877144..de9b77fe377 100644 --- a/service/s3/api_op_PutPublicAccessBlock.go +++ b/service/s3/api_op_PutPublicAccessBlock.go @@ -75,8 +75,8 @@ type PutPublicAccessBlockInput struct { PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration // The MD5 hash of the PutPublicAccessBlock request body. For requests made using - // the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated - // automatically. + // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services + // SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the bucket is owned by a diff --git a/service/s3/api_op_RestoreObject.go b/service/s3/api_op_RestoreObject.go index 4a503e79f4d..a8b36240079 100644 --- a/service/s3/api_op_RestoreObject.go +++ b/service/s3/api_op_RestoreObject.go @@ -41,11 +41,11 @@ import ( // // * // Define an output location for the select query's output. This must be an Amazon -// S3 bucket in the same AWS Region as the bucket that contains the archive object -// that is being queried. The AWS account that initiates the job must have -// permissions to write to the S3 bucket. You can specify the storage class and -// encryption for the output objects stored in the bucket. 
For more information -// about output, see Querying Archived Objects +// S3 bucket in the same Amazon Web Services Region as the bucket that contains the +// archive object that is being queried. The Amazon Web Services account that +// initiates the job must have permissions to write to the S3 bucket. You can +// specify the storage class and encryption for the output objects stored in the +// bucket. For more information about output, see Querying Archived Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) // in the Amazon S3 User Guide. For more information about the S3 structure in the // request body, see the following: @@ -251,17 +251,17 @@ type RestoreObjectInput struct { // access point, you must direct requests to the access point hostname. The access // point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. 
For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // diff --git a/service/s3/api_op_UploadPart.go b/service/s3/api_op_UploadPart.go index 3dfb6ef28f3..39b9b831c09 100644 --- a/service/s3/api_op_UploadPart.go +++ b/service/s3/api_op_UploadPart.go @@ -32,9 +32,10 @@ import ( // when traversing the network, specify the Content-MD5 header in the upload part // request. Amazon S3 checks the part data against the provided MD5 value. If they // do not match, Amazon S3 returns an error. If the upload request is signed with -// Signature Version 4, then AWS S3 uses the x-amz-content-sha256 header as a -// checksum instead of Content-MD5. For more information see Authenticating -// Requests: Using the Authorization Header (AWS Signature Version 4) +// Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 +// header as a checksum instead of Content-MD5. For more information see +// Authenticating Requests: Using the Authorization Header (Amazon Web Services +// Signature Version 4) // (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). // Note: After you initiate multipart upload and upload one or more parts, you must // either complete or abort multipart upload in order to stop getting charged for @@ -48,10 +49,10 @@ import ( // Amazon S3 User Guide. You can optionally request server-side encryption where // Amazon S3 encrypts your data as it writes it to disks in its data centers and // decrypts it for you when you access it. You have the option of providing your -// own encryption key, or you can use the AWS managed encryption keys. 
If you -// choose to provide your own encryption key, the request headers you provide in -// the request must match the headers you used in the request to initiate the -// upload by using CreateMultipartUpload +// own encryption key, or you can use the Amazon Web Services managed encryption +// keys. If you choose to provide your own encryption key, the request headers you +// provide in the request must match the headers you used in the request to +// initiate the upload by using CreateMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). // For more information, go to Using Server-Side Encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) @@ -130,17 +131,17 @@ type UploadPartInput struct { // this action with an access point, you must direct requests to the access point // hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. 
For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -210,7 +211,7 @@ type UploadPartInput struct { type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with AWS KMS (SSE-KMS). + // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Entity tag for the uploaded object. @@ -229,8 +230,9 @@ type UploadPartOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) was used for the object. + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // was used for the object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_UploadPartCopy.go b/service/s3/api_op_UploadPartCopy.go index faf9d3a971f..96e4f9a1f4d 100644 --- a/service/s3/api_op_UploadPartCopy.go +++ b/service/s3/api_op_UploadPartCopy.go @@ -140,17 +140,17 @@ type UploadPartCopyInput struct { // The bucket name. When using this action with an access point, you must direct // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the AWS SDKs, you provide the access point - // ARN in place of the bucket name. 
For more information about access point ARNs, - // see Using access points + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action using S3 on Outposts through the AWS SDKs, you provide the Outposts - // bucket ARN in place of the bucket name. For more information about S3 on - // Outposts ARNs, see Using S3 on Outposts + // this action using S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using S3 on Outposts // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the // Amazon S3 User Guide. // @@ -176,9 +176,9 @@ type UploadPartCopyInput struct { // 123456789012 in Region us-west-2, use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. // The value must be URL encoded. Amazon S3 supports copy operations using access - // points only when the source and destination buckets are in the same AWS Region. - // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the - // ARN of the object as accessed in the format + // points only when the source and destination buckets are in the same Amazon Web + // Services Region. Alternatively, for objects accessed through Amazon S3 on + // Outposts, specify the ARN of the object as accessed in the format // arn:aws:s3-outposts:::outpost//object/. 
For example, to copy the object // reports/january.pdf through outpost my-outpost owned by account 123456789012 in // Region us-west-2, use the URL encoding of @@ -284,7 +284,7 @@ type UploadPartCopyInput struct { type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with AWS KMS (SSE-KMS). + // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Container for all response elements. @@ -307,9 +307,9 @@ type UploadPartCopyOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for the object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_WriteGetObjectResponse.go b/service/s3/api_op_WriteGetObjectResponse.go index 530f193cf6b..0dfd5d07a60 100644 --- a/service/s3/api_op_WriteGetObjectResponse.go +++ b/service/s3/api_op_WriteGetObjectResponse.go @@ -26,29 +26,33 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html), in // addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage. // The GetObject response metadata is supported so that the WriteGetObjectResponse -// caller, typically an AWS Lambda function, can provide the same metadata when it +// caller, typically an Lambda function, can provide the same metadata when it // internally invokes GetObject. 
When WriteGetObjectResponse is called by a // customer-owned Lambda function, the metadata returned to the end user GetObject -// call might differ from what Amazon S3 would normally return. AWS provides some -// prebuilt Lambda functions that you can use with S3 Object Lambda to detect and -// redact personally identifiable information (PII) and decompress S3 objects. -// These Lambda functions are available in the AWS Serverless Application -// Repository, and can be selected through the AWS Management Console when you -// create your Object Lambda Access Point. Example 1: PII Access Control - This -// Lambda function uses Amazon Comprehend, a natural language processing (NLP) -// service using machine learning to find insights and relationships in text. It -// automatically detects personally identifiable information (PII) such as names, -// addresses, dates, credit card numbers, and social security numbers from -// documents in your Amazon S3 bucket. Example 2: PII Redaction - This Lambda -// function uses Amazon Comprehend, a natural language processing (NLP) service -// using machine learning to find insights and relationships in text. It -// automatically redacts personally identifiable information (PII) such as names, -// addresses, dates, credit card numbers, and social security numbers from -// documents in your Amazon S3 bucket. Example 3: Decompression - The Lambda -// function S3ObjectLambdaDecompression, is equipped to decompress objects stored -// in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, -// zstandard and ZIP. For information on how to view and use these functions, see -// Using AWS built Lambda functions +// call might differ from what Amazon S3 would normally return. You can include any +// number of metadata headers. When including a metadata header, it should be +// prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header: +// MyCustomValue. 
The primary use case for this is to forward GetObject metadata. +// Amazon Web Services provides some prebuilt Lambda functions that you can use +// with S3 Object Lambda to detect and redact personally identifiable information +// (PII) and decompress S3 objects. These Lambda functions are available in the +// Amazon Web Services Serverless Application Repository, and can be selected +// through the Amazon Web Services Management Console when you create your Object +// Lambda Access Point. Example 1: PII Access Control - This Lambda function uses +// Amazon Comprehend, a natural language processing (NLP) service using machine +// learning to find insights and relationships in text. It automatically detects +// personally identifiable information (PII) such as names, addresses, dates, +// credit card numbers, and social security numbers from documents in your Amazon +// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon +// Comprehend, a natural language processing (NLP) service using machine learning +// to find insights and relationships in text. It automatically redacts personally +// identifiable information (PII) such as names, addresses, dates, credit card +// numbers, and social security numbers from documents in your Amazon S3 bucket. +// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is +// equipped to decompress objects stored in S3 in one of six compressed file +// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For information +// on how to view and use these functions, see Using Amazon Web Services built +// Lambda functions // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) in // the Amazon S3 User Guide. 
func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) { @@ -86,7 +90,7 @@ type WriteGetObjectResponseInput struct { Body io.Reader // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for - // server-side encryption with AWS KMS (SSE-KMS). + // server-side encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled bool // Specifies caching behavior along the request/reply chain. @@ -189,9 +193,9 @@ type WriteGetObjectResponseInput struct { // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for stored in - // Amazon S3 object. + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for stored in Amazon S3 object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing requested object in diff --git a/service/s3/types/errors.go b/service/s3/types/errors.go index 893c5c1551a..8c3a386f7f2 100644 --- a/service/s3/types/errors.go +++ b/service/s3/types/errors.go @@ -28,10 +28,10 @@ func (e *BucketAlreadyExists) ErrorCode() string { return "BucketAlr func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The bucket you tried to create already exists, and you own it. Amazon S3 returns -// this error in all AWS Regions except in the North Virginia Region. For legacy -// compatibility, if you re-create an existing bucket that you already own in the -// North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access -// control lists (ACLs). 
+// this error in all Amazon Web Services Regions except in the North Virginia +// Region. For legacy compatibility, if you re-create an existing bucket that you +// already own in the North Virginia Region, Amazon S3 returns 200 OK and resets +// the bucket access control lists (ACLs). type BucketAlreadyOwnedByYou struct { Message *string diff --git a/service/s3/types/types.go b/service/s3/types/types.go index 4e5a49cb6dd..c06c1dfb1bd 100644 --- a/service/s3/types/types.go +++ b/service/s3/types/types.go @@ -175,7 +175,8 @@ type AnalyticsS3BucketDestination struct { } // In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is -// globally unique, and the namespace is shared by all AWS accounts. +// globally unique, and the namespace is shared by all Amazon Web Services +// accounts. type Bucket struct { // Date the bucket was created. This date can change when making changes to your @@ -281,8 +282,7 @@ type Condition struct { type CopyObjectResult struct { // Returns the ETag of the new object. The ETag reflects only changes to the - // contents of an object, not its metadata. The source and destination ETag is - // identical for a successfully copied non-multipart object. + // contents of an object, not its metadata. ETag *string // Creation date of the object. @@ -563,16 +563,16 @@ type Destination struct { // Specify this only in a cross-account scenario (where source and destination // bucket owners are not the same), and you want to change replica ownership to the - // AWS account that owns the destination bucket. If this is not specified in the - // replication configuration, the replicas are owned by same AWS account that owns - // the source object. + // Amazon Web Services account that owns the destination bucket. If this is not + // specified in the replication configuration, the replicas are owned by same + // Amazon Web Services account that owns the source object. 
AccessControlTranslation *AccessControlTranslation // Destination bucket owner account ID. In a cross-account scenario, if you direct - // Amazon S3 to change replica ownership to the AWS account that owns the - // destination bucket by specifying the AccessControlTranslation property, this is - // the account ID of the destination bucket owner. For more information, see - // Replication Additional Configuration: Changing the Replica Owner + // Amazon S3 to change replica ownership to the Amazon Web Services account that + // owns the destination bucket by specifying the AccessControlTranslation property, + // this is the account ID of the destination bucket owner. For more information, + // see Replication Additional Configuration: Changing the Replica Owner // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) // in the Amazon S3 User Guide. Account *string @@ -615,11 +615,11 @@ type Encryption struct { KMSContext *string // If the encryption type is aws:kms, this optional value specifies the ID of the - // symmetric customer managed AWS KMS CMK to use for encryption of job results. - // Amazon S3 only supports symmetric CMKs. For more information, see Using - // symmetric and asymmetric keys + // symmetric customer managed Amazon Web Services KMS CMK to use for encryption of + // job results. Amazon S3 only supports symmetric CMKs. For more information, see + // Using symmetric and asymmetric keys // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the AWS Key Management Service Developer Guide. + // in the Amazon Web Services Key Management Service Developer Guide. KMSKeyId *string noSmithyDocumentSerde @@ -629,13 +629,13 @@ type Encryption struct { // destination for replicated objects. type EncryptionConfiguration struct { - // Specifies the ID (Key ARN or Alias ARN) of the customer managed AWS KMS key - // stored in AWS Key Management Service (KMS) for the destination bucket. 
Amazon S3 - // uses this key to encrypt replica objects. Amazon S3 only supports symmetric, - // customer managed KMS keys. For more information, see Using symmetric and - // asymmetric keys + // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web + // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for + // the destination bucket. Amazon S3 uses this key to encrypt replica objects. + // Amazon S3 only supports symmetric, customer managed KMS keys. For more + // information, see Using symmetric and asymmetric keys // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the AWS Key Management Service Developer Guide. + // in the Amazon Web Services Key Management Service Developer Guide. ReplicaKmsKeyID *string noSmithyDocumentSerde @@ -660,79 +660,80 @@ type Error struct { // * // Code: AccountProblem // - // * Description: There is a problem with your AWS account - // that prevents the action from completing successfully. Contact AWS Support for - // further assistance. + // * Description: There is a problem with your Amazon Web + // Services account that prevents the action from completing successfully. Contact + // Amazon Web Services Support for further assistance. // - // * HTTP Status Code: 403 Forbidden + // * HTTP Status Code: 403 + // Forbidden // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: Client // // * Code: AllAccessDisabled // - // * Description: All access to this - // Amazon S3 resource has been disabled. Contact AWS Support for further - // assistance. + // * + // Description: All access to this Amazon S3 resource has been disabled. Contact + // Amazon Web Services Support for further assistance. 
// - // * HTTP Status Code: 403 Forbidden + // * HTTP Status Code: 403 + // Forbidden // - // * SOAP Fault Code Prefix: - // Client + // * SOAP Fault Code Prefix: Client // - // * Code: AmbiguousGrantByEmailAddress + // * Code: + // AmbiguousGrantByEmailAddress // - // * Description: The email address - // you provided is associated with more than one account. + // * Description: The email address you provided is + // associated with more than one account. // - // * HTTP Status Code: 400 - // Bad Request + // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * + // SOAP Fault Code Prefix: Client // - // * Code: - // AuthorizationHeaderMalformed + // * Code: AuthorizationHeaderMalformed // - // * Description: The authorization header you - // provided is invalid. + // * + // Description: The authorization header you provided is invalid. // - // * HTTP Status Code: 400 Bad Request + // * HTTP Status + // Code: 400 Bad Request // - // * HTTP Status Code: - // N/A + // * HTTP Status Code: N/A // // * Code: BadDigest // - // * Description: The Content-MD5 you specified did not - // match what we received. + // * + // Description: The Content-MD5 you specified did not match what we received. // - // * HTTP Status Code: 400 Bad Request + // * + // HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: BucketAlreadyExists + // * Code: + // BucketAlreadyExists // - // * Description: The requested bucket - // name is not available. The bucket namespace is shared by all users of the - // system. Please select a different name and try again. + // * Description: The requested bucket name is not available. + // The bucket namespace is shared by all users of the system. Please select a + // different name and try again. 
// - // * HTTP Status Code: 409 - // Conflict + // * HTTP Status Code: 409 Conflict // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault + // Code Prefix: Client // // * Code: BucketAlreadyOwnedByYou // - // * - // Description: The bucket you tried to create already exists, and you own it. - // Amazon S3 returns this error in all AWS Regions except in the North Virginia - // Region. For legacy compatibility, if you re-create an existing bucket that you - // already own in the North Virginia Region, Amazon S3 returns 200 OK and resets - // the bucket access control lists (ACLs). + // * Description: The bucket + // you tried to create already exists, and you own it. Amazon S3 returns this error + // in all Amazon Web Services Regions except in the North Virginia Region. For + // legacy compatibility, if you re-create an existing bucket that you already own + // in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket + // access control lists (ACLs). // - // * Code: 409 Conflict (in all Regions - // except the North Virginia Region) + // * Code: 409 Conflict (in all Regions except the + // North Virginia Region) // // * SOAP Fault Code Prefix: Client // @@ -853,80 +854,80 @@ type Error struct { // // * Code: InvalidAccessKeyId // - // * Description: The AWS access key ID you - // provided does not exist in our records. + // * Description: The Amazon Web Services + // access key ID you provided does not exist in our records. // - // * HTTP Status Code: 403 Forbidden + // * HTTP Status Code: + // 403 Forbidden // - // * - // SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: InvalidAddressingHeader + // * Code: + // InvalidAddressingHeader // - // * Description: - // You must specify the Anonymous role. + // * Description: You must specify the Anonymous role. 
// - // * HTTP Status Code: N/A + // * + // HTTP Status Code: N/A // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: InvalidArgument + // * Code: + // InvalidArgument // // * Description: Invalid Argument // - // * HTTP - // Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad + // Request // // * SOAP Fault Code Prefix: Client // - // * Code: - // InvalidBucketName + // * Code: InvalidBucketName // - // * Description: The specified bucket is not valid. + // * + // Description: The specified bucket is not valid. // - // * HTTP - // Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad + // Request // // * SOAP Fault Code Prefix: Client // - // * Code: - // InvalidBucketState + // * Code: InvalidBucketState // - // * Description: The request is not valid with the current - // state of the bucket. + // * + // Description: The request is not valid with the current state of the bucket. // - // * HTTP Status Code: 409 Conflict + // * + // HTTP Status Code: 409 Conflict // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: InvalidDigest + // * Code: + // InvalidDigest // - // * Description: The Content-MD5 you - // specified is not valid. + // * Description: The Content-MD5 you specified is not valid. // - // * HTTP Status Code: 400 Bad Request + // * + // HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: InvalidEncryptionAlgorithmError + // * Code: + // InvalidEncryptionAlgorithmError // - // * Description: The - // encryption request you specified is not valid. The valid value is AES256. + // * Description: The encryption request you + // specified is not valid. The valid value is AES256. 
// - // * - // HTTP Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad + // Request // // * SOAP Fault Code Prefix: Client // - // * Code: - // InvalidLocationConstraint + // * Code: InvalidLocationConstraint // - // * Description: The specified location constraint is - // not valid. For more information about Regions, see How to Select a Region for - // Your Buckets + // * + // Description: The specified location constraint is not valid. For more + // information about Regions, see How to Select a Region for Your Buckets // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). // // * @@ -970,194 +971,195 @@ type Error struct { // * Code: InvalidPayer // // * Description: All access to - // this object has been disabled. Please contact AWS Support for further - // assistance. + // this object has been disabled. Please contact Amazon Web Services Support for + // further assistance. // // * HTTP Status Code: 403 Forbidden // - // * SOAP Fault Code Prefix: - // Client + // * SOAP Fault Code + // Prefix: Client // // * Code: InvalidPolicyDocument // - // * Description: The content of the form - // does not meet the conditions specified in the policy document. + // * Description: The content of the + // form does not meet the conditions specified in the policy document. // - // * HTTP Status - // Code: 400 Bad Request + // * HTTP + // Status Code: 400 Bad Request // // * SOAP Fault Code Prefix: Client // - // * Code: InvalidRange + // * Code: + // InvalidRange // - // * - // Description: The requested range cannot be satisfied. + // * Description: The requested range cannot be satisfied. // - // * HTTP Status Code: 416 - // Requested Range Not Satisfiable + // * HTTP + // Status Code: 416 Requested Range Not Satisfiable // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: + // Client // - // * Code: - // InvalidRequest + // * Code: InvalidRequest // // * Description: Please use AWS4-HMAC-SHA256. 
// - // * HTTP Status Code: - // 400 Bad Request + // * + // HTTP Status Code: 400 Bad Request // // * Code: N/A // // * Code: InvalidRequest // - // * Description: SOAP - // requests must be made over an HTTPS connection. + // * + // Description: SOAP requests must be made over an HTTPS connection. // - // * HTTP Status Code: 400 Bad - // Request + // * HTTP Status + // Code: 400 Bad Request // // * SOAP Fault Code Prefix: Client // - // * Code: InvalidRequest + // * Code: + // InvalidRequest // - // * - // Description: Amazon S3 Transfer Acceleration is not supported for buckets with - // non-DNS compliant names. + // * Description: Amazon S3 Transfer Acceleration is not supported + // for buckets with non-DNS compliant names. // - // * HTTP Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad + // Request // // * Code: N/A // - // * - // Code: InvalidRequest + // * Code: InvalidRequest // - // * Description: Amazon S3 Transfer Acceleration is not - // supported for buckets with periods (.) in their names. + // * Description: Amazon S3 Transfer + // Acceleration is not supported for buckets with periods (.) in their names. // - // * HTTP Status Code: 400 - // Bad Request + // * + // HTTP Status Code: 400 Bad Request // // * Code: N/A // // * Code: InvalidRequest // - // * Description: Amazon S3 - // Transfer Accelerate endpoint only supports virtual style requests. + // * + // Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style + // requests. // - // * HTTP - // Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad Request // // * Code: N/A // - // * Code: InvalidRequest + // * Code: + // InvalidRequest // - // * - // Description: Amazon S3 Transfer Accelerate is not configured on this bucket. + // * Description: Amazon S3 Transfer Accelerate is not configured + // on this bucket. 
// - // * - // HTTP Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad Request // // * Code: N/A // - // * Code: InvalidRequest + // * Code: + // InvalidRequest // - // * - // Description: Amazon S3 Transfer Accelerate is disabled on this bucket. + // * Description: Amazon S3 Transfer Accelerate is disabled on this + // bucket. // - // * HTTP - // Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad Request // // * Code: N/A // - // * Code: InvalidRequest + // * Code: + // InvalidRequest // - // * - // Description: Amazon S3 Transfer Acceleration is not supported on this bucket. - // Contact AWS Support for more information. + // * Description: Amazon S3 Transfer Acceleration is not supported + // on this bucket. Contact Amazon Web Services Support for more information. // - // * HTTP Status Code: 400 Bad - // Request + // * + // HTTP Status Code: 400 Bad Request // // * Code: N/A // // * Code: InvalidRequest // - // * Description: Amazon S3 Transfer - // Acceleration cannot be enabled on this bucket. Contact AWS Support for more - // information. + // * + // Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. + // Contact Amazon Web Services Support for more information. // - // * HTTP Status Code: 400 Bad Request + // * HTTP Status Code: + // 400 Bad Request // // * Code: N/A // - // * Code: - // InvalidSecurity + // * Code: InvalidSecurity // - // * Description: The provided security credentials are not - // valid. + // * Description: The + // provided security credentials are not valid. // - // * HTTP Status Code: 403 Forbidden + // * HTTP Status Code: 403 + // Forbidden // // * SOAP Fault Code Prefix: Client // - // * - // Code: InvalidSOAPRequest - // - // * Description: The SOAP request body is invalid. + // * Code: InvalidSOAPRequest // // * - // HTTP Status Code: 400 Bad Request + // Description: The SOAP request body is invalid. 
+ // + // * HTTP Status Code: 400 Bad + // Request // // * SOAP Fault Code Prefix: Client // - // * Code: - // InvalidStorageClass + // * Code: InvalidStorageClass // - // * Description: The storage class you specified is not - // valid. + // * + // Description: The storage class you specified is not valid. // - // * HTTP Status Code: 400 Bad Request + // * HTTP Status Code: + // 400 Bad Request // // * SOAP Fault Code Prefix: Client // - // * - // Code: InvalidTargetBucketForLogging + // * Code: + // InvalidTargetBucketForLogging // - // * Description: The target bucket for - // logging does not exist, is not owned by you, or does not have the appropriate - // grants for the log-delivery group. + // * Description: The target bucket for logging does + // not exist, is not owned by you, or does not have the appropriate grants for the + // log-delivery group. // // * HTTP Status Code: 400 Bad Request // - // * SOAP - // Fault Code Prefix: Client + // * SOAP Fault Code + // Prefix: Client // // * Code: InvalidToken // - // * Description: The provided - // token is malformed or otherwise invalid. + // * Description: The provided token is + // malformed or otherwise invalid. // // * HTTP Status Code: 400 Bad Request // - // * - // SOAP Fault Code Prefix: Client + // * SOAP + // Fault Code Prefix: Client // // * Code: InvalidURI // - // * Description: Couldn't - // parse the specified URI. + // * Description: Couldn't parse the + // specified URI. // // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: + // Client // // * Code: KeyTooLongError // // * Description: Your key is too long. // - // * - // HTTP Status Code: 400 Bad Request + // * HTTP + // Status Code: 400 Bad Request // // * SOAP Fault Code Prefix: Client // @@ -1375,42 +1377,42 @@ type Error struct { // // * Description: Your account is not signed up for the Amazon S3 // service. You must sign up before you can use Amazon S3. 
You can sign up at the - // following URL: https://aws.amazon.com/s3 + // following URL: Amazon S3 (http://aws.amazon.com/s3) // - // * HTTP Status Code: 403 Forbidden + // * HTTP Status Code: 403 + // Forbidden // - // * - // SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // // * Code: OperationAborted // - // * Description: A - // conflicting conditional action is currently in progress against this resource. - // Try again. + // * + // Description: A conflicting conditional action is currently in progress against + // this resource. Try again. // // * HTTP Status Code: 409 Conflict // - // * SOAP Fault Code Prefix: - // Client + // * SOAP Fault Code + // Prefix: Client // // * Code: PermanentRedirect // - // * Description: The bucket you are attempting - // to access must be addressed using the specified endpoint. Send all future - // requests to this endpoint. + // * Description: The bucket you are + // attempting to access must be addressed using the specified endpoint. Send all + // future requests to this endpoint. // // * HTTP Status Code: 301 Moved Permanently // - // * SOAP - // Fault Code Prefix: Client + // * + // SOAP Fault Code Prefix: Client // // * Code: PreconditionFailed // - // * Description: At least - // one of the preconditions you specified did not hold. + // * Description: At + // least one of the preconditions you specified did not hold. // - // * HTTP Status Code: 412 - // Precondition Failed + // * HTTP Status Code: + // 412 Precondition Failed // // * SOAP Fault Code Prefix: Client // @@ -1480,8 +1482,8 @@ type Error struct { // // * // Description: The request signature we calculated does not match the signature - // you provided. Check your AWS secret access key and signing method. For more - // information, see REST Authentication + // you provided. Check your Amazon Web Services secret access key and signing + // method. 
For more information, see REST Authentication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) and // SOAP Authentication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) for @@ -1673,31 +1675,31 @@ type Grantee struct { DisplayName *string // Email address of the grantee. Using email addresses to specify a grantee is only - // supported in the following AWS Regions: + // supported in the following Amazon Web Services Regions: // - // * US East (N. Virginia) + // * US East (N. + // Virginia) // - // * US West (N. - // California) + // * US West (N. California) // // * US West (Oregon) // - // * Asia Pacific (Singapore) - // // * Asia Pacific - // (Sydney) + // (Singapore) + // + // * Asia Pacific (Sydney) // // * Asia Pacific (Tokyo) // - // * Europe (Ireland) + // * Europe + // (Ireland) // - // * South America (São - // Paulo) + // * South America (São Paulo) // - // For a list of all the Amazon S3 supported Regions and endpoints, see - // Regions and Endpoints - // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS - // General Reference. + // For a list of all the Amazon S3 + // supported Regions and endpoints, see Regions and Endpoints + // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the + // Amazon Web Services General Reference. EmailAddress *string // The canonical user ID of the grantee. @@ -1733,8 +1735,8 @@ type Initiator struct { // Name of the Principal. DisplayName *string - // If the principal is an AWS account, it provides the Canonical User ID. If the - // principal is an IAM User, it provides a user ARN value. + // If the principal is an Amazon Web Services account, it provides the Canonical + // User ID. If the principal is an IAM User, it provides a user ARN value. 
ID *string noSmithyDocumentSerde @@ -1967,10 +1969,10 @@ type JSONOutput struct { noSmithyDocumentSerde } -// A container for specifying the configuration for AWS Lambda notifications. +// A container for specifying the configuration for Lambda notifications. type LambdaFunctionConfiguration struct { - // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For more + // The Amazon S3 bucket event for which to invoke the Lambda function. For more // information, see Supported Event Types // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the // Amazon S3 User Guide. @@ -1978,7 +1980,7 @@ type LambdaFunctionConfiguration struct { // This member is required. Events []Event - // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 invokes + // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes // when the specified event type occurs. // // This member is required. @@ -2336,7 +2338,7 @@ type NoncurrentVersionTransition struct { // element is empty, notifications are turned off for the bucket. type NotificationConfiguration struct { - // Describes the AWS Lambda functions to invoke and the events for which to invoke + // Describes the Lambda functions to invoke and the events for which to invoke // them. LambdaFunctionConfigurations []LambdaFunctionConfiguration @@ -2372,17 +2374,18 @@ type Object struct { // created and how it is encrypted as described below: // // * Objects created by the - // PUT Object, POST Object, or Copy operation, or through the AWS Management - // Console, and are encrypted by SSE-S3 or plaintext, have ETags that are an MD5 - // digest of their object data. + // PUT Object, POST Object, or Copy operation, or through the Amazon Web Services + // Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that + // are an MD5 digest of their object data. 
// - // * Objects created by the PUT Object, POST Object, - // or Copy operation, or through the AWS Management Console, and are encrypted by - // SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object data. + // * Objects created by the PUT Object, + // POST Object, or Copy operation, or through the Amazon Web Services Management + // Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 + // digest of their object data. // - // * - // If an object is created by either the Multipart Upload or Part Copy operation, - // the ETag is not an MD5 digest, regardless of the method of encryption. + // * If an object is created by either the Multipart + // Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the + // method of encryption. ETag *string // The name that you assign to an object. You use the object key to retrieve the @@ -2632,11 +2635,12 @@ type PublicAccessBlockConfiguration struct { IgnorePublicAcls bool // Specifies whether Amazon S3 should restrict public bucket policies for this - // bucket. Setting this element to TRUE restricts access to this bucket to only AWS - // service principals and authorized users within this account if the bucket has a - // public policy. Enabling this setting doesn't affect previously stored bucket - // policies, except that public and cross-account access within any public bucket - // policy, including non-public delegation to specific accounts, is blocked. + // bucket. Setting this element to TRUE restricts access to this bucket to only + // Amazon Web Service principals and authorized users within this account if the + // bucket has a public policy. Enabling this setting doesn't affect previously + // stored bucket policies, except that public and cross-account access within any + // public bucket policy, including non-public delegation to specific accounts, is + // blocked. 
RestrictPublicBuckets bool noSmithyDocumentSerde @@ -2745,9 +2749,9 @@ type ReplicaModifications struct { // size of a replication configuration is 2 MB. type ReplicationConfiguration struct { - // The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) - // role that Amazon S3 assumes when replicating objects. For more information, see - // How to Set Up Replication + // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role + // that Amazon S3 assumes when replicating objects. For more information, see How + // to Set Up Replication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) in // the Amazon S3 User Guide. // @@ -2827,7 +2831,7 @@ type ReplicationRule struct { // that you want to replicate. You can choose to enable or disable the replication // of these objects. Currently, Amazon S3 supports only the filter that you can // specify for objects created with server-side encryption using a customer master - // key (CMK) stored in AWS Key Management Service (SSE-KMS). + // key (CMK) stored in Amazon Web Services Key Management Service (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria noSmithyDocumentSerde @@ -2928,7 +2932,7 @@ type ReplicationTime struct { // and replication metrics EventThreshold. type ReplicationTimeValue struct { - // Contains an integer specifying time in minutes. Valid values: 15 minutes. + // Contains an integer specifying time in minutes. Valid value: 15 Minutes int32 noSmithyDocumentSerde @@ -3081,12 +3085,12 @@ type ServerSideEncryptionByDefault struct { // This member is required. SSEAlgorithm ServerSideEncryption - // AWS Key Management Service (KMS) customer AWS KMS key ID to use for the default - // encryption. This parameter is allowed if and only if SSEAlgorithm is set to - // aws:kms. You can specify the key ID or the Amazon Resource Name (ARN) of the KMS - // key. 
However, if you are using encryption with cross-account operations, you - // must use a fully qualified KMS key ARN. For more information, see Using - // encryption for cross-account operations + // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services + // KMS key ID to use for the default encryption. This parameter is allowed if and + // only if SSEAlgorithm is set to aws:kms. You can specify the key ID or the Amazon + // Resource Name (ARN) of the KMS key. However, if you are using encryption with + // cross-account operations, you must use a fully qualified KMS key ARN. For more + // information, see Using encryption for cross-account operations // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). // For example: // @@ -3099,7 +3103,7 @@ type ServerSideEncryptionByDefault struct { // S3 only supports symmetric KMS keys and not asymmetric KMS keys. For more // information, see Using symmetric and asymmetric keys // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the AWS Key Management Service Developer Guide. + // in the Amazon Web Services Key Management Service Developer Guide. KMSMasterKeyID *string noSmithyDocumentSerde @@ -3141,7 +3145,7 @@ type ServerSideEncryptionRule struct { // that you want to replicate. You can choose to enable or disable the replication // of these objects. Currently, Amazon S3 supports only the filter that you can // specify for objects created with server-side encryption using a customer master -// key (CMK) stored in AWS Key Management Service (SSE-KMS). +// key (CMK) stored in Amazon Web Services Key Management Service (SSE-KMS). type SourceSelectionCriteria struct { // A filter that you can specify for selections for modifications on replicas. 
@@ -3154,8 +3158,8 @@ type SourceSelectionCriteria struct { ReplicaModifications *ReplicaModifications // A container for filter information for the selection of Amazon S3 objects - // encrypted with AWS KMS. If you include SourceSelectionCriteria in the - // replication configuration, this element is required. + // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria + // in the replication configuration, this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects noSmithyDocumentSerde @@ -3164,8 +3168,9 @@ type SourceSelectionCriteria struct { // Specifies the use of SSE-KMS to encrypt delivered inventory reports. type SSEKMS struct { - // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer - // managed customer master key (CMK) to use for encrypting inventory reports. + // Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web + // Services KMS) symmetric customer managed customer master key (CMK) to use for + // encrypting inventory reports. // // This member is required. KeyId *string @@ -3174,11 +3179,12 @@ type SSEKMS struct { } // A container for filter information for the selection of S3 objects encrypted -// with AWS KMS. +// with Amazon Web Services KMS. type SseKmsEncryptedObjects struct { // Specifies whether Amazon S3 replicates objects created with server-side - // encryption using an AWS KMS key stored in AWS Key Management Service. + // encryption using an Amazon Web Services KMS key stored in Amazon Web Services + // Key Management Service. // // This member is required. Status SseKmsEncryptedObjectsStatus diff --git a/service/sagemaker/api_op_CreateAutoMLJob.go b/service/sagemaker/api_op_CreateAutoMLJob.go index 459c2b5c623..bdbd1465f90 100644 --- a/service/sagemaker/api_op_CreateAutoMLJob.go +++ b/service/sagemaker/api_op_CreateAutoMLJob.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates an Autopilot job. 
Find the best performing model after you run an +// Creates an Autopilot job. Find the best-performing model after you run an // Autopilot job by calling . For information about how to use Autopilot, see // Automate Model Development with Amazon SageMaker Autopilot // (https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html). @@ -88,7 +88,7 @@ type CreateAutoMLJobInput struct { type CreateAutoMLJobOutput struct { - // The unique ARN that is assigned to the AutoML job when it is created. + // The unique ARN assigned to the AutoML job when it is created. // // This member is required. AutoMLJobArn *string diff --git a/service/sagemaker/api_op_CreateEndpointConfig.go b/service/sagemaker/api_op_CreateEndpointConfig.go index 9ddadd076bd..0c8bb19fb14 100644 --- a/service/sagemaker/api_op_CreateEndpointConfig.go +++ b/service/sagemaker/api_op_CreateEndpointConfig.go @@ -68,6 +68,12 @@ type CreateEndpointConfigInput struct { // This member is required. ProductionVariants []types.ProductionVariant + // Specifies configuration for how an endpoint performs asynchronous inference. + // This is a required field in order for your Endpoint to be invoked using + // InvokeEndpointAsync + // (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpoint.html). + AsyncInferenceConfig *types.AsyncInferenceConfig + // DataCaptureConfig *types.DataCaptureConfig diff --git a/service/sagemaker/api_op_CreateLabelingJob.go b/service/sagemaker/api_op_CreateLabelingJob.go index 56d20282ce9..c5e7b16cd9e 100644 --- a/service/sagemaker/api_op_CreateLabelingJob.go +++ b/service/sagemaker/api_op_CreateLabelingJob.go @@ -170,6 +170,17 @@ type CreateLabelingJobInput struct { // attributes to your label category configuration file. To learn how, see Create a // Labeling Category Configuration File for 3D Point Cloud Labeling Jobs // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-point-cloud-label-category-config.html). 
+ // For named entity recognition jobs, in addition to "labels", you must provide + // worker instructions in the label category configuration file using the + // "instructions" parameter: "instructions": {"shortInstruction":" + // Add header + // + // + // Add Instructions + // + // ", "fullInstruction":"Add additional instructions."}. For + // details and an example, see Create a Named Entity Recognition Labeling Job (API) + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-named-entity-recg.html#sms-creating-ner-api). // For all other built-in task types // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html) and custom // tasks diff --git a/service/sagemaker/api_op_CreateNotebookInstance.go b/service/sagemaker/api_op_CreateNotebookInstance.go index de5b660b27c..6e9f620dee9 100644 --- a/service/sagemaker/api_op_CreateNotebookInstance.go +++ b/service/sagemaker/api_op_CreateNotebookInstance.go @@ -137,6 +137,9 @@ type CreateNotebookInstanceInput struct { // (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). LifecycleConfigName *string + // The platform identifier of the notebook instance runtime environment. + PlatformIdentifier *string + // Whether root access is enabled or disabled for users of the notebook instance. // The default value is Enabled. Lifecycle configurations need root access to be // able to set up a notebook instance. Because of this, lifecycle configurations diff --git a/service/sagemaker/api_op_DescribeAutoMLJob.go b/service/sagemaker/api_op_DescribeAutoMLJob.go index 3deabef3e40..6cae57e131b 100644 --- a/service/sagemaker/api_op_DescribeAutoMLJob.go +++ b/service/sagemaker/api_op_DescribeAutoMLJob.go @@ -122,7 +122,7 @@ type DescribeAutoMLJobOutput struct { // Returns the job's problem type. ProblemType types.ProblemType - // This contains ProblemType, AutoMLJobObjective and CompletionCriteria. If you do + // This contains ProblemType, AutoMLJobObjective, and CompletionCriteria. 
If you do // not provide these values, they are auto-inferred. If you do provide them, the // values used are the ones you provide. ResolvedAttributes *types.ResolvedAttributes diff --git a/service/sagemaker/api_op_DescribeEndpoint.go b/service/sagemaker/api_op_DescribeEndpoint.go index 13e3583d028..953732e0679 100644 --- a/service/sagemaker/api_op_DescribeEndpoint.go +++ b/service/sagemaker/api_op_DescribeEndpoint.go @@ -108,6 +108,12 @@ type DescribeEndpointOutput struct { // This member is required. LastModifiedTime *time.Time + // Returns the description of an endpoint configuration created using the + // CreateEndpointConfig + // (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateEndpointConfig.html) + // API. + AsyncInferenceConfig *types.AsyncInferenceConfig + // DataCaptureConfig *types.DataCaptureConfigSummary diff --git a/service/sagemaker/api_op_DescribeEndpointConfig.go b/service/sagemaker/api_op_DescribeEndpointConfig.go index 205c9d7677c..8d0003249d8 100644 --- a/service/sagemaker/api_op_DescribeEndpointConfig.go +++ b/service/sagemaker/api_op_DescribeEndpointConfig.go @@ -62,6 +62,12 @@ type DescribeEndpointConfigOutput struct { // This member is required. ProductionVariants []types.ProductionVariant + // Returns the description of an endpoint configuration created using the + // CreateEndpointConfig + // (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateEndpointConfig.html) + // API. + AsyncInferenceConfig *types.AsyncInferenceConfig + // DataCaptureConfig *types.DataCaptureConfig diff --git a/service/sagemaker/api_op_DescribeNotebookInstance.go b/service/sagemaker/api_op_DescribeNotebookInstance.go index 8ba810ccf6a..1c4671a7a2c 100644 --- a/service/sagemaker/api_op_DescribeNotebookInstance.go +++ b/service/sagemaker/api_op_DescribeNotebookInstance.go @@ -118,6 +118,9 @@ type DescribeNotebookInstanceOutput struct { // The status of the notebook instance. 
NotebookInstanceStatus types.NotebookInstanceStatus + // The platform identifier of the notebook instance runtime environment. + PlatformIdentifier *string + // The Amazon Resource Name (ARN) of the IAM role associated with the instance. RoleArn *string diff --git a/service/sagemaker/api_op_StopPipelineExecution.go b/service/sagemaker/api_op_StopPipelineExecution.go index d874a86f72b..af49a982418 100644 --- a/service/sagemaker/api_op_StopPipelineExecution.go +++ b/service/sagemaker/api_op_StopPipelineExecution.go @@ -11,16 +11,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Stops a pipeline execution. A pipeline execution won't stop while a callback -// step is running. When you call StopPipelineExecution on a pipeline execution -// with a running callback step, SageMaker Pipelines sends an additional Amazon SQS -// message to the specified SQS queue. The body of the SQS message contains a -// "Status" field which is set to "Stopping". You should add logic to your Amazon -// SQS message consumer to take any needed action (for example, resource cleanup) -// upon receipt of the message followed by a call to +// Stops a pipeline execution. Callback Step A pipeline execution won't stop while +// a callback step is running. When you call StopPipelineExecution on a pipeline +// execution with a running callback step, SageMaker Pipelines sends an additional +// Amazon SQS message to the specified SQS queue. The body of the SQS message +// contains a "Status" field which is set to "Stopping". You should add logic to +// your Amazon SQS message consumer to take any needed action (for example, +// resource cleanup) upon receipt of the message followed by a call to // SendPipelineExecutionStepSuccess or SendPipelineExecutionStepFailure. Only when // SageMaker Pipelines receives one of these calls will it stop the pipeline -// execution. +// execution. 
Lambda Step A pipeline execution can't be stopped while a lambda step +// is running because the Lambda function invoked by the lambda step can't be +// stopped. If you attempt to stop the execution while the Lambda function is +// running, the pipeline waits for the Lambda function to finish or until the +// timeout is hit, whichever occurs first, and then stops. If the Lambda function +// finishes, the pipeline execution status is Stopped. If the timeout is hit the +// pipeline execution status is Failed. func (c *Client) StopPipelineExecution(ctx context.Context, params *StopPipelineExecutionInput, optFns ...func(*Options)) (*StopPipelineExecutionOutput, error) { if params == nil { params = &StopPipelineExecutionInput{} diff --git a/service/sagemaker/deserializers.go b/service/sagemaker/deserializers.go index c085edbc746..af07261e871 100644 --- a/service/sagemaker/deserializers.go +++ b/service/sagemaker/deserializers.go @@ -26859,6 +26859,194 @@ func awsAwsjson11_deserializeDocumentAssociationSummary(v **types.AssociationSum return nil } +func awsAwsjson11_deserializeDocumentAsyncInferenceClientConfig(v **types.AsyncInferenceClientConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AsyncInferenceClientConfig + if *v == nil { + sv = &types.AsyncInferenceClientConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MaxConcurrentInvocationsPerInstance": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected MaxConcurrentInvocationsPerInstance to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxConcurrentInvocationsPerInstance = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = 
sv + return nil +} + +func awsAwsjson11_deserializeDocumentAsyncInferenceConfig(v **types.AsyncInferenceConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AsyncInferenceConfig + if *v == nil { + sv = &types.AsyncInferenceConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ClientConfig": + if err := awsAwsjson11_deserializeDocumentAsyncInferenceClientConfig(&sv.ClientConfig, value); err != nil { + return err + } + + case "OutputConfig": + if err := awsAwsjson11_deserializeDocumentAsyncInferenceOutputConfig(&sv.OutputConfig, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAsyncInferenceNotificationConfig(v **types.AsyncInferenceNotificationConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AsyncInferenceNotificationConfig + if *v == nil { + sv = &types.AsyncInferenceNotificationConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ErrorTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnsTopicArn to be of type string, got %T instead", value) + } + sv.ErrorTopic = ptr.String(jtv) + } + + case "SuccessTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnsTopicArn to be of type string, got %T instead", value) + } + sv.SuccessTopic = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeDocumentAsyncInferenceOutputConfig(v **types.AsyncInferenceOutputConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AsyncInferenceOutputConfig + if *v == nil { + sv = &types.AsyncInferenceOutputConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KmsKeyId to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + case "NotificationConfig": + if err := awsAwsjson11_deserializeDocumentAsyncInferenceNotificationConfig(&sv.NotificationConfig, value); err != nil { + return err + } + + case "S3OutputPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DestinationS3Uri to be of type string, got %T instead", value) + } + sv.S3OutputPath = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentAthenaDatasetDefinition(v **types.AthenaDatasetDefinition, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -28321,6 +28509,11 @@ func awsAwsjson11_deserializeDocumentCandidateProperties(v **types.CandidateProp return err } + case "CandidateMetrics": + if err := awsAwsjson11_deserializeDocumentMetricDataList(&sv.CandidateMetrics, value); err != nil { + return err + } + default: _, _ = key, value @@ -37913,6 +38106,123 @@ func awsAwsjson11_deserializeDocumentMetricData(v **types.MetricData, value inte return nil } +func awsAwsjson11_deserializeDocumentMetricDataList(v *[]types.MetricDatum, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + 
return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MetricDatum + if *v == nil { + cv = []types.MetricDatum{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MetricDatum + destAddr := &col + if err := awsAwsjson11_deserializeDocumentMetricDatum(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentMetricDatum(v **types.MetricDatum, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MetricDatum + if *v == nil { + sv = &types.MetricDatum{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MetricName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AutoMLMetricEnum to be of type string, got %T instead", value) + } + sv.MetricName = types.AutoMLMetricEnum(jtv) + } + + case "Set": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MetricSetSource to be of type string, got %T instead", value) + } + sv.Set = types.MetricSetSource(jtv) + } + + case "Value": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Value = float32(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.Value = float32(f64) + + default: + return fmt.Errorf("expected Float to be a JSON Number, got %T 
instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentMetricDefinition(v **types.MetricDefinition, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -44896,7 +45206,7 @@ func awsAwsjson11_deserializeDocumentProductionVariant(v **types.ProductionVaria if value != nil { jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected TaskCount to be json.Number, got %T instead", value) + return fmt.Errorf("expected InitialTaskCount to be json.Number, got %T instead", value) } i64, err := jtv.Int64() if err != nil { @@ -56306,6 +56616,11 @@ func awsAwsjson11_deserializeOpDocumentDescribeEndpointConfigOutput(v **Describe for key, value := range shape { switch key { + case "AsyncInferenceConfig": + if err := awsAwsjson11_deserializeDocumentAsyncInferenceConfig(&sv.AsyncInferenceConfig, value); err != nil { + return err + } + case "CreationTime": if value != nil { switch jtv := value.(type) { @@ -56390,6 +56705,11 @@ func awsAwsjson11_deserializeOpDocumentDescribeEndpointOutput(v **DescribeEndpoi for key, value := range shape { switch key { + case "AsyncInferenceConfig": + if err := awsAwsjson11_deserializeDocumentAsyncInferenceConfig(&sv.AsyncInferenceConfig, value); err != nil { + return err + } + case "CreationTime": if value != nil { switch jtv := value.(type) { @@ -58608,6 +58928,15 @@ func awsAwsjson11_deserializeOpDocumentDescribeNotebookInstanceOutput(v **Descri sv.NotebookInstanceStatus = types.NotebookInstanceStatus(jtv) } + case "PlatformIdentifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlatformIdentifier to be of type string, got %T instead", value) + } + sv.PlatformIdentifier = ptr.String(jtv) + } + case "RoleArn": if value != nil { jtv, ok := value.(string) diff --git a/service/sagemaker/serializers.go b/service/sagemaker/serializers.go index 98171f98cfb..61be75e7421 100644 --- 
a/service/sagemaker/serializers.go +++ b/service/sagemaker/serializers.go @@ -11297,6 +11297,80 @@ func awsAwsjson11_serializeDocumentArtifactSourceTypes(v []types.ArtifactSourceT return nil } +func awsAwsjson11_serializeDocumentAsyncInferenceClientConfig(v *types.AsyncInferenceClientConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxConcurrentInvocationsPerInstance != nil { + ok := object.Key("MaxConcurrentInvocationsPerInstance") + ok.Integer(*v.MaxConcurrentInvocationsPerInstance) + } + + return nil +} + +func awsAwsjson11_serializeDocumentAsyncInferenceConfig(v *types.AsyncInferenceConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientConfig != nil { + ok := object.Key("ClientConfig") + if err := awsAwsjson11_serializeDocumentAsyncInferenceClientConfig(v.ClientConfig, ok); err != nil { + return err + } + } + + if v.OutputConfig != nil { + ok := object.Key("OutputConfig") + if err := awsAwsjson11_serializeDocumentAsyncInferenceOutputConfig(v.OutputConfig, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentAsyncInferenceNotificationConfig(v *types.AsyncInferenceNotificationConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ErrorTopic != nil { + ok := object.Key("ErrorTopic") + ok.String(*v.ErrorTopic) + } + + if v.SuccessTopic != nil { + ok := object.Key("SuccessTopic") + ok.String(*v.SuccessTopic) + } + + return nil +} + +func awsAwsjson11_serializeDocumentAsyncInferenceOutputConfig(v *types.AsyncInferenceOutputConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KmsKeyId != nil { + ok := object.Key("KmsKeyId") + ok.String(*v.KmsKeyId) + } + + if v.NotificationConfig != nil { + ok := object.Key("NotificationConfig") + if err := awsAwsjson11_serializeDocumentAsyncInferenceNotificationConfig(v.NotificationConfig, ok); err != nil 
{ + return err + } + } + + if v.S3OutputPath != nil { + ok := object.Key("S3OutputPath") + ok.String(*v.S3OutputPath) + } + + return nil +} + func awsAwsjson11_serializeDocumentAthenaDatasetDefinition(v *types.AthenaDatasetDefinition, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -17667,6 +17741,13 @@ func awsAwsjson11_serializeOpDocumentCreateEndpointConfigInput(v *CreateEndpoint object := value.Object() defer object.Close() + if v.AsyncInferenceConfig != nil { + ok := object.Key("AsyncInferenceConfig") + if err := awsAwsjson11_serializeDocumentAsyncInferenceConfig(v.AsyncInferenceConfig, ok); err != nil { + return err + } + } + if v.DataCaptureConfig != nil { ok := object.Key("DataCaptureConfig") if err := awsAwsjson11_serializeDocumentDataCaptureConfig(v.DataCaptureConfig, ok); err != nil { @@ -18517,6 +18598,11 @@ func awsAwsjson11_serializeOpDocumentCreateNotebookInstanceInput(v *CreateNotebo ok.String(*v.NotebookInstanceName) } + if v.PlatformIdentifier != nil { + ok := object.Key("PlatformIdentifier") + ok.String(*v.PlatformIdentifier) + } + if v.RoleArn != nil { ok := object.Key("RoleArn") ok.String(*v.RoleArn) diff --git a/service/sagemaker/types/enums.go b/service/sagemaker/types/enums.go index 85321b86b99..99e5498820f 100644 --- a/service/sagemaker/types/enums.go +++ b/service/sagemaker/types/enums.go @@ -1628,44 +1628,67 @@ type InstanceType string // Enum values for InstanceType const ( - InstanceTypeMlT2Medium InstanceType = "ml.t2.medium" - InstanceTypeMlT2Large InstanceType = "ml.t2.large" - InstanceTypeMlT2Xlarge InstanceType = "ml.t2.xlarge" - InstanceTypeMlT22xlarge InstanceType = "ml.t2.2xlarge" - InstanceTypeMlT3Medium InstanceType = "ml.t3.medium" - InstanceTypeMlT3Large InstanceType = "ml.t3.large" - InstanceTypeMlT3Xlarge InstanceType = "ml.t3.xlarge" - InstanceTypeMlT32xlarge InstanceType = "ml.t3.2xlarge" - InstanceTypeMlM4Xlarge InstanceType = "ml.m4.xlarge" - InstanceTypeMlM42xlarge InstanceType = 
"ml.m4.2xlarge" - InstanceTypeMlM44xlarge InstanceType = "ml.m4.4xlarge" - InstanceTypeMlM410xlarge InstanceType = "ml.m4.10xlarge" - InstanceTypeMlM416xlarge InstanceType = "ml.m4.16xlarge" - InstanceTypeMlM5Xlarge InstanceType = "ml.m5.xlarge" - InstanceTypeMlM52xlarge InstanceType = "ml.m5.2xlarge" - InstanceTypeMlM54xlarge InstanceType = "ml.m5.4xlarge" - InstanceTypeMlM512xlarge InstanceType = "ml.m5.12xlarge" - InstanceTypeMlM524xlarge InstanceType = "ml.m5.24xlarge" - InstanceTypeMlC4Xlarge InstanceType = "ml.c4.xlarge" - InstanceTypeMlC42xlarge InstanceType = "ml.c4.2xlarge" - InstanceTypeMlC44xlarge InstanceType = "ml.c4.4xlarge" - InstanceTypeMlC48xlarge InstanceType = "ml.c4.8xlarge" - InstanceTypeMlC5Xlarge InstanceType = "ml.c5.xlarge" - InstanceTypeMlC52xlarge InstanceType = "ml.c5.2xlarge" - InstanceTypeMlC54xlarge InstanceType = "ml.c5.4xlarge" - InstanceTypeMlC59xlarge InstanceType = "ml.c5.9xlarge" - InstanceTypeMlC518xlarge InstanceType = "ml.c5.18xlarge" - InstanceTypeMlC5dXlarge InstanceType = "ml.c5d.xlarge" - InstanceTypeMlC5d2xlarge InstanceType = "ml.c5d.2xlarge" - InstanceTypeMlC5d4xlarge InstanceType = "ml.c5d.4xlarge" - InstanceTypeMlC5d9xlarge InstanceType = "ml.c5d.9xlarge" - InstanceTypeMlC5d18xlarge InstanceType = "ml.c5d.18xlarge" - InstanceTypeMlP2Xlarge InstanceType = "ml.p2.xlarge" - InstanceTypeMlP28xlarge InstanceType = "ml.p2.8xlarge" - InstanceTypeMlP216xlarge InstanceType = "ml.p2.16xlarge" - InstanceTypeMlP32xlarge InstanceType = "ml.p3.2xlarge" - InstanceTypeMlP38xlarge InstanceType = "ml.p3.8xlarge" - InstanceTypeMlP316xlarge InstanceType = "ml.p3.16xlarge" + InstanceTypeMlT2Medium InstanceType = "ml.t2.medium" + InstanceTypeMlT2Large InstanceType = "ml.t2.large" + InstanceTypeMlT2Xlarge InstanceType = "ml.t2.xlarge" + InstanceTypeMlT22xlarge InstanceType = "ml.t2.2xlarge" + InstanceTypeMlT3Medium InstanceType = "ml.t3.medium" + InstanceTypeMlT3Large InstanceType = "ml.t3.large" + InstanceTypeMlT3Xlarge InstanceType = 
"ml.t3.xlarge" + InstanceTypeMlT32xlarge InstanceType = "ml.t3.2xlarge" + InstanceTypeMlM4Xlarge InstanceType = "ml.m4.xlarge" + InstanceTypeMlM42xlarge InstanceType = "ml.m4.2xlarge" + InstanceTypeMlM44xlarge InstanceType = "ml.m4.4xlarge" + InstanceTypeMlM410xlarge InstanceType = "ml.m4.10xlarge" + InstanceTypeMlM416xlarge InstanceType = "ml.m4.16xlarge" + InstanceTypeMlM5Xlarge InstanceType = "ml.m5.xlarge" + InstanceTypeMlM52xlarge InstanceType = "ml.m5.2xlarge" + InstanceTypeMlM54xlarge InstanceType = "ml.m5.4xlarge" + InstanceTypeMlM512xlarge InstanceType = "ml.m5.12xlarge" + InstanceTypeMlM524xlarge InstanceType = "ml.m5.24xlarge" + InstanceTypeMlM5dLarge InstanceType = "ml.m5d.large" + InstanceTypeMlM5dXlarge InstanceType = "ml.m5d.xlarge" + InstanceTypeMlM5d2xlarge InstanceType = "ml.m5d.2xlarge" + InstanceTypeMlM5d4xlarge InstanceType = "ml.m5d.4xlarge" + InstanceTypeMlM5d8xlarge InstanceType = "ml.m5d.8xlarge" + InstanceTypeMlM5d12xlarge InstanceType = "ml.m5d.12xlarge" + InstanceTypeMlM5d16xlarge InstanceType = "ml.m5d.16xlarge" + InstanceTypeMlM5d24xlarge InstanceType = "ml.m5d.24xlarge" + InstanceTypeMlC4Xlarge InstanceType = "ml.c4.xlarge" + InstanceTypeMlC42xlarge InstanceType = "ml.c4.2xlarge" + InstanceTypeMlC44xlarge InstanceType = "ml.c4.4xlarge" + InstanceTypeMlC48xlarge InstanceType = "ml.c4.8xlarge" + InstanceTypeMlC5Xlarge InstanceType = "ml.c5.xlarge" + InstanceTypeMlC52xlarge InstanceType = "ml.c5.2xlarge" + InstanceTypeMlC54xlarge InstanceType = "ml.c5.4xlarge" + InstanceTypeMlC59xlarge InstanceType = "ml.c5.9xlarge" + InstanceTypeMlC518xlarge InstanceType = "ml.c5.18xlarge" + InstanceTypeMlC5dXlarge InstanceType = "ml.c5d.xlarge" + InstanceTypeMlC5d2xlarge InstanceType = "ml.c5d.2xlarge" + InstanceTypeMlC5d4xlarge InstanceType = "ml.c5d.4xlarge" + InstanceTypeMlC5d9xlarge InstanceType = "ml.c5d.9xlarge" + InstanceTypeMlC5d18xlarge InstanceType = "ml.c5d.18xlarge" + InstanceTypeMlP2Xlarge InstanceType = "ml.p2.xlarge" + 
InstanceTypeMlP28xlarge InstanceType = "ml.p2.8xlarge" + InstanceTypeMlP216xlarge InstanceType = "ml.p2.16xlarge" + InstanceTypeMlP32xlarge InstanceType = "ml.p3.2xlarge" + InstanceTypeMlP38xlarge InstanceType = "ml.p3.8xlarge" + InstanceTypeMlP316xlarge InstanceType = "ml.p3.16xlarge" + InstanceTypeMlP3dn24xlarge InstanceType = "ml.p3dn.24xlarge" + InstanceTypeMlG4dnXlarge InstanceType = "ml.g4dn.xlarge" + InstanceTypeMlG4dn2xlarge InstanceType = "ml.g4dn.2xlarge" + InstanceTypeMlG4dn4xlarge InstanceType = "ml.g4dn.4xlarge" + InstanceTypeMlG4dn8xlarge InstanceType = "ml.g4dn.8xlarge" + InstanceTypeMlG4dn12xlarge InstanceType = "ml.g4dn.12xlarge" + InstanceTypeMlG4dn16xlarge InstanceType = "ml.g4dn.16xlarge" + InstanceTypeMlR5Large InstanceType = "ml.r5.large" + InstanceTypeMlR5Xlarge InstanceType = "ml.r5.xlarge" + InstanceTypeMlR52xlarge InstanceType = "ml.r5.2xlarge" + InstanceTypeMlR54xlarge InstanceType = "ml.r5.4xlarge" + InstanceTypeMlR58xlarge InstanceType = "ml.r5.8xlarge" + InstanceTypeMlR512xlarge InstanceType = "ml.r5.12xlarge" + InstanceTypeMlR516xlarge InstanceType = "ml.r5.16xlarge" + InstanceTypeMlR524xlarge InstanceType = "ml.r5.24xlarge" ) // Values returns all known values for InstanceType. 
Note that this can be expanded @@ -1691,6 +1714,14 @@ func (InstanceType) Values() []InstanceType { "ml.m5.4xlarge", "ml.m5.12xlarge", "ml.m5.24xlarge", + "ml.m5d.large", + "ml.m5d.xlarge", + "ml.m5d.2xlarge", + "ml.m5d.4xlarge", + "ml.m5d.8xlarge", + "ml.m5d.12xlarge", + "ml.m5d.16xlarge", + "ml.m5d.24xlarge", "ml.c4.xlarge", "ml.c4.2xlarge", "ml.c4.4xlarge", @@ -1711,6 +1742,21 @@ func (InstanceType) Values() []InstanceType { "ml.p3.2xlarge", "ml.p3.8xlarge", "ml.p3.16xlarge", + "ml.p3dn.24xlarge", + "ml.g4dn.xlarge", + "ml.g4dn.2xlarge", + "ml.g4dn.4xlarge", + "ml.g4dn.8xlarge", + "ml.g4dn.12xlarge", + "ml.g4dn.16xlarge", + "ml.r5.large", + "ml.r5.xlarge", + "ml.r5.2xlarge", + "ml.r5.4xlarge", + "ml.r5.8xlarge", + "ml.r5.12xlarge", + "ml.r5.16xlarge", + "ml.r5.24xlarge", } } @@ -1875,6 +1921,26 @@ func (ListWorkteamsSortByOptions) Values() []ListWorkteamsSortByOptions { } } +type MetricSetSource string + +// Enum values for MetricSetSource +const ( + MetricSetSourceTrain MetricSetSource = "Train" + MetricSetSourceValidation MetricSetSource = "Validation" + MetricSetSourceTest MetricSetSource = "Test" +) + +// Values returns all known values for MetricSetSource. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MetricSetSource) Values() []MetricSetSource { + return []MetricSetSource{ + "Train", + "Validation", + "Test", + } +} + type ModelApprovalStatus string // Enum values for ModelApprovalStatus diff --git a/service/sagemaker/types/types.go b/service/sagemaker/types/types.go index 9a77d05c231..a6344d261cc 100644 --- a/service/sagemaker/types/types.go +++ b/service/sagemaker/types/types.go @@ -435,7 +435,7 @@ type AnnotationConsolidationConfig struct { // estimate the true class of text based on annotations from individual workers. 
// // * - // rn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass + // arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass // // * // arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass @@ -1336,6 +1336,68 @@ type AssociationSummary struct { noSmithyDocumentSerde } +// Configures the behavior of the client used by Amazon SageMaker to interact with +// the model container during asynchronous inference. +type AsyncInferenceClientConfig struct { + + // The maximum number of concurrent requests sent by the SageMaker client to the + // model container. If no value is provided, Amazon SageMaker will choose an + // optimal value for you. + MaxConcurrentInvocationsPerInstance *int32 + + noSmithyDocumentSerde +} + +// Specifies configuration for how an endpoint performs asynchronous inference. +type AsyncInferenceConfig struct { + + // Specifies the configuration for asynchronous inference invocation outputs. + // + // This member is required. + OutputConfig *AsyncInferenceOutputConfig + + // Configures the behavior of the client used by Amazon SageMaker to interact with + // the model container during asynchronous inference. + ClientConfig *AsyncInferenceClientConfig + + noSmithyDocumentSerde +} + +// Specifies the configuration for notifications of inference results for +// asynchronous inference. +type AsyncInferenceNotificationConfig struct { + + // Amazon SNS topic to post a notification to when inference fails. If no topic is + // provided, no notification is sent on failure. + ErrorTopic *string + + // Amazon SNS topic to post a notification to when inference completes + // successfully. If no topic is provided, no notification is sent on success. + SuccessTopic *string + + noSmithyDocumentSerde +} + +// Specifies the configuration for asynchronous inference invocation outputs. +type AsyncInferenceOutputConfig struct { + + // The Amazon S3 location to upload inference responses to. + // + // This member is required. 
+ S3OutputPath *string + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key + // that Amazon SageMaker uses to encrypt the asynchronous inference output in + // Amazon S3. + KmsKeyId *string + + // Specifies the configuration for notifications of inference results for + // asynchronous inference. + NotificationConfig *AsyncInferenceNotificationConfig + + noSmithyDocumentSerde +} + // Configuration for Athena Dataset Definition input. type AthenaDatasetDefinition struct { @@ -1378,8 +1440,8 @@ type AthenaDatasetDefinition struct { noSmithyDocumentSerde } -// An Autopilot job returns recommendations, or candidates. Each candidate has -// futher details about the steps involved and the status. +// Information about a candidate produced by an AutoML training job, including its +// status, steps, and other properties. type AutoMLCandidate struct { // The name of the candidate. @@ -1412,7 +1474,7 @@ type AutoMLCandidate struct { // This member is required. ObjectiveStatus ObjectiveStatus - // The AutoML candidate's properties. + // The properties of an AutoML candidate job. CandidateProperties *CandidateProperties // The end time. @@ -1476,7 +1538,8 @@ type AutoMLChannel struct { // up an AutoML candidate. For more information, see . type AutoMLContainerDefinition struct { - // The ECR path of the container. For more information, see . + // The Amazon Elastic Container Registry (Amazon ECR) path of the container. For + // more information, see . // // This member is required. Image *string @@ -1557,7 +1620,7 @@ type AutoMLJobObjective struct { // MSE: The mean squared error (MSE) is the average of the squared differences // between the predicted and actual values. It is used for regression. MSE values // are always positive: the better a model is at predicting the actual values, the - // smaller the MSE value. When the data contains outliers, they tend to dominate + // smaller the MSE value is. 
When the data contains outliers, they tend to dominate // the MSE, which might cause subpar prediction performance. // // * Accuracy: The ratio @@ -1632,7 +1695,7 @@ type AutoMLJobSummary struct { // This member is required. AutoMLJobArn *string - // The name of the AutoML you are requesting. + // The name of the AutoML job you are requesting. // // This member is required. AutoMLJobName *string @@ -1801,6 +1864,9 @@ type CandidateProperties struct { // The Amazon S3 prefix to the artifacts generated for an AutoML candidate. CandidateArtifactLocations *CandidateArtifactLocations + // Information about the candidate metrics for an AutoML job. + CandidateMetrics []MetricDatum + noSmithyDocumentSerde } @@ -6323,6 +6389,21 @@ type MetricData struct { noSmithyDocumentSerde } +// Information about the metric for a candidate produced by an AutoML job. +type MetricDatum struct { + + // The name of the metric. + MetricName AutoMLMetricEnum + + // The dataset split from which the AutoML job produced the metric. + Set MetricSetSource + + // The value of the metric. + Value float32 + + noSmithyDocumentSerde +} + // Specifies a metric that the training algorithm writes to stderr or stdout. // Amazon SageMakerhyperparameter tuning captures all defined metrics. You specify // one metric that a hyperparameter tuning job uses as its objective metric to @@ -7629,10 +7710,11 @@ type NotebookInstanceSummary struct { noSmithyDocumentSerde } -// Configures SNS notifications of available or expiring work items for work teams. +// Configures Amazon SNS notifications of available or expiring work items for work +// teams. type NotificationConfiguration struct { - // The ARN for the SNS topic to which notifications should be published. + // The ARN for the Amazon SNS topic to which notifications should be published. 
NotificationTopicArn *string noSmithyDocumentSerde @@ -11389,17 +11471,25 @@ type TuningJobStepMetaData struct { noSmithyDocumentSerde } -// Provided configuration information for the worker UI for a labeling job. +// Provided configuration information for the worker UI for a labeling job. Provide +// either HumanTaskUiArn or UiTemplateS3Uri. For named entity recognition, 3D point +// cloud and video frame labeling jobs, use HumanTaskUiArn. For all other Ground +// Truth built-in task types and custom task types, use UiTemplateS3Uri to specify +// the location of a worker task template in Amazon S3. type UiConfig struct { // The ARN of the worker task template used to render the worker UI and tools for // labeling job tasks. Use this parameter when you are creating a labeling job for - // 3D point cloud and video fram labeling jobs. Use your labeling job task type to - // select one of the following ARNs and use it with this parameter when you create - // a labeling job. Replace aws-region with the Amazon Web Services region you are - // creating your labeling job in. 3D Point Cloud HumanTaskUiArns Use this - // HumanTaskUiArn for 3D point cloud object detection and 3D point cloud object - // detection adjustment labeling jobs. + // named entity recognition, 3D point cloud and video frame labeling jobs. Use your + // labeling job task type to select one of the following ARNs and use it with this + // parameter when you create a labeling job. Replace aws-region with the Amazon Web + // Services Region you are creating your labeling job in. For example, replace + // aws-region with us-west-1 if you create a labeling job in US West (N. + // California). 
Named Entity Recognition Use the following HumanTaskUiArn for named + // entity recognition labeling jobs: + // arn:aws:sagemaker:aws-region:394669845002:human-task-ui/NamedEntityRecognition + // 3D Point Cloud HumanTaskUiArns Use this HumanTaskUiArn for 3D point cloud object + // detection and 3D point cloud object detection adjustment labeling jobs. // // * // arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection diff --git a/service/sagemaker/validators.go b/service/sagemaker/validators.go index 7c168ced4d8..a3c5a61137a 100644 --- a/service/sagemaker/validators.go +++ b/service/sagemaker/validators.go @@ -4660,6 +4660,40 @@ func validateArtifactSourceTypes(v []types.ArtifactSourceType) error { } } +func validateAsyncInferenceConfig(v *types.AsyncInferenceConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AsyncInferenceConfig"} + if v.OutputConfig == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputConfig")) + } else if v.OutputConfig != nil { + if err := validateAsyncInferenceOutputConfig(v.OutputConfig); err != nil { + invalidParams.AddNested("OutputConfig", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAsyncInferenceOutputConfig(v *types.AsyncInferenceOutputConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AsyncInferenceOutputConfig"} + if v.S3OutputPath == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3OutputPath")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateAthenaDatasetDefinition(v *types.AthenaDatasetDefinition) error { if v == nil { return nil @@ -8848,6 +8882,11 @@ func validateOpCreateEndpointConfigInput(v *CreateEndpointConfigInput) error { invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) } } + if v.AsyncInferenceConfig != nil { + 
if err := validateAsyncInferenceConfig(v.AsyncInferenceConfig); err != nil { + invalidParams.AddNested("AsyncInferenceConfig", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { return nil } diff --git a/service/sagemakerruntime/api_op_InvokeEndpointAsync.go b/service/sagemakerruntime/api_op_InvokeEndpointAsync.go new file mode 100644 index 00000000000..4ddb7c12c39 --- /dev/null +++ b/service/sagemakerruntime/api_op_InvokeEndpointAsync.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sagemakerruntime + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// After you deploy a model into production using Amazon SageMaker hosting +// services, your client applications use this API to get inferences from the model +// hosted at the specified endpoint in an asynchronous manner. Inference requests +// sent to this API are enqueued for asynchronous processing. The processing of the +// inference request may or may not complete before you receive a response from +// this API. The response from this API will not contain the result of the +// inference request but contain information about where you can locate it. Amazon +// SageMaker strips all POST headers except those supported by the API. Amazon +// SageMaker might add additional headers. You should not rely on the behavior of +// headers outside those enumerated in the request syntax. Calls to +// InvokeEndpointAsync are authenticated by using AWS Signature Version 4. For +// information, see Authenticating Requests (AWS Signature Version 4) +// (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// in the Amazon S3 API Reference. 
+func (c *Client) InvokeEndpointAsync(ctx context.Context, params *InvokeEndpointAsyncInput, optFns ...func(*Options)) (*InvokeEndpointAsyncOutput, error) { + if params == nil { + params = &InvokeEndpointAsyncInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "InvokeEndpointAsync", params, optFns, c.addOperationInvokeEndpointAsyncMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*InvokeEndpointAsyncOutput) + out.ResultMetadata = metadata + return out, nil +} + +type InvokeEndpointAsyncInput struct { + + // The name of the endpoint that you specified when you created the endpoint using + // the CreateEndpoint + // (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateEndpoint.html) + // API. + // + // This member is required. + EndpointName *string + + // The Amazon S3 URI where the inference request payload is stored. + // + // This member is required. + InputLocation *string + + // The desired MIME type of the inference in the response. + Accept *string + + // The MIME type of the input data in the request body. + ContentType *string + + // Provides additional information about a request for an inference submitted to a + // model hosted at an Amazon SageMaker endpoint. The information is an opaque value + // that is forwarded verbatim. You could use this value, for example, to provide an + // ID that you can use to track a request or to provide other metadata that a + // service endpoint was programmed to process. The value must consist of no more + // than 1024 visible US-ASCII characters as specified in Section 3.2.6. Field Value + // Components (https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6) of the + // Hypertext Transfer Protocol (HTTP/1.1). The code in your model is responsible + // for setting or updating any custom attributes in the response. If your code does + // not set this value in the response, an empty value is returned. 
For example, if + // a custom attribute represents the trace ID, your model can prepend the custom + // attribute with Trace ID: in your post-processing function. This feature is + // currently supported in the AWS SDKs but not in the Amazon SageMaker Python SDK. + CustomAttributes *string + + // The identifier for the inference request. Amazon SageMaker will generate an + // identifier for you if none is specified. + InferenceId *string + + // Maximum age in seconds a request can be in the queue before it is marked as + // expired. + RequestTTLSeconds *int32 + + noSmithyDocumentSerde +} + +type InvokeEndpointAsyncOutput struct { + + // Identifier for an inference request. This will be the same as the InferenceId + // specified in the input. Amazon SageMaker will generate an identifier for you if + // you do not specify one. + InferenceId *string + + // The Amazon S3 URI where the inference response payload is stored. + OutputLocation *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationInvokeEndpointAsyncMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpInvokeEndpointAsync{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpInvokeEndpointAsync{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpInvokeEndpointAsyncValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opInvokeEndpointAsync(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opInvokeEndpointAsync(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sagemaker", + OperationName: "InvokeEndpointAsync", + } +} diff --git a/service/sagemakerruntime/deserializers.go b/service/sagemakerruntime/deserializers.go index c81c6639e25..b78d7ae84a1 100644 --- a/service/sagemakerruntime/deserializers.go +++ b/service/sagemakerruntime/deserializers.go @@ -158,6 +158,180 @@ func awsRestjson1_deserializeOpDocumentInvokeEndpointOutput(v *InvokeEndpointOut return nil } +type awsRestjson1_deserializeOpInvokeEndpointAsync struct { +} + +func (*awsRestjson1_deserializeOpInvokeEndpointAsync) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpInvokeEndpointAsync) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorInvokeEndpointAsync(response, &metadata) + } + output := &InvokeEndpointAsyncOutput{} + out.Result = output + + err = awsRestjson1_deserializeOpHttpBindingsInvokeEndpointAsyncOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + 
var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentInvokeEndpointAsyncOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorInvokeEndpointAsync(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalFailure", errorCode): + return 
awsRestjson1_deserializeErrorInternalFailure(response, errorBody) + + case strings.EqualFold("ServiceUnavailable", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailable(response, errorBody) + + case strings.EqualFold("ValidationError", errorCode): + return awsRestjson1_deserializeErrorValidationError(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpHttpBindingsInvokeEndpointAsyncOutput(v *InvokeEndpointAsyncOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("X-Amzn-SageMaker-OutputLocation"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.OutputLocation = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestjson1_deserializeOpDocumentInvokeEndpointAsyncOutput(v **InvokeEndpointAsyncOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *InvokeEndpointAsyncOutput + if *v == nil { + sv = &InvokeEndpointAsyncOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "InferenceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Header to be of type string, got %T instead", value) + } + sv.InferenceId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeErrorInternalFailure(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InternalFailure{} var buff [1024]byte diff --git a/service/sagemakerruntime/generated.json b/service/sagemakerruntime/generated.json index 
cb89ac03379..c6cb2a6a1b2 100644 --- a/service/sagemakerruntime/generated.json +++ b/service/sagemakerruntime/generated.json @@ -6,6 +6,7 @@ "files": [ "api_client.go", "api_op_InvokeEndpoint.go", + "api_op_InvokeEndpointAsync.go", "deserializers.go", "doc.go", "endpoints.go", diff --git a/service/sagemakerruntime/serializers.go b/service/sagemakerruntime/serializers.go index 98309d08a90..dcfc523a1fe 100644 --- a/service/sagemakerruntime/serializers.go +++ b/service/sagemakerruntime/serializers.go @@ -115,3 +115,91 @@ func awsRestjson1_serializeOpHttpBindingsInvokeEndpointInput(v *InvokeEndpointIn return nil } + +type awsRestjson1_serializeOpInvokeEndpointAsync struct { +} + +func (*awsRestjson1_serializeOpInvokeEndpointAsync) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpInvokeEndpointAsync) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*InvokeEndpointAsyncInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/endpoints/{EndpointName}/async-invocations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsInvokeEndpointAsyncInput(input, restEncoder); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsInvokeEndpointAsyncInput(v *InvokeEndpointAsyncInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Accept != nil && len(*v.Accept) > 0 { + locationName := "X-Amzn-Sagemaker-Accept" + encoder.SetHeader(locationName).String(*v.Accept) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "X-Amzn-Sagemaker-Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.CustomAttributes != nil && len(*v.CustomAttributes) > 0 { + locationName := "X-Amzn-Sagemaker-Custom-Attributes" + encoder.SetHeader(locationName).String(*v.CustomAttributes) + } + + if v.EndpointName == nil || len(*v.EndpointName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member EndpointName must not be empty")} + } + if v.EndpointName != nil { + if err := encoder.SetURI("EndpointName").String(*v.EndpointName); err != nil { + return err + } + } + + if v.InferenceId != nil && len(*v.InferenceId) > 0 { + locationName := "X-Amzn-Sagemaker-Inference-Id" + encoder.SetHeader(locationName).String(*v.InferenceId) + } + + if v.InputLocation != nil && len(*v.InputLocation) > 0 { + locationName := "X-Amzn-Sagemaker-Inputlocation" + encoder.SetHeader(locationName).String(*v.InputLocation) + } + + if v.RequestTTLSeconds != nil { + locationName := "X-Amzn-Sagemaker-Requestttlseconds" + encoder.SetHeader(locationName).Integer(*v.RequestTTLSeconds) + } + + return nil +} diff --git a/service/sagemakerruntime/validators.go b/service/sagemakerruntime/validators.go index 6a31db4cc7e..c4d2bd63c56 100644 --- a/service/sagemakerruntime/validators.go +++ 
b/service/sagemakerruntime/validators.go @@ -9,6 +9,26 @@ import ( "github.com/aws/smithy-go/middleware" ) +type validateOpInvokeEndpointAsync struct { +} + +func (*validateOpInvokeEndpointAsync) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpInvokeEndpointAsync) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*InvokeEndpointAsyncInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpInvokeEndpointAsyncInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpInvokeEndpoint struct { } @@ -29,10 +49,32 @@ func (m *validateOpInvokeEndpoint) HandleInitialize(ctx context.Context, in midd return next.HandleInitialize(ctx, in) } +func addOpInvokeEndpointAsyncValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpInvokeEndpointAsync{}, middleware.After) +} + func addOpInvokeEndpointValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpInvokeEndpoint{}, middleware.After) } +func validateOpInvokeEndpointAsyncInput(v *InvokeEndpointAsyncInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InvokeEndpointAsyncInput"} + if v.EndpointName == nil { + invalidParams.Add(smithy.NewErrParamRequired("EndpointName")) + } + if v.InputLocation == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputLocation")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpInvokeEndpointInput(v *InvokeEndpointInput) error { if v == nil { return nil