From 241222ec3c705bcc2b729dfe8d0cc4e01bd70b80 Mon Sep 17 00:00:00 2001 From: Jason Del Ponte Date: Mon, 27 Apr 2020 15:03:21 -0700 Subject: [PATCH] Release v0.22.0 (2020-04-27) (#548) Services --- * Synced the V2 SDK with latest AWS service API definitions. Breaking Changes --- * Removed prototype and experiment types from master branch of SDK. --- CHANGELOG.md | 11 + aws/endpoints/defaults.go | 128 +++- aws/version.go | 2 +- .../apis/accessanalyzer/2019-11-01/api-2.json | 28 + .../accessanalyzer/2019-11-01/docs-2.json | 28 +- .../2016-02-06/api-2.json | 11 +- .../2016-02-06/docs-2.json | 150 ++--- .../2016-02-06/examples-1.json | 175 +----- .../codeguru-reviewer/2019-09-19/api-2.json | 469 +++++++++++++- .../codeguru-reviewer/2019-09-19/docs-2.json | 326 +++++++++- .../2019-09-19/paginators-1.json | 15 + .../apis/dataexchange/2017-07-25/api-2.json | 48 +- .../apis/dataexchange/2017-07-25/docs-2.json | 18 +- models/apis/dlm/2018-01-12/docs-2.json | 2 +- models/apis/dms/2016-01-01/api-2.json | 29 +- models/apis/dms/2016-01-01/docs-2.json | 37 +- .../elastic-inference/2017-07-25/api-2.json | 258 +++++++- .../elastic-inference/2017-07-25/docs-2.json | 236 +++++++- .../2017-07-25/paginators-1.json | 6 + models/apis/es/2015-01-01/api-2.json | 3 +- models/apis/es/2015-01-01/docs-2.json | 1 + models/apis/firehose/2015-08-04/api-2.json | 143 ++++- models/apis/firehose/2015-08-04/docs-2.json | 53 +- models/apis/fms/2018-01-01/api-2.json | 5 +- models/apis/fms/2018-01-01/docs-2.json | 8 +- models/apis/iot/2015-05-28/api-2.json | 3 +- .../mediapackage-vod/2018-11-07/api-2.json | 192 ++++++ .../mediapackage-vod/2018-11-07/docs-2.json | 31 +- models/apis/pinpoint/2016-12-01/api-2.json | 59 ++ models/apis/pinpoint/2016-12-01/docs-2.json | 141 +++-- models/apis/ram/2018-01-04/api-2.json | 45 +- models/apis/ram/2018-01-04/docs-2.json | 32 +- models/apis/rds/2014-10-31/api-2.json | 2 + models/apis/rds/2014-10-31/docs-2.json | 8 +- models/apis/redshift/2012-12-01/api-2.json | 199 
++++++ models/apis/redshift/2012-12-01/docs-2.json | 113 +++- .../redshift/2012-12-01/paginators-1.json | 6 + models/apis/sagemaker/2017-07-24/api-2.json | 12 +- models/apis/sagemaker/2017-07-24/docs-2.json | 14 +- .../apis/storagegateway/2013-06-30/api-2.json | 118 ++++ .../storagegateway/2013-06-30/docs-2.json | 209 ++++--- .../2017-10-26/api-2.json | 14 +- .../2017-10-26/docs-2.json | 9 +- models/apis/transfer/2018-11-05/api-2.json | 43 +- models/apis/transfer/2018-11-05/docs-2.json | 277 +++++---- models/endpoints/endpoints.json | 122 +++- service/accessanalyzer/api_enums.go | 18 + service/accessanalyzer/api_types.go | 97 ++- service/applicationautoscaling/api_doc.go | 11 +- service/applicationautoscaling/api_enums.go | 5 + .../api_examples_test.go | 303 +-------- .../api_op_DeleteScalingPolicy.go | 17 +- .../api_op_DeleteScheduledAction.go | 15 +- .../api_op_DeregisterScalableTarget.go | 25 +- .../api_op_DescribeScalableTargets.go | 19 +- .../api_op_DescribeScalingActivities.go | 20 +- .../api_op_DescribeScalingPolicies.go | 20 +- .../api_op_DescribeScheduledActions.go | 19 +- .../api_op_PutScalingPolicy.go | 39 +- .../api_op_PutScheduledAction.go | 50 +- .../api_op_RegisterScalableTarget.go | 73 ++- service/applicationautoscaling/api_types.go | 212 +++++-- service/codegurureviewer/api_enums.go | 52 ++ service/codegurureviewer/api_errors.go | 6 + .../api_op_AssociateRepository.go | 28 +- .../api_op_DescribeCodeReview.go | 146 +++++ .../api_op_DescribeRecommendationFeedback.go | 178 ++++++ .../api_op_DescribeRepositoryAssociation.go | 3 +- .../api_op_ListCodeReviews.go | 297 +++++++++ .../api_op_ListRecommendationFeedback.go | 287 +++++++++ .../api_op_ListRecommendations.go | 239 ++++++++ .../api_op_ListRepositoryAssociations.go | 18 +- .../api_op_PutRecommendationFeedback.go | 179 ++++++ service/codegurureviewer/api_types.go | 573 +++++++++++++++++- .../codegururevieweriface/interface.go | 12 + .../api_op_CreateEndpoint.go | 25 +- 
.../api_op_CreateReplicationTask.go | 10 +- .../api_op_DescribeReplicationSubnetGroups.go | 2 + .../api_op_ModifyEndpoint.go | 21 +- .../api_op_ModifyReplicationTask.go | 8 +- service/databasemigrationservice/api_types.go | 83 +++ service/dataexchange/api_enums.go | 18 + service/dataexchange/api_types.go | 84 ++- service/dlm/api_types.go | 4 +- service/elasticinference/api_client.go | 2 +- service/elasticinference/api_enums.go | 18 + .../api_op_DescribeAcceleratorOfferings.go | 169 ++++++ .../api_op_DescribeAcceleratorTypes.go | 125 ++++ .../api_op_DescribeAccelerators.go | 265 ++++++++ .../elasticinference/api_op_TagResource.go | 2 +- .../elasticinference/api_op_UntagResource.go | 2 +- service/elasticinference/api_types.go | 295 +++++++++ .../elasticinferenceiface/interface.go | 10 +- service/elasticsearchservice/api_types.go | 10 + service/firehose/api_enums.go | 24 +- .../api_op_StartDeliveryStreamEncryption.go | 16 +- service/firehose/api_types.go | 208 ++++++- service/fms/api_enums.go | 1 + service/fms/api_op_PutPolicy.go | 13 +- service/fms/api_types.go | 71 ++- service/mediapackagevod/api_op_CreateAsset.go | 30 + .../api_op_CreatePackagingConfiguration.go | 30 + .../api_op_CreatePackagingGroup.go | 30 + .../mediapackagevod/api_op_DescribeAsset.go | 15 + .../api_op_DescribePackagingConfiguration.go | 15 + .../api_op_DescribePackagingGroup.go | 15 + .../api_op_ListTagsForResource.go | 144 +++++ service/mediapackagevod/api_op_TagResource.go | 152 +++++ .../mediapackagevod/api_op_UntagResource.go | 152 +++++ service/mediapackagevod/api_types.go | 45 ++ .../mediapackagevodiface/interface.go | 6 + service/pinpoint/api_enums.go | 26 + service/pinpoint/api_op_UpdateEndpoint.go | 5 +- .../pinpoint/api_op_UpdateEndpointsBatch.go | 5 +- service/pinpoint/api_types.go | 408 +++++++++---- service/ram/api_op_ListPrincipals.go | 8 +- service/ram/api_op_ListResourceTypes.go | 166 +++++ service/ram/api_op_ListResources.go | 8 +- service/ram/api_types.go | 34 ++ 
service/ram/ramiface/interface.go | 2 + ...i_op_DescribeOrderableDBInstanceOptions.go | 7 + service/rds/api_op_RestoreDBClusterFromS3.go | 9 + .../api_op_RestoreDBClusterFromSnapshot.go | 2 + service/rds/api_types.go | 7 +- service/redshift/api_enums.go | 70 +++ service/redshift/api_errors.go | 18 + service/redshift/api_op_CreateUsageLimit.go | 191 ++++++ service/redshift/api_op_DeleteUsageLimit.go | 119 ++++ .../redshift/api_op_DescribeUsageLimits.go | 215 +++++++ service/redshift/api_op_ModifyUsageLimit.go | 158 +++++ .../api_op_RestoreFromClusterSnapshot.go | 2 +- service/redshift/api_types.go | 44 ++ service/redshift/redshiftiface/interface.go | 8 + service/sagemaker/api_types.go | 10 +- service/storagegateway/api_doc.go | 2 +- service/storagegateway/api_op_AddCache.go | 2 +- .../storagegateway/api_op_AddUploadBuffer.go | 2 +- .../api_op_AddWorkingStorage.go | 6 +- .../storagegateway/api_op_AssignTapePool.go | 8 +- .../api_op_CreateCachediSCSIVolume.go | 6 +- .../api_op_CreateNFSFileShare.go | 8 +- .../api_op_CreateSMBFileShare.go | 10 +- .../storagegateway/api_op_CreateSnapshot.go | 11 +- .../api_op_CreateStorediSCSIVolume.go | 4 +- .../api_op_CreateTapeWithBarcode.go | 18 +- service/storagegateway/api_op_CreateTapes.go | 12 +- ...pi_op_DeleteAutomaticTapeCreationPolicy.go | 125 ++++ .../api_op_DeleteBandwidthRateLimit.go | 4 +- .../api_op_DeleteSnapshotSchedule.go | 5 +- service/storagegateway/api_op_DeleteVolume.go | 3 +- .../api_op_DescribeBandwidthRateLimit.go | 2 +- .../storagegateway/api_op_DescribeCache.go | 6 +- .../api_op_DescribeCachediSCSIVolumes.go | 6 +- .../api_op_DescribeMaintenanceStartTime.go | 2 +- .../api_op_DescribeStorediSCSIVolumes.go | 6 +- .../api_op_DescribeTapeArchives.go | 2 +- .../api_op_DescribeVTLDevices.go | 2 +- .../api_op_DescribeWorkingStorage.go | 2 +- .../storagegateway/api_op_DisableGateway.go | 2 +- ...pi_op_ListAutomaticTapeCreationPolicies.go | 121 ++++ .../storagegateway/api_op_ListFileShares.go | 2 +- 
.../storagegateway/api_op_ListLocalDisks.go | 2 +- service/storagegateway/api_op_ListTapes.go | 6 +- .../api_op_RemoveTagsFromResource.go | 2 +- service/storagegateway/api_op_ResetCache.go | 6 +- .../storagegateway/api_op_ShutdownGateway.go | 8 +- service/storagegateway/api_op_StartGateway.go | 6 +- ...pi_op_UpdateAutomaticTapeCreationPolicy.go | 150 +++++ .../api_op_UpdateBandwidthRateLimit.go | 4 +- .../api_op_UpdateGatewaySoftwareNow.go | 6 +- .../api_op_UpdateMaintenanceStartTime.go | 4 +- .../api_op_UpdateNFSFileShare.go | 6 +- .../api_op_UpdateSMBFileShare.go | 6 +- .../api_op_UpdateSnapshotSchedule.go | 3 +- service/storagegateway/api_types.go | 139 ++++- .../storagegatewayiface/interface.go | 6 + service/transfer/api_doc.go | 21 +- service/transfer/api_enums.go | 43 +- service/transfer/api_errors.go | 18 +- service/transfer/api_op_CreateServer.go | 84 ++- service/transfer/api_op_CreateUser.go | 92 +-- service/transfer/api_op_DeleteServer.go | 7 +- service/transfer/api_op_DeleteSshPublicKey.go | 4 +- service/transfer/api_op_DeleteUser.go | 12 +- service/transfer/api_op_DescribeServer.go | 15 +- service/transfer/api_op_DescribeUser.go | 20 +- service/transfer/api_op_ImportSshPublicKey.go | 23 +- service/transfer/api_op_ListServers.go | 20 +- .../transfer/api_op_ListTagsForResource.go | 4 +- service/transfer/api_op_ListUsers.go | 11 +- service/transfer/api_op_StartServer.go | 11 +- service/transfer/api_op_StopServer.go | 15 +- service/transfer/api_op_TagResource.go | 2 +- .../transfer/api_op_TestIdentityProvider.go | 31 +- service/transfer/api_op_UntagResource.go | 8 +- service/transfer/api_op_UpdateServer.go | 71 ++- service/transfer/api_op_UpdateUser.go | 85 +-- service/transfer/api_types.go | 242 ++++---- service/transfer/transferiface/interface.go | 2 +- 199 files changed, 10177 insertions(+), 1839 deletions(-) create mode 100644 service/codegurureviewer/api_op_DescribeCodeReview.go create mode 100644 
service/codegurureviewer/api_op_DescribeRecommendationFeedback.go create mode 100644 service/codegurureviewer/api_op_ListCodeReviews.go create mode 100644 service/codegurureviewer/api_op_ListRecommendationFeedback.go create mode 100644 service/codegurureviewer/api_op_ListRecommendations.go create mode 100644 service/codegurureviewer/api_op_PutRecommendationFeedback.go create mode 100644 service/elasticinference/api_op_DescribeAcceleratorOfferings.go create mode 100644 service/elasticinference/api_op_DescribeAcceleratorTypes.go create mode 100644 service/elasticinference/api_op_DescribeAccelerators.go create mode 100644 service/mediapackagevod/api_op_ListTagsForResource.go create mode 100644 service/mediapackagevod/api_op_TagResource.go create mode 100644 service/mediapackagevod/api_op_UntagResource.go create mode 100644 service/ram/api_op_ListResourceTypes.go create mode 100644 service/redshift/api_op_CreateUsageLimit.go create mode 100644 service/redshift/api_op_DeleteUsageLimit.go create mode 100644 service/redshift/api_op_DescribeUsageLimits.go create mode 100644 service/redshift/api_op_ModifyUsageLimit.go create mode 100644 service/storagegateway/api_op_DeleteAutomaticTapeCreationPolicy.go create mode 100644 service/storagegateway/api_op_ListAutomaticTapeCreationPolicies.go create mode 100644 service/storagegateway/api_op_UpdateAutomaticTapeCreationPolicy.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 380ff5cbe3b..9469ac6c943 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v0.22.0 (2020-04-27) +=== + +Services +--- +* Synced the V2 SDK with latest AWS service API definitions. + +Breaking Changes +--- +* Removed prototype and experiment types from master branch of SDK. 
+ Release v0.21.0 (2020-04-21) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 369732388ed..4526330060e 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -28,7 +28,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") return reg }(), }, @@ -38,6 +38,9 @@ var awsPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, "ap-east-1": region{ Description: "Asia Pacific (Hong Kong)", }, @@ -103,6 +106,7 @@ var awsPartition = partition{ "access-analyzer": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -126,6 +130,7 @@ var awsPartition = partition{ "acm": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -234,6 +239,12 @@ var awsPartition = partition{ "api.ecr": service{ Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, "ap-east-1": endpoint{ Hostname: "api.ecr.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -368,6 +379,29 @@ var awsPartition = partition{ }, }, }, + "api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + 
"us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, "api.mediatailor": service{ Endpoints: endpoints{ @@ -441,6 +475,7 @@ var awsPartition = partition{ "apigateway": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -466,6 +501,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -577,6 +613,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -765,6 +802,7 @@ var awsPartition = partition{ "cloudformation": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -888,6 +926,7 @@ var awsPartition = partition{ "cloudtrail": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1011,6 +1050,7 @@ var awsPartition = partition{ "codedeploy": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1446,6 +1486,7 @@ var awsPartition = partition{ "directconnect": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1501,6 +1542,7 @@ var awsPartition = partition{ "dms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1668,6 +1710,7 @@ var awsPartition = partition{ 
Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1730,6 +1773,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1794,6 +1838,7 @@ var awsPartition = partition{ "ecs": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1838,32 +1883,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "elastic-inference": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", - }, - "ap-northeast-2": endpoint{ - Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", - }, - "eu-west-1": endpoint{ - Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", - }, - "us-east-1": endpoint{ - Hostname: "api.elastic-inference.us-east-1.amazonaws.com", - }, - "us-east-2": endpoint{ - Hostname: "api.elastic-inference.us-east-2.amazonaws.com", - }, - "us-west-2": endpoint{ - Hostname: "api.elastic-inference.us-west-2.amazonaws.com", - }, - }, - }, "elasticache": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1893,6 +1916,7 @@ var awsPartition = partition{ "elasticbeanstalk": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2073,6 +2097,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2123,6 +2148,7 @@ var awsPartition = partition{ Protocols: 
[]string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2214,6 +2240,7 @@ var awsPartition = partition{ "es": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2243,6 +2270,7 @@ var awsPartition = partition{ "events": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2482,6 +2510,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -2518,6 +2547,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2639,6 +2669,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-southeast-2": endpoint{}, "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, "me-south-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -2957,6 +2988,7 @@ var awsPartition = partition{ "kinesis": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3046,6 +3078,7 @@ var awsPartition = partition{ "kms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3087,6 +3120,7 @@ var awsPartition = partition{ "lambda": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3199,6 +3233,7 @@ var awsPartition = partition{ "logs": service{ Endpoints: endpoints{ + "af-south-1": 
endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3342,6 +3377,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3383,8 +3419,11 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -3394,6 +3433,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3918,6 +3958,7 @@ var awsPartition = partition{ "rds": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3943,6 +3984,7 @@ var awsPartition = partition{ "redshift": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4013,6 +4055,7 @@ var awsPartition = partition{ "resource-groups": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4120,8 +4163,11 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -4184,7 +4230,8 @@ var awsPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ Hostname: "s3.ap-northeast-1.amazonaws.com", 
SignatureVersions: []string{"s3", "s3v4"}, @@ -4673,6 +4720,7 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4832,6 +4880,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4882,6 +4931,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4931,6 +4981,7 @@ var awsPartition = partition{ "ssm": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5002,6 +5053,7 @@ var awsPartition = partition{ "states": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5137,6 +5189,7 @@ var awsPartition = partition{ PartitionEndpoint: "aws-global", Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5202,6 +5255,7 @@ var awsPartition = partition{ "swf": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5249,6 +5303,7 @@ var awsPartition = partition{ "tagging": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5682,6 +5737,7 @@ var awsPartition = partition{ "xray": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5851,6 +5907,13 @@ var awscnPartition = partition{ 
"cn-northwest-1": endpoint{}, }, }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "codedeploy": service{ Endpoints: endpoints{ @@ -7335,6 +7398,13 @@ var awsusgovPartition = partition{ }, }, }, + "securityhub": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -7673,6 +7743,14 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ diff --git a/aws/version.go b/aws/version.go index 6a8557741c5..840d9ce6f41 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "0.21.0" +const SDKVersion = "0.22.0" diff --git a/models/apis/accessanalyzer/2019-11-01/api-2.json b/models/apis/accessanalyzer/2019-11-01/api-2.json index 1d8a73732c4..a3bee7bd113 100644 --- a/models/apis/accessanalyzer/2019-11-01/api-2.json +++ b/models/apis/accessanalyzer/2019-11-01/api-2.json @@ -589,6 +589,7 @@ "resource":{"shape":"String"}, "resourceOwnerAccount":{"shape":"String"}, "resourceType":{"shape":"ResourceType"}, + "sources":{"shape":"FindingSourceList"}, "status":{"shape":"FindingStatus"}, "updatedAt":{"shape":"Timestamp"} } @@ -598,6 +599,32 @@ "type":"list", "member":{"shape":"FindingId"} }, + "FindingSource":{ + "type":"structure", + "required":["type"], + "members":{ + "detail":{"shape":"FindingSourceDetail"}, + "type":{"shape":"FindingSourceType"} + } + }, + "FindingSourceDetail":{ + "type":"structure", + "members":{ + "accessPointArn":{"shape":"String"} + } + }, + "FindingSourceList":{ + "type":"list", + "member":{"shape":"FindingSource"} + }, + "FindingSourceType":{ 
+ "type":"string", + "enum":[ + "BUCKET_ACL", + "POLICY", + "S3_ACCESS_POINT" + ] + }, "FindingStatus":{ "type":"string", "enum":[ @@ -637,6 +664,7 @@ "resource":{"shape":"String"}, "resourceOwnerAccount":{"shape":"String"}, "resourceType":{"shape":"ResourceType"}, + "sources":{"shape":"FindingSourceList"}, "status":{"shape":"FindingStatus"}, "updatedAt":{"shape":"Timestamp"} } diff --git a/models/apis/accessanalyzer/2019-11-01/docs-2.json b/models/apis/accessanalyzer/2019-11-01/docs-2.json index 14211b4f153..4d25aa54cd3 100644 --- a/models/apis/accessanalyzer/2019-11-01/docs-2.json +++ b/models/apis/accessanalyzer/2019-11-01/docs-2.json @@ -181,6 +181,31 @@ "UpdateFindingsRequest$ids": "

The IDs of the findings to update.

" } }, + "FindingSource": { + "base": "

The source of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

", + "refs": { + "FindingSourceList$member": null + } + }, + "FindingSourceDetail": { + "base": "

Includes details about how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

", + "refs": { + "FindingSource$detail": "

Includes details about how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

" + } + }, + "FindingSourceList": { + "base": null, + "refs": { + "Finding$sources": "

The sources of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

", + "FindingSummary$sources": "

The sources of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

" + } + }, + "FindingSourceType": { + "base": null, + "refs": { + "FindingSource$type": "

Indicates the type of access that generated the finding.

" + } + }, "FindingStatus": { "base": null, "refs": { @@ -397,7 +422,7 @@ "SharedViaList": { "base": null, "refs": { - "AnalyzedResource$sharedVia": "

Indicates how the access that generated the finding is granted.

" + "AnalyzedResource$sharedVia": "

Indicates how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

" } }, "SortCriteria": { @@ -439,6 +464,7 @@ "Finding$error": "

An error.

", "Finding$resource": "

The resource that an external principal has access to.

", "Finding$resourceOwnerAccount": "

The AWS account ID that owns the resource.

", + "FindingSourceDetail$accessPointArn": "

The ARN of the access point that generated the finding.

", "FindingSummary$error": "

The error that resulted in an Error finding.

", "FindingSummary$resource": "

The resource that the external principal has access to.

", "FindingSummary$resourceOwnerAccount": "

The AWS account ID that owns the resource.

", diff --git a/models/apis/application-autoscaling/2016-02-06/api-2.json b/models/apis/application-autoscaling/2016-02-06/api-2.json index 0b447013c78..27793739639 100644 --- a/models/apis/application-autoscaling/2016-02-06/api-2.json +++ b/models/apis/application-autoscaling/2016-02-06/api-2.json @@ -432,7 +432,9 @@ "ECSServiceAverageMemoryUtilization", "AppStreamAverageCapacityUtilization", "ComprehendInferenceUtilization", - "LambdaProvisionedConcurrencyUtilization" + "LambdaProvisionedConcurrencyUtilization", + "CassandraReadCapacityUtilization", + "CassandraWriteCapacityUtilization" ] }, "MetricUnit":{"type":"string"}, @@ -572,7 +574,9 @@ "sagemaker:variant:DesiredInstanceCount", "custom-resource:ResourceType:Property", "comprehend:document-classifier-endpoint:DesiredInferenceUnits", - "lambda:function:ProvisionedConcurrency" + "lambda:function:ProvisionedConcurrency", + "cassandra:table:ReadCapacityUnits", + "cassandra:table:WriteCapacityUnits" ] }, "ScalableTarget":{ @@ -724,7 +728,8 @@ "sagemaker", "custom-resource", "comprehend", - "lambda" + "lambda", + "cassandra" ] }, "StepAdjustment":{ diff --git a/models/apis/application-autoscaling/2016-02-06/docs-2.json b/models/apis/application-autoscaling/2016-02-06/docs-2.json index 8ea7a510dcd..54f0f220324 100644 --- a/models/apis/application-autoscaling/2016-02-06/docs-2.json +++ b/models/apis/application-autoscaling/2016-02-06/docs-2.json @@ -1,23 +1,23 @@ { "version": "2.0", - "service": "

With Application Auto Scaling, you can configure automatic scaling for the following resources:

API Summary

The Application Auto Scaling service API includes three key sets of actions:

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

", + "service": "

With Application Auto Scaling, you can configure automatic scaling for the following resources:

API Summary

The Application Auto Scaling service API includes three key sets of actions:

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

", "operations": { - "DeleteScalingPolicy": "

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a Step Scaling Policy and Delete a Target Tracking Scaling Policy in the Application Auto Scaling User Guide.

To create a scaling policy or update an existing one, see PutScalingPolicy.

", + "DeleteScalingPolicy": "

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a Step Scaling Policy and Delete a Target Tracking Scaling Policy in the Application Auto Scaling User Guide.

", "DeleteScheduledAction": "

Deletes the specified scheduled action for an Application Auto Scaling scalable target.

For more information, see Delete a Scheduled Action in the Application Auto Scaling User Guide.

", - "DeregisterScalableTarget": "

Deregisters an Application Auto Scaling scalable target.

Deregistering a scalable target deletes the scaling policies that are associated with it.

To create a scalable target or update an existing one, see RegisterScalableTarget.

", - "DescribeScalableTargets": "

Gets information about the scalable targets in the specified namespace.

You can filter the results using ResourceIds and ScalableDimension.

To create a scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.

", - "DescribeScalingActivities": "

Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.

You can filter the results using ResourceId and ScalableDimension.

Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the scaling policies for a service namespace, see DescribeScalingPolicies. To create a scaling policy or update an existing one, see PutScalingPolicy.

", - "DescribeScalingPolicies": "

Describes the Application Auto Scaling scaling policies for the specified service namespace.

You can filter the results using ResourceId, ScalableDimension, and PolicyNames.

To create a scaling policy or update an existing one, see PutScalingPolicy. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

", - "DescribeScheduledActions": "

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

To create a scheduled action or update an existing one, see PutScheduledAction. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

", - "PutScalingPolicy": "

Creates or updates a policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you have registered the resource as a scalable target using RegisterScalableTarget.

To update a policy, specify its policy name and the parameters that you want to change. Any parameters that you don't specify are not changed by this update request.

You can view the scaling policies for a service namespace using DescribeScalingPolicies. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

Multiple scaling policies can be in force at the same time for the same scalable target. You can have one or more target tracking scaling policies, one or more step scaling policies, or both. However, there is a chance that multiple policies could conflict, instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives precedence to the policy that provides the largest capacity for both scale out and scale in. For example, if one policy increases capacity by 3, another policy increases capacity by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest calculated capacity (200% of 10 = 20) and scales out to 30.

Learn more about how to work with scaling policies in the Application Auto Scaling User Guide.

", - "PutScheduledAction": "

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you have registered the resource as a scalable target using RegisterScalableTarget.

To update an action, specify its name and the parameters that you want to change. If you don't specify start and end times, the old values are deleted. Any other parameters that you don't specify are not changed by this update request.

You can view the scheduled actions using DescribeScheduledActions. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

Learn more about how to work with scheduled actions in the Application Auto Scaling User Guide.

", - "RegisterScalableTarget": "

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out and scale in. Scalable targets are uniquely identified by the combination of resource ID, scalable dimension, and namespace.

When you register a new scalable target, you must specify values for minimum and maximum capacity. Application Auto Scaling will not scale capacity to values that are outside of this range.

To update a scalable target, specify the parameter that you want to change as well as the following parameters that identify the scalable target: resource ID, scalable dimension, and namespace. Any parameters that you don't specify are not changed by this update request.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace by using DescribeScalableTargets.

If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget.

" + "DeregisterScalableTarget": "

Deregisters an Application Auto Scaling scalable target when you have finished using it. To see which resources have been registered, use DescribeScalableTargets.

Deregistering a scalable target deletes the scaling policies and the scheduled actions that are associated with it.

", + "DescribeScalableTargets": "

Gets information about the scalable targets in the specified namespace.

You can filter the results using ResourceIds and ScalableDimension.

", + "DescribeScalingActivities": "

Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.

You can filter the results using ResourceId and ScalableDimension.

", + "DescribeScalingPolicies": "

Describes the Application Auto Scaling scaling policies for the specified service namespace.

You can filter the results using ResourceId, ScalableDimension, and PolicyNames.

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

", + "DescribeScheduledActions": "

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

For more information, see Scheduled Scaling in the Application Auto Scaling User Guide.

", + "PutScalingPolicy": "

Creates or updates a scaling policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you have registered the resource as a scalable target.

Multiple scaling policies can be in force at the same time for the same scalable target. You can have one or more target tracking scaling policies, one or more step scaling policies, or both. However, there is a chance that multiple policies could conflict, instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives precedence to the policy that provides the largest capacity for both scale out and scale in. For example, if one policy increases capacity by 3, another policy increases capacity by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest calculated capacity (200% of 10 = 20) and scales out to 30.

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

If a scalable target is deregistered, the scalable target is no longer available to execute scaling policies. Any scaling policies that were specified for the scalable target are deleted.

", + "PutScheduledAction": "

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you have registered the resource as a scalable target.

When start and end times are specified with a recurring schedule using a cron expression or rates, they form the boundaries of when the recurring action starts and stops.

To update a scheduled action, specify the parameters that you want to change. If you don't specify start and end times, the old values are deleted.

For more information, see Scheduled Scaling in the Application Auto Scaling User Guide.

If a scalable target is deregistered, the scalable target is no longer available to run scheduled actions. Any scheduled actions that were specified for the scalable target are deleted.

", + "RegisterScalableTarget": "

Registers or updates a scalable target.

A scalable target is a resource that Application Auto Scaling can scale out and scale in. Scalable targets are uniquely identified by the combination of resource ID, scalable dimension, and namespace.

When you register a new scalable target, you must specify values for minimum and maximum capacity. Application Auto Scaling scaling policies will not scale capacity to values that are outside of this range.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace by using DescribeScalingPolicies. If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget.

To update a scalable target, specify the parameters that you want to change. Include the parameters that identify the scalable target: resource ID, scalable dimension, and namespace. Any parameters that you don't specify are not changed by this update request.

" }, "shapes": { "AdjustmentType": { "base": null, "refs": { - "StepScalingPolicyConfiguration$AdjustmentType": "

Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute number or a percentage of the current capacity.

" + "StepScalingPolicyConfiguration$AdjustmentType": "

Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute number or a percentage of the current capacity.

AdjustmentType is required if you are adding a new step scaling policy configuration.

" } }, "Alarm": { @@ -41,13 +41,13 @@ "Cooldown": { "base": null, "refs": { - "StepScalingPolicyConfiguration$Cooldown": "

The amount of time, in seconds, after a scaling activity completes where previous trigger-related scaling activities can influence future scaling events.

For scale-out policies, while the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out. For example, an alarm triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, the scaling activity completes successfully, and a cooldown period of 5 minutes starts. During the cooldown period, if the alarm triggers the same policy again but at a more aggressive step adjustment to scale out the service by 3 tasks, the 2 tasks that were added in the previous scale-out event are considered part of that capacity and only 1 additional task is added to the desired count.

For scale-in policies, the cooldown period is used to block subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

", - "TargetTrackingScalingPolicyConfiguration$ScaleOutCooldown": "

The amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start.

While the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.

", - "TargetTrackingScalingPolicyConfiguration$ScaleInCooldown": "

The amount of time, in seconds, after a scale-in activity completes before another scale in activity can start.

The cooldown period is used to block subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

" + "StepScalingPolicyConfiguration$Cooldown": "

The amount of time, in seconds, after a scaling activity completes where previous trigger-related scaling activities can influence future scaling events.

For scale-out policies, while the cooldown period is in effect, the capacity that has been added by the previous scale-out action that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out. For example, an alarm triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, the scaling activity completes successfully, and a cooldown period of 5 minutes starts. During the cooldown period, if the alarm triggers the same policy again but at a more aggressive step adjustment to scale out the service by 3 tasks, the 2 tasks that were added in the previous scale-out action are considered part of that capacity and only 1 additional task is added to the desired count.

For scale-in policies, the cooldown period is used to block subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

", + "TargetTrackingScalingPolicyConfiguration$ScaleOutCooldown": "

The amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start.

While the cooldown period is in effect, the capacity that has been added by the previous scale-out action that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

", + "TargetTrackingScalingPolicyConfiguration$ScaleInCooldown": "

The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start.

The cooldown period is used to block subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

" } }, "CustomizedMetricSpecification": { - "base": "

Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Application Auto Scaling.

To create your customized metric specification:

For more information about CloudWatch, see Amazon CloudWatch Concepts.

", + "base": "

Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Application Auto Scaling.

For information about the available metrics for a service, see AWS Services That Publish CloudWatch Metrics in the Amazon CloudWatch User Guide.

To create your customized metric specification:

For more information about CloudWatch, see Amazon CloudWatch Concepts.

", "refs": { "TargetTrackingScalingPolicyConfiguration$CustomizedMetricSpecification": "

A customized metric. You can specify either a predefined metric or a customized metric.

" } @@ -125,7 +125,7 @@ "DisableScaleIn": { "base": null, "refs": { - "TargetTrackingScalingPolicyConfiguration$DisableScaleIn": "

Indicates whether scale in by the target tracking scaling policy is disabled. If the value is true, scale in is disabled and the target tracking scaling policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable resource. The default value is false.

" + "TargetTrackingScalingPolicyConfiguration$DisableScaleIn": "

Indicates whether scale in by the target tracking scaling policy is disabled. If the value is true, scale in is disabled and the target tracking scaling policy won't remove capacity from the scalable target. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable target. The default value is false.

" } }, "ErrorMessage": { @@ -240,7 +240,7 @@ "MinAdjustmentMagnitude": { "base": null, "refs": { - "StepScalingPolicyConfiguration$MinAdjustmentMagnitude": "

The minimum number to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity, the scaling policy changes the scalable dimension of the scalable target by this amount.

For example, suppose that you create a step scaling policy to scale out an Amazon ECS service by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the service has 4 tasks and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Application Auto Scaling scales out the service by 2 tasks.

" + "StepScalingPolicyConfiguration$MinAdjustmentMagnitude": "

The minimum value to scale by when scaling by percentages. For example, suppose that you create a step scaling policy to scale out an Amazon ECS service by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the service has 4 tasks and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Application Auto Scaling scales out the service by 2 tasks.

Valid only if the adjustment type is PercentChangeInCapacity.

" } }, "ObjectNotFoundException": { @@ -258,12 +258,12 @@ "PolicyType": { "base": null, "refs": { - "PutScalingPolicyRequest$PolicyType": "

The policy type. This parameter is required if you are creating a scaling policy.

The following policy types are supported:

TargetTrackingScaling—Not supported for Amazon EMR

StepScaling—Not supported for DynamoDB, Amazon Comprehend, or AWS Lambda

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

", + "PutScalingPolicyRequest$PolicyType": "

The policy type. This parameter is required if you are creating a scaling policy.

The following policy types are supported:

TargetTrackingScaling—Not supported for Amazon EMR

StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, or Amazon Keyspaces for Apache Cassandra.

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

", "ScalingPolicy$PolicyType": "

The scaling policy type.

" } }, "PredefinedMetricSpecification": { - "base": "

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

", + "base": "

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

Only the AWS services that you're using send metrics to Amazon CloudWatch. To determine whether a desired metric already exists by looking up its namespace and dimension using the CloudWatch metrics dashboard in the console, follow the procedure in Building Dashboards with CloudWatch in the Application Auto Scaling User Guide.

", "refs": { "TargetTrackingScalingPolicyConfiguration$PredefinedMetricSpecification": "

A predefined metric. You can specify either a predefined metric or a customized metric.

" } @@ -301,11 +301,11 @@ "ResourceCapacity": { "base": null, "refs": { - "RegisterScalableTargetRequest$MinCapacity": "

The minimum value to scale to in response to a scale-in event. MinCapacity is required to register a scalable target.

", - "RegisterScalableTargetRequest$MaxCapacity": "

The maximum value to scale to in response to a scale-out event. MaxCapacity is required to register a scalable target.

", + "RegisterScalableTargetRequest$MinCapacity": "

The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand.

This parameter is required if you are registering a scalable target. For Lambda provisioned concurrency, the minimum value allowed is 0. For all other resources, the minimum value allowed is 1.

", + "RegisterScalableTargetRequest$MaxCapacity": "

The maximum value that you plan to scale out to. When a scaling policy is in effect, Application Auto Scaling can scale out (expand) as needed to the maximum capacity limit in response to changing demand.

This parameter is required if you are registering a scalable target.

", "ScalableTarget$MinCapacity": "

The minimum value to scale to in response to a scale-in event.

", "ScalableTarget$MaxCapacity": "

The maximum value to scale to in response to a scale-out event.

", - "ScalableTargetAction$MinCapacity": "

The minimum capacity.

", + "ScalableTargetAction$MinCapacity": "

The minimum capacity.

For Lambda provisioned concurrency, the minimum value allowed is 0. For all other resources, the minimum value allowed is 1.

", "ScalableTargetAction$MaxCapacity": "

The maximum capacity.

" } }, @@ -321,34 +321,34 @@ "base": null, "refs": { "DeleteScalingPolicyRequest$PolicyName": "

The name of the scaling policy.

", - "DeleteScalingPolicyRequest$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", + "DeleteScalingPolicyRequest$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", "DeleteScheduledActionRequest$ScheduledActionName": "

The name of the scheduled action.

", - "DeleteScheduledActionRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", - "DeregisterScalableTargetRequest$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", - "DescribeScalingActivitiesRequest$ResourceId": "

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

", - "DescribeScalingPoliciesRequest$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

", - "DescribeScheduledActionsRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

", - "PutScalingPolicyRequest$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", + "DeleteScheduledActionRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", + "DeregisterScalableTargetRequest$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", + "DescribeScalingActivitiesRequest$ResourceId": "

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScalingPoliciesRequest$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScheduledActionsRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

", + "PutScalingPolicyRequest$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", "PutScalingPolicyResponse$PolicyARN": "

The Amazon Resource Name (ARN) of the resulting scaling policy.

", - "PutScheduledActionRequest$Schedule": "

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

", - "PutScheduledActionRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", - "RegisterScalableTargetRequest$ResourceId": "

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

", - "RegisterScalableTargetRequest$RoleARN": "

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For Amazon EMR, this parameter is required, and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

", + "PutScheduledActionRequest$Schedule": "

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

For examples of using these expressions, see Scheduled Scaling in the Application Auto Scaling User Guide.

", + "PutScheduledActionRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", + "RegisterScalableTargetRequest$ResourceId": "

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

", + "RegisterScalableTargetRequest$RoleARN": "

This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see Application Auto Scaling IAM Roles.

", "ResourceIdsMaxLen1600$member": null, - "ScalableTarget$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", + "ScalableTarget$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", "ScalableTarget$RoleARN": "

The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

", - "ScalingActivity$ResourceId": "

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

", + "ScalingActivity$ResourceId": "

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

", "ScalingPolicy$PolicyARN": "

The Amazon Resource Name (ARN) of the scaling policy.

", - "ScalingPolicy$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", + "ScalingPolicy$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", "ScheduledAction$ScheduledActionARN": "

The Amazon Resource Name (ARN) of the scheduled action.

", - "ScheduledAction$Schedule": "

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

", - "ScheduledAction$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" + "ScheduledAction$Schedule": "

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

For examples of using these expressions, see Scheduled Scaling in the Application Auto Scaling User Guide.

", + "ScheduledAction$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" } }, "ResourceIdsMaxLen1600": { "base": null, "refs": { - "DescribeScalableTargetsRequest$ResourceIds": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScalableTargetsRequest$ResourceIds": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

", "DescribeScalingPoliciesRequest$PolicyNames": "

The names of the scaling policies to describe.

", "DescribeScheduledActionsRequest$ScheduledActionNames": "

The names of the scheduled actions to describe.

" } @@ -362,20 +362,20 @@ "ScalableDimension": { "base": null, "refs": { - "DeleteScalingPolicyRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "DeleteScheduledActionRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "DeregisterScalableTargetRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", - "DescribeScalableTargetsRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", - "DescribeScalingActivitiesRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", - "DescribeScalingPoliciesRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", - "DescribeScheduledActionsRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", - "PutScalingPolicyRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "PutScheduledActionRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "RegisterScalableTargetRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", - "ScalableTarget$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", - "ScalingActivity$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "ScalingPolicy$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "ScheduledAction$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "DeleteScalingPolicyRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "DeleteScheduledActionRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "DeregisterScalableTargetRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", + "DescribeScalableTargetsRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScalingActivitiesRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScalingPoliciesRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScheduledActionsRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", + "PutScalingPolicyRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "PutScheduledActionRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "RegisterScalableTargetRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", + "ScalableTarget$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", + "ScalingActivity$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "ScalingPolicy$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "ScheduledAction$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" } }, "ScalableTarget": { @@ -387,8 +387,8 @@ "ScalableTargetAction": { "base": "

Represents the minimum and maximum capacity for a scheduled action.

", "refs": { - "PutScheduledActionRequest$ScalableTargetAction": "

The new minimum and maximum capacity. You can set both values or just one. During the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

", - "ScheduledAction$ScalableTargetAction": "

The new minimum and maximum capacity. You can set both values or just one. During the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

" + "PutScheduledActionRequest$ScalableTargetAction": "

The new minimum and maximum capacity. You can set both values or just one. At the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

", + "ScheduledAction$ScalableTargetAction": "

The new minimum and maximum capacity. You can set both values or just one. At the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

" } }, "ScalableTargets": { @@ -418,7 +418,7 @@ "ScalingAdjustment": { "base": null, "refs": { - "StepAdjustment$ScalingAdjustment": "

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current scalable dimension while a negative number removes from the current scalable dimension.

" + "StepAdjustment$ScalingAdjustment": "

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

" } }, "ScalingPolicies": { @@ -450,7 +450,7 @@ "ScheduledActionName": { "base": null, "refs": { - "PutScheduledActionRequest$ScheduledActionName": "

The name of the scheduled action.

", + "PutScheduledActionRequest$ScheduledActionName": "

The name of the scheduled action. This name must be unique among all other scheduled actions on the specified scalable target.

", "ScheduledAction$ScheduledActionName": "

The name of the scheduled action.

" } }, @@ -463,24 +463,24 @@ "ServiceNamespace": { "base": null, "refs": { - "DeleteScalingPolicyRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "DeleteScheduledActionRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "DeregisterScalableTargetRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "DescribeScalableTargetsRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "DescribeScalingActivitiesRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "DescribeScalingPoliciesRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "DescribeScheduledActionsRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "PutScalingPolicyRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "PutScheduledActionRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "RegisterScalableTargetRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "ScalableTarget$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "ScalingActivity$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "ScalingPolicy$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

", - "ScheduledAction$ServiceNamespace": "

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "DeleteScalingPolicyRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "DeleteScheduledActionRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "DeregisterScalableTargetRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "DescribeScalableTargetsRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "DescribeScalingActivitiesRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "DescribeScalingPoliciesRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "DescribeScheduledActionsRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "PutScalingPolicyRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "PutScheduledActionRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "RegisterScalableTargetRequest$ServiceNamespace": "

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

", + "ScalableTarget$ServiceNamespace": "

The namespace of the AWS service that provides the resource, or a custom-resource.

", + "ScalingActivity$ServiceNamespace": "

The namespace of the AWS service that provides the resource, or a custom-resource.

", + "ScalingPolicy$ServiceNamespace": "

The namespace of the AWS service that provides the resource, or a custom-resource.

", + "ScheduledAction$ServiceNamespace": "

The namespace of the AWS service that provides the resource, or a custom-resource.

" } }, "StepAdjustment": { - "base": "

Represents a step adjustment for a StepScalingPolicyConfiguration. Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

There are a few rules for the step adjustments for your step policy:

", + "base": "

Represents a step adjustment for a StepScalingPolicyConfiguration. Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

There are a few rules for the step adjustments for your step policy:

", "refs": { "StepAdjustments$member": null } @@ -488,7 +488,7 @@ "StepAdjustments": { "base": null, "refs": { - "StepScalingPolicyConfiguration$StepAdjustments": "

A set of adjustments that enable you to scale based on the size of the alarm breach.

" + "StepScalingPolicyConfiguration$StepAdjustments": "

A set of adjustments that enable you to scale based on the size of the alarm breach.

At least one step adjustment is required if you are adding a new step scaling policy configuration.

" } }, "StepScalingPolicyConfiguration": { @@ -515,8 +515,8 @@ "TimestampType": { "base": null, "refs": { - "PutScheduledActionRequest$StartTime": "

The date and time for the scheduled action to start.

", - "PutScheduledActionRequest$EndTime": "

The date and time for the scheduled action to end.

", + "PutScheduledActionRequest$StartTime": "

The date and time for this scheduled action to start.

", + "PutScheduledActionRequest$EndTime": "

The date and time for the recurring schedule to end.

", "ScalableTarget$CreationTime": "

The Unix timestamp for when the scalable target was created.

", "ScalingActivity$StartTime": "

The Unix timestamp for when the scaling activity began.

", "ScalingActivity$EndTime": "

The Unix timestamp for when the scaling activity ended.

", diff --git a/models/apis/application-autoscaling/2016-02-06/examples-1.json b/models/apis/application-autoscaling/2016-02-06/examples-1.json index 8c85b3870a9..5abcd5544a0 100644 --- a/models/apis/application-autoscaling/2016-02-06/examples-1.json +++ b/models/apis/application-autoscaling/2016-02-06/examples-1.json @@ -50,7 +50,7 @@ "output": { "ScalableTargets": [ { - "CreationTime": "2016-05-06T11:21:46.199Z", + "CreationTime": "2019-05-06T11:21:46.199Z", "MaxCapacity": 10, "MinCapacity": 1, "ResourceId": "service/default/web-app", @@ -71,7 +71,7 @@ "output": { } }, - "description": "This example describes the scalable targets for the ecs service namespace.", + "description": "This example describes the scalable targets for the ECS service namespace.", "id": "to-describe-scalable-targets-1470864286961", "title": "To describe scalable targets" } @@ -89,11 +89,11 @@ "ActivityId": "e6c5f7d1-dbbb-4a3f-89b2-51f33e766399", "Cause": "monitor alarm web-app-cpu-lt-25 in state ALARM triggered policy web-app-cpu-lt-25", "Description": "Setting desired count to 1.", - "EndTime": "2016-05-06T16:04:32.111Z", + "EndTime": "2019-05-06T16:04:32.111Z", "ResourceId": "service/default/web-app", "ScalableDimension": "ecs:service:DesiredCount", "ServiceNamespace": "ecs", - "StartTime": "2016-05-06T16:03:58.171Z", + "StartTime": "2019-05-06T16:03:58.171Z", "StatusCode": "Successful", "StatusMessage": "Successfully set desired count to 1. Change successfully fulfilled by ecs." 
} @@ -125,7 +125,7 @@ "AlarmName": "web-app-cpu-gt-75" } ], - "CreationTime": "2016-05-06T12:11:39.230Z", + "CreationTime": "2019-05-06T12:11:39.230Z", "PolicyARN": "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/web-app-cpu-gt-75", "PolicyName": "web-app-cpu-gt-75", "PolicyType": "StepScaling", @@ -151,7 +151,7 @@ "output": { } }, - "description": "This example describes the scaling policies for the ecs service namespace.", + "description": "This example describes the scaling policies for the ECS service namespace.", "id": "to-describe-scaling-policies-1470864609734", "title": "To describe scaling policies" } @@ -195,153 +195,6 @@ "description": "The following example applies a target tracking scaling policy with a predefined metric specification to an Amazon ECS service called web-app in the default cluster. The policy keeps the average CPU utilization of the service at 75 percent, with scale-out and scale-in cooldown periods of 60 seconds.", "id": "to-apply-a-target-tracking-scaling-policy-with-a-predefined-metric-specification-1569364247984", "title": "To apply a target tracking scaling policy with a predefined metric specification" - }, - { - "input": { - "PolicyName": "cms75-target-tracking-scaling-policy", - "PolicyType": "TargetTrackingScaling", - "ResourceId": "service/default/web-app", - "ScalableDimension": "ecs:service:DesiredCount", - "ServiceNamespace": "ecs", - "TargetTrackingScalingPolicyConfiguration": { - "CustomizedMetricSpecification": { - "Dimensions": [ - { - "Name": "MyOptionalMetricDimensionName", - "Value": "MyOptionalMetricDimensionValue" - } - ], - "MetricName": "MyUtilizationMetric", - "Namespace": "MyNamespace", - "Statistic": "Average", - "Unit": "Percent" - }, - "ScaleInCooldown": 60, - "ScaleOutCooldown": 60, - "TargetValue": 75 - } - }, - "output": { - "Alarms": [ - { - "AlarmARN": 
"arn:aws:cloudwatch:us-west-2:012345678910:alarm:TargetTracking-service/default/web-app-AlarmHigh-9bc77b56-0571-4276-ba0f-d4178882e0a0", - "AlarmName": "TargetTracking-service/default/web-app-AlarmHigh-9bc77b56-0571-4276-ba0f-d4178882e0a0" - }, - { - "AlarmARN": "arn:aws:cloudwatch:us-west-2:012345678910:alarm:TargetTracking-service/default/web-app-AlarmLow-9b6ad934-6d37-438e-9e05-02836ddcbdc4", - "AlarmName": "TargetTracking-service/default/web-app-AlarmLow-9b6ad934-6d37-438e-9e05-02836ddcbdc4" - } - ], - "PolicyARN": "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy: 8784a896-b2ba-47a1-b08c-27301cc499a1:resource/ecs/service/default/web-app:policyName/cms75-target-tracking-scaling-policy" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "The following example applies a target tracking scaling policy with a customized metric specification to an Amazon ECS service called web-app in the default cluster. The policy keeps the average utilization of the service at 75 percent, with scale-out and scale-in cooldown periods of 60 seconds. 
", - "id": "to-apply-a-target-tracking-scaling-policy-with-a-customized-metric-specification-1569365036205", - "title": "To apply a target tracking scaling policy with a customized metric specification" - }, - { - "input": { - "PolicyName": "alb-scale-out-target-tracking-scaling-policy", - "PolicyType": "TargetTrackingScaling", - "ResourceId": "service/default/web-app", - "ScalableDimension": "ecs:service:DesiredCount", - "ServiceNamespace": "ecs", - "TargetTrackingScalingPolicyConfiguration": { - "DisableScaleIn": true, - "PredefinedMetricSpecification": { - "PredefinedMetricType": "ALBRequestCountPerTarget", - "ResourceLabel": "app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d" - }, - "ScaleInCooldown": 60, - "ScaleOutCooldown": 60, - "TargetValue": 1000 - } - }, - "output": { - "Alarms": [ - { - "AlarmARN": "TargetTracking-service/default/web-app-AlarmHigh-d4f0770c-b46e-434a-a60f-3b36d653feca", - "AlarmName": "arn:aws:cloudwatch:us-west-2:012345678910:alarm:TargetTracking-service/default/web-app-AlarmHigh-d4f0770c-b46e-434a-a60f-3b36d653feca" - } - ], - "PolicyARN": "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/alb-scale-out-target-tracking-scaling-policy" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "The following example applies a target tracking scaling policy to an Amazon ECS service called web-app in the default cluster. 
The policy is used to scale out the ECS service when the RequestCountPerTarget metric from the Application Load Balancer exceeds the threshold.", - "id": "to-apply-a-target-tracking-scaling-policy-for-scale-out-only-1569366080118", - "title": "To apply a target tracking scaling policy for scale out only" - }, - { - "input": { - "PolicyName": "web-app-cpu-gt-75", - "PolicyType": "StepScaling", - "ResourceId": "service/default/web-app", - "ScalableDimension": "ecs:service:DesiredCount", - "ServiceNamespace": "ecs", - "StepScalingPolicyConfiguration": { - "AdjustmentType": "PercentChangeInCapacity", - "Cooldown": 60, - "StepAdjustments": [ - { - "MetricIntervalLowerBound": 0, - "ScalingAdjustment": 200 - } - ] - } - }, - "output": { - "PolicyARN": "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/web-app-cpu-gt-75" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example applies a step scaling policy to an Amazon ECS service called web-app in the default cluster. 
The policy increases the desired count of the service by 200%, with a cool down period of 60 seconds.", - "id": "to-apply-a-step-scaling-policy-to-an-amazon-ecs-service-1470864779862", - "title": "To apply a step scaling policy to an Amazon ECS service" - }, - { - "input": { - "PolicyName": "fleet-cpu-gt-75", - "PolicyType": "StepScaling", - "ResourceId": "spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3", - "ScalableDimension": "ec2:spot-fleet-request:TargetCapacity", - "ServiceNamespace": "ec2", - "StepScalingPolicyConfiguration": { - "AdjustmentType": "PercentChangeInCapacity", - "Cooldown": 180, - "StepAdjustments": [ - { - "MetricIntervalLowerBound": 0, - "ScalingAdjustment": 200 - } - ] - } - }, - "output": { - "PolicyARN": "arn:aws:autoscaling:us-east-1:012345678910:scalingPolicy:89406401-0cb7-4130-b770-d97cca0e446b:resource/ec2/spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3:policyName/fleet-cpu-gt-75" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example applies a step scaling policy to an Amazon EC2 Spot fleet. 
The policy increases the target capacity of the spot fleet by 200%, with a cool down period of 180 seconds.\",\n ", - "id": "to-apply-a-step-scaling-policy-to-an-amazon-ec2-spot-fleet-1472073278469", - "title": "To apply a step scaling policy to an Amazon EC2 Spot fleet" } ], "RegisterScalableTarget": [ @@ -362,22 +215,6 @@ "description": "This example registers a scalable target from an Amazon ECS service called web-app that is running on the default cluster, with a minimum desired count of 1 task and a maximum desired count of 10 tasks.", "id": "to-register-a-new-scalable-target-1470864910380", "title": "To register an ECS service as a scalable target" - }, - { - "input": { - "MaxCapacity": 10, - "MinCapacity": 1, - "ResourceId": "spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3", - "ScalableDimension": "ec2:spot-fleet-request:TargetCapacity", - "ServiceNamespace": "ec2" - }, - "output": { - }, - "comments": { - }, - "description": "This example registers a scalable target from an Amazon EC2 Spot fleet with a minimum target capacity of 1 and a maximum of 10.", - "id": "to-register-an-ec2-spot-fleet-as-a-scalable-target-1472072899649", - "title": "To register an EC2 Spot fleet as a scalable target" } ] } diff --git a/models/apis/codeguru-reviewer/2019-09-19/api-2.json b/models/apis/codeguru-reviewer/2019-09-19/api-2.json index d3c6d6c9eec..2c8e9144174 100644 --- a/models/apis/codeguru-reviewer/2019-09-19/api-2.json +++ b/models/apis/codeguru-reviewer/2019-09-19/api-2.json @@ -29,6 +29,38 @@ {"shape":"ThrottlingException"} ] }, + "DescribeCodeReview":{ + "name":"DescribeCodeReview", + "http":{ + "method":"GET", + "requestUri":"/codereviews/{CodeReviewArn}" + }, + "input":{"shape":"DescribeCodeReviewRequest"}, + "output":{"shape":"DescribeCodeReviewResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} 
+ ] + }, + "DescribeRecommendationFeedback":{ + "name":"DescribeRecommendationFeedback", + "http":{ + "method":"GET", + "requestUri":"/feedback/{CodeReviewArn}" + }, + "input":{"shape":"DescribeRecommendationFeedbackRequest"}, + "output":{"shape":"DescribeRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ] + }, "DescribeRepositoryAssociation":{ "name":"DescribeRepositoryAssociation", "http":{ @@ -62,6 +94,53 @@ {"shape":"ThrottlingException"} ] }, + "ListCodeReviews":{ + "name":"ListCodeReviews", + "http":{ + "method":"GET", + "requestUri":"/codereviews" + }, + "input":{"shape":"ListCodeReviewsRequest"}, + "output":{"shape":"ListCodeReviewsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListRecommendationFeedback":{ + "name":"ListRecommendationFeedback", + "http":{ + "method":"GET", + "requestUri":"/feedback/{CodeReviewArn}/RecommendationFeedback" + }, + "input":{"shape":"ListRecommendationFeedbackRequest"}, + "output":{"shape":"ListRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ] + }, + "ListRecommendations":{ + "name":"ListRecommendations", + "http":{ + "method":"GET", + "requestUri":"/codereviews/{CodeReviewArn}/Recommendations" + }, + "input":{"shape":"ListRecommendationsRequest"}, + "output":{"shape":"ListRecommendationsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ] + }, 
"ListRepositoryAssociations":{ "name":"ListRepositoryAssociations", "http":{ @@ -75,6 +154,22 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ] + }, + "PutRecommendationFeedback":{ + "name":"PutRecommendationFeedback", + "http":{ + "method":"PUT", + "requestUri":"/feedback" + }, + "input":{"shape":"PutRecommendationFeedbackRequest"}, + "output":{"shape":"PutRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ] } }, "shapes":{ @@ -90,7 +185,7 @@ "type":"string", "max":1600, "min":1, - "pattern":"^arn:aws[^:\\s]*:codeguru-reviewer:[^:\\s]+:[\\d]{12}:[a-z]+:[\\w-]+$" + "pattern":"^arn:aws[^:\\s]*:codeguru-reviewer:[^:\\s]+:[\\d]{12}:[a-z-]+:[\\w-]+$" }, "AssociateRepositoryRequest":{ "type":"structure", @@ -127,6 +222,56 @@ "Name":{"shape":"Name"} } }, + "CodeReview":{ + "type":"structure", + "members":{ + "Name":{"shape":"Name"}, + "CodeReviewArn":{"shape":"Arn"}, + "RepositoryName":{"shape":"Name"}, + "Owner":{"shape":"Owner"}, + "ProviderType":{"shape":"ProviderType"}, + "State":{"shape":"JobState"}, + "StateReason":{"shape":"StateReason"}, + "CreatedTimeStamp":{"shape":"TimeStamp"}, + "LastUpdatedTimeStamp":{"shape":"TimeStamp"}, + "Type":{"shape":"Type"}, + "PullRequestId":{"shape":"PullRequestId"}, + "SourceCodeType":{"shape":"SourceCodeType"}, + "Metrics":{"shape":"Metrics"} + } + }, + "CodeReviewSummaries":{ + "type":"list", + "member":{"shape":"CodeReviewSummary"} + }, + "CodeReviewSummary":{ + "type":"structure", + "members":{ + "Name":{"shape":"Name"}, + "CodeReviewArn":{"shape":"Arn"}, + "RepositoryName":{"shape":"Name"}, + "Owner":{"shape":"Owner"}, + "ProviderType":{"shape":"ProviderType"}, + "State":{"shape":"JobState"}, + "CreatedTimeStamp":{"shape":"TimeStamp"}, + "LastUpdatedTimeStamp":{"shape":"TimeStamp"}, + "Type":{"shape":"Type"}, + 
"PullRequestId":{"shape":"PullRequestId"}, + "MetricsSummary":{"shape":"MetricsSummary"} + } + }, + "CommitDiffSourceCodeType":{ + "type":"structure", + "members":{ + "SourceCommit":{"shape":"CommitId"}, + "DestinationCommit":{"shape":"CommitId"} + } + }, + "CommitId":{ + "type":"string", + "max":64, + "min":6 + }, "ConflictException":{ "type":"structure", "members":{ @@ -135,6 +280,53 @@ "error":{"httpStatusCode":409}, "exception":true }, + "DescribeCodeReviewRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "location":"uri", + "locationName":"CodeReviewArn" + } + } + }, + "DescribeCodeReviewResponse":{ + "type":"structure", + "members":{ + "CodeReview":{"shape":"CodeReview"} + } + }, + "DescribeRecommendationFeedbackRequest":{ + "type":"structure", + "required":[ + "CodeReviewArn", + "RecommendationId" + ], + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "location":"uri", + "locationName":"CodeReviewArn" + }, + "RecommendationId":{ + "shape":"RecommendationId", + "location":"querystring", + "locationName":"RecommendationId" + }, + "UserId":{ + "shape":"UserId", + "location":"querystring", + "locationName":"UserId" + } + } + }, + "DescribeRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + "RecommendationFeedback":{"shape":"RecommendationFeedback"} + } + }, "DescribeRepositoryAssociationRequest":{ "type":"structure", "required":["AssociationArn"], @@ -170,6 +362,12 @@ } }, "ErrorMessage":{"type":"string"}, + "FilePath":{ + "type":"string", + "max":1024, + "min":1 + }, + "FindingsCount":{"type":"long"}, "InternalServerException":{ "type":"structure", "members":{ @@ -179,6 +377,136 @@ "exception":true, "fault":true }, + "JobState":{ + "type":"string", + "enum":[ + "Completed", + "Pending", + "Failed", + "Deleting" + ] + }, + "JobStates":{ + "type":"list", + "member":{"shape":"JobState"}, + "max":3, + "min":1 + }, + "LineNumber":{"type":"integer"}, + 
"ListCodeReviewsMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListCodeReviewsRequest":{ + "type":"structure", + "required":["Type"], + "members":{ + "ProviderTypes":{ + "shape":"ProviderTypes", + "location":"querystring", + "locationName":"ProviderTypes" + }, + "States":{ + "shape":"JobStates", + "location":"querystring", + "locationName":"States" + }, + "RepositoryNames":{ + "shape":"RepositoryNames", + "location":"querystring", + "locationName":"RepositoryNames" + }, + "Type":{ + "shape":"Type", + "location":"querystring", + "locationName":"Type" + }, + "MaxResults":{ + "shape":"ListCodeReviewsMaxResults", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListCodeReviewsResponse":{ + "type":"structure", + "members":{ + "CodeReviewSummaries":{"shape":"CodeReviewSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListRecommendationFeedbackRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"MaxResults" + }, + "CodeReviewArn":{ + "shape":"Arn", + "location":"uri", + "locationName":"CodeReviewArn" + }, + "UserIds":{ + "shape":"UserIds", + "location":"querystring", + "locationName":"UserIds" + }, + "RecommendationIds":{ + "shape":"RecommendationIds", + "location":"querystring", + "locationName":"RecommendationIds" + } + } + }, + "ListRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + "RecommendationFeedbackSummaries":{"shape":"RecommendationFeedbackSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListRecommendationsRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + 
"locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"MaxResults" + }, + "CodeReviewArn":{ + "shape":"Arn", + "location":"uri", + "locationName":"CodeReviewArn" + } + } + }, + "ListRecommendationsResponse":{ + "type":"structure", + "members":{ + "RecommendationSummaries":{"shape":"RecommendationSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, "ListRepositoryAssociationsRequest":{ "type":"structure", "members":{ @@ -226,6 +554,21 @@ "max":100, "min":1 }, + "MeteredLinesOfCodeCount":{"type":"long"}, + "Metrics":{ + "type":"structure", + "members":{ + "MeteredLinesOfCodeCount":{"shape":"MeteredLinesOfCodeCount"}, + "FindingsCount":{"shape":"FindingsCount"} + } + }, + "MetricsSummary":{ + "type":"structure", + "members":{ + "MeteredLinesOfCodeCount":{"shape":"MeteredLinesOfCodeCount"}, + "FindingsCount":{"shape":"FindingsCount"} + } + }, "Name":{ "type":"string", "max":100, @@ -276,6 +619,90 @@ "max":3, "min":1 }, + "PullRequestId":{ + "type":"string", + "max":64, + "min":1 + }, + "PutRecommendationFeedbackRequest":{ + "type":"structure", + "required":[ + "CodeReviewArn", + "RecommendationId", + "Reactions" + ], + "members":{ + "CodeReviewArn":{"shape":"Arn"}, + "RecommendationId":{"shape":"RecommendationId"}, + "Reactions":{"shape":"Reactions"} + } + }, + "PutRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + } + }, + "Reaction":{ + "type":"string", + "enum":[ + "ThumbsUp", + "ThumbsDown" + ] + }, + "Reactions":{ + "type":"list", + "member":{"shape":"Reaction"}, + "max":1, + "min":0 + }, + "RecommendationFeedback":{ + "type":"structure", + "members":{ + "CodeReviewArn":{"shape":"Arn"}, + "RecommendationId":{"shape":"RecommendationId"}, + "Reactions":{"shape":"Reactions"}, + "UserId":{"shape":"UserId"}, + "CreatedTimeStamp":{"shape":"TimeStamp"}, + "LastUpdatedTimeStamp":{"shape":"TimeStamp"} + } + }, + "RecommendationFeedbackSummaries":{ + "type":"list", + 
"member":{"shape":"RecommendationFeedbackSummary"} + }, + "RecommendationFeedbackSummary":{ + "type":"structure", + "members":{ + "RecommendationId":{"shape":"RecommendationId"}, + "Reactions":{"shape":"Reactions"}, + "UserId":{"shape":"UserId"} + } + }, + "RecommendationId":{ + "type":"string", + "max":64, + "min":1 + }, + "RecommendationIds":{ + "type":"list", + "member":{"shape":"RecommendationId"}, + "max":100, + "min":1 + }, + "RecommendationSummaries":{ + "type":"list", + "member":{"shape":"RecommendationSummary"} + }, + "RecommendationSummary":{ + "type":"structure", + "members":{ + "FilePath":{"shape":"FilePath"}, + "RecommendationId":{"shape":"RecommendationId"}, + "StartLine":{"shape":"LineNumber"}, + "EndLine":{"shape":"LineNumber"}, + "Description":{"shape":"Text"} + } + }, "Repository":{ "type":"structure", "members":{ @@ -327,11 +754,36 @@ "State":{"shape":"RepositoryAssociationState"} } }, + "RepositoryNames":{ + "type":"list", + "member":{"shape":"Name"}, + "max":100, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "SourceCodeType":{ + "type":"structure", + "members":{ + "CommitDiff":{"shape":"CommitDiffSourceCodeType"} + } + }, "StateReason":{ "type":"string", "max":256, "min":0 }, + "Text":{ + "type":"string", + "max":2048, + "min":1 + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -341,6 +793,21 @@ "exception":true }, "TimeStamp":{"type":"timestamp"}, + "Type":{ + "type":"string", + "enum":["PullRequest"] + }, + "UserId":{ + "type":"string", + "max":256, + "min":1 + }, + "UserIds":{ + "type":"list", + "member":{"shape":"UserId"}, + "max":100, + "min":1 + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/models/apis/codeguru-reviewer/2019-09-19/docs-2.json b/models/apis/codeguru-reviewer/2019-09-19/docs-2.json index f7c3f697db7..255682260c9 100644 --- 
a/models/apis/codeguru-reviewer/2019-09-19/docs-2.json +++ b/models/apis/codeguru-reviewer/2019-09-19/docs-2.json @@ -2,10 +2,16 @@ "version": "2.0", "service": "

This section provides documentation for the Amazon CodeGuru Reviewer API operations.

", "operations": { - "AssociateRepository": "

Associates an AWS CodeCommit repository with Amazon CodeGuru Reviewer. When you associate an AWS CodeCommit repository with Amazon CodeGuru Reviewer, Amazon CodeGuru Reviewer will provide recommendations for each pull request. You can view recommendations in the AWS CodeCommit repository.

You can associate a GitHub repository using the Amazon CodeGuru Reviewer console.

", + "AssociateRepository": "

Associates an AWS CodeCommit repository with Amazon CodeGuru Reviewer. When you associate an AWS CodeCommit repository with Amazon CodeGuru Reviewer, Amazon CodeGuru Reviewer will provide recommendations for each pull request raised within the repository. You can view recommendations in the AWS CodeCommit repository.

You can associate a GitHub repository using the Amazon CodeGuru Reviewer console.

", + "DescribeCodeReview": "

Returns the metadata associated with the code review along with its status.

", + "DescribeRecommendationFeedback": "

Describes the customer feedback for a CodeGuru Reviewer recommendation.

", "DescribeRepositoryAssociation": "

Describes a repository association.

", "DisassociateRepository": "

Removes the association between Amazon CodeGuru Reviewer and a repository.

", - "ListRepositoryAssociations": "

Lists repository associations. You can optionally filter on one or more of the following recommendation properties: provider types, states, names, and owners.

" + "ListCodeReviews": "

Lists all the code reviews that the customer has created in the past 90 days.

", + "ListRecommendationFeedback": "

Lists the customer feedback for a CodeGuru Reviewer recommendation for all users. This API will be used from the console to extract the previously given feedback by the user to pre-populate the feedback emojis for all recommendations.

", + "ListRecommendations": "

Returns the list of all recommendations for a completed code review.

", + "ListRepositoryAssociations": "

Lists repository associations. You can optionally filter on one or more of the following recommendation properties: provider types, states, names, and owners.

", + "PutRecommendationFeedback": "

Stores customer feedback for a CodeGuru Reviewer recommendation. When this API is called again with different reactions, the previous feedback is overwritten.

" }, "shapes": { "AccessDeniedException": { @@ -16,8 +22,16 @@ "Arn": { "base": null, "refs": { - "DescribeRepositoryAssociationRequest$AssociationArn": "

The Amazon Resource Name (ARN) identifying the association.

", + "CodeReview$CodeReviewArn": "

The Amazon Resource Name (ARN) of the code review to describe.

", + "CodeReviewSummary$CodeReviewArn": "

The Amazon Resource Name (ARN) of the code review to describe.

", + "DescribeCodeReviewRequest$CodeReviewArn": "

The Amazon Resource Name (ARN) of the code review to describe.

", + "DescribeRecommendationFeedbackRequest$CodeReviewArn": "

The Amazon Resource Name (ARN) that identifies the code review.

", + "DescribeRepositoryAssociationRequest$AssociationArn": "

The Amazon Resource Name (ARN) identifying the association. You can retrieve this ARN by calling ListRepositoryAssociations.

", "DisassociateRepositoryRequest$AssociationArn": "

The Amazon Resource Name (ARN) identifying the association.

", + "ListRecommendationFeedbackRequest$CodeReviewArn": "

The Amazon Resource Name (ARN) that identifies the code review.

", + "ListRecommendationsRequest$CodeReviewArn": "

The Amazon Resource Name (ARN) of the code review to describe.

", + "PutRecommendationFeedbackRequest$CodeReviewArn": "

The Amazon Resource Name (ARN) that identifies the code review.

", + "RecommendationFeedback$CodeReviewArn": "

The Amazon Resource Name (ARN) that identifies the code review.

", "RepositoryAssociation$AssociationArn": "

The Amazon Resource Name (ARN) identifying the repository association.

", "RepositoryAssociationSummary$AssociationArn": "

The Amazon Resource Name (ARN) identifying the repository association.

" } @@ -35,14 +49,14 @@ "AssociationId": { "base": null, "refs": { - "RepositoryAssociation$AssociationId": "

The id of the repository association.

", + "RepositoryAssociation$AssociationId": "

The ID of the repository association.

", "RepositoryAssociationSummary$AssociationId": "

The repository association ID.

" } }, "ClientRequestToken": { "base": null, "refs": { - "AssociateRepositoryRequest$ClientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

If you want to add a new repository association, this parameter specifies a unique identifier for the new repository association that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You typically only need to interact with this value if you implement your own retry logic and want to ensure that a given repository association is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified repository association.

Amazon CodeGuru Reviewer uses this value to prevent the accidental creation of duplicate repository associations if there are failures and retries.

" + "AssociateRepositoryRequest$ClientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

To add a new repository association, this parameter specifies a unique identifier for the new repository association that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDKs to call this operation, you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You typically interact with this value if you implement your own retry logic and want to ensure that a given repository association is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified repository association.

Amazon CodeGuru Reviewer uses this value to prevent the accidental creation of duplicate repository associations if there are failures and retries.

" } }, "CodeCommitRepository": { @@ -51,11 +65,62 @@ "Repository$CodeCommit": "

Information about an AWS CodeCommit repository.

" } }, + "CodeReview": { + "base": "

Information about a code review.

", + "refs": { + "DescribeCodeReviewResponse$CodeReview": "

Information about the code review.

" + } + }, + "CodeReviewSummaries": { + "base": null, + "refs": { + "ListCodeReviewsResponse$CodeReviewSummaries": "

A list of code reviews that meet the criteria of the request.

" + } + }, + "CodeReviewSummary": { + "base": "

Information about the summary of the code review.

", + "refs": { + "CodeReviewSummaries$member": null + } + }, + "CommitDiffSourceCodeType": { + "base": "

The commit diff for the pull request.

", + "refs": { + "SourceCodeType$CommitDiff": "

The commit diff for the pull request.

" + } + }, + "CommitId": { + "base": null, + "refs": { + "CommitDiffSourceCodeType$SourceCommit": "

Source Commit SHA.

", + "CommitDiffSourceCodeType$DestinationCommit": "

Destination Commit SHA.

" + } + }, "ConflictException": { "base": "

The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.

", "refs": { } }, + "DescribeCodeReviewRequest": { + "base": null, + "refs": { + } + }, + "DescribeCodeReviewResponse": { + "base": null, + "refs": { + } + }, + "DescribeRecommendationFeedbackRequest": { + "base": null, + "refs": { + } + }, + "DescribeRecommendationFeedbackResponse": { + "base": null, + "refs": { + } + }, "DescribeRepositoryAssociationRequest": { "base": null, "refs": { @@ -83,15 +148,86 @@ "ConflictException$Message": null, "InternalServerException$Message": null, "NotFoundException$Message": null, + "ResourceNotFoundException$Message": null, "ThrottlingException$Message": null, "ValidationException$Message": null } }, + "FilePath": { + "base": null, + "refs": { + "RecommendationSummary$FilePath": "

Name of the file on which a recommendation is provided.

" + } + }, + "FindingsCount": { + "base": null, + "refs": { + "Metrics$FindingsCount": "

Total number of recommendations found in the code review.

", + "MetricsSummary$FindingsCount": "

Total number of recommendations found in the code review.

" + } + }, "InternalServerException": { "base": "

The server encountered an internal error and is unable to complete the request.

", "refs": { } }, + "JobState": { + "base": null, + "refs": { + "CodeReview$State": "

The state of the code review.

", + "CodeReviewSummary$State": "

The state of the code review.

", + "JobStates$member": null + } + }, + "JobStates": { + "base": null, + "refs": { + "ListCodeReviewsRequest$States": "

List of states for filtering that needs to be applied before displaying the result. For example, \"states=[Pending]\" will list code reviews in the Pending state.

" + } + }, + "LineNumber": { + "base": null, + "refs": { + "RecommendationSummary$StartLine": "

Start line from where the recommendation is applicable in the source commit or source branch.

", + "RecommendationSummary$EndLine": "

Last line where the recommendation is applicable in the source commit or source branch. For a single line comment the start line and end line values will be the same.

" + } + }, + "ListCodeReviewsMaxResults": { + "base": null, + "refs": { + "ListCodeReviewsRequest$MaxResults": "

The maximum number of results that are returned per call. The default is 100.

" + } + }, + "ListCodeReviewsRequest": { + "base": null, + "refs": { + } + }, + "ListCodeReviewsResponse": { + "base": null, + "refs": { + } + }, + "ListRecommendationFeedbackRequest": { + "base": null, + "refs": { + } + }, + "ListRecommendationFeedbackResponse": { + "base": null, + "refs": { + } + }, + "ListRecommendationsRequest": { + "base": null, + "refs": { + } + }, + "ListRecommendationsResponse": { + "base": null, + "refs": { + } + }, "ListRepositoryAssociationsRequest": { "base": null, "refs": { @@ -105,28 +241,60 @@ "MaxResults": { "base": null, "refs": { - "ListRepositoryAssociationsRequest$MaxResults": "

The maximum number of repository association results returned by ListRepositoryAssociations in paginated output. When this parameter is used, ListRepositoryAssociations only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRepositoryAssociations request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListRepositoryAssociations returns up to 100 results and a nextToken value if applicable.

" + "ListRecommendationFeedbackRequest$MaxResults": "

The maximum number of results that are returned per call. The default is 100.

", + "ListRecommendationsRequest$MaxResults": "

The maximum number of results that are returned per call. The default is 100.

", + "ListRepositoryAssociationsRequest$MaxResults": "

The maximum number of repository association results returned by ListRepositoryAssociations in paginated output. When this parameter is used, ListRepositoryAssociations only returns maxResults results in a single page with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRepositoryAssociations request with the returned nextToken value. This value can be between 1 and 25. If this parameter is not used, ListRepositoryAssociations returns up to 25 results and a nextToken value if applicable.

" + } + }, + "MeteredLinesOfCodeCount": { + "base": null, + "refs": { + "Metrics$MeteredLinesOfCodeCount": "

Lines of code metered in the code review.

", + "MetricsSummary$MeteredLinesOfCodeCount": "

Lines of code metered in the code review.

" + } + }, + "Metrics": { + "base": "

Information about the statistics from the code review.

", + "refs": { + "CodeReview$Metrics": "

The statistics from the code review.

" + } + }, + "MetricsSummary": { + "base": "

Information about metrics summaries.

", + "refs": { + "CodeReviewSummary$MetricsSummary": "

The statistics from the code review.

" } }, "Name": { "base": null, "refs": { "CodeCommitRepository$Name": "

The name of the AWS CodeCommit repository.

", + "CodeReview$Name": "

The name of the code review.

", + "CodeReview$RepositoryName": "

The name of the repository.

", + "CodeReviewSummary$Name": "

The name of the code review.

", + "CodeReviewSummary$RepositoryName": "

The name of the repository.

", "Names$member": null, "RepositoryAssociation$Name": "

The name of the repository.

", - "RepositoryAssociationSummary$Name": "

The name of the repository association.

" + "RepositoryAssociationSummary$Name": "

The name of the repository association.

", + "RepositoryNames$member": null } }, "Names": { "base": null, "refs": { - "ListRepositoryAssociationsRequest$Names": "

List of names to use as a filter.

" + "ListRepositoryAssociationsRequest$Names": "

List of repository names to use as a filter.

" } }, "NextToken": { "base": null, "refs": { - "ListRepositoryAssociationsRequest$NextToken": "

The nextToken value returned from a previous paginated ListRepositoryAssociations request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListCodeReviewsRequest$NextToken": "

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

", + "ListCodeReviewsResponse$NextToken": "

Pagination token.

", + "ListRecommendationFeedbackRequest$NextToken": "

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

", + "ListRecommendationFeedbackResponse$NextToken": "

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

", + "ListRecommendationsRequest$NextToken": "

Pagination token.

", + "ListRecommendationsResponse$NextToken": "

Pagination token.

", + "ListRepositoryAssociationsRequest$NextToken": "

The nextToken value returned from a previous paginated ListRepositoryAssociations request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

Treat this token as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListRepositoryAssociationsResponse$NextToken": "

The nextToken value to include in a future ListRecommendations request. When the results of a ListRecommendations request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" } }, @@ -138,6 +306,8 @@ "Owner": { "base": null, "refs": { + "CodeReview$Owner": "

The owner of the repository.

", + "CodeReviewSummary$Owner": "

The owner of the repository.

", "Owners$member": null, "RepositoryAssociation$Owner": "

The owner of the repository.

", "RepositoryAssociationSummary$Owner": "

The owner of the repository association.

" @@ -146,12 +316,14 @@ "Owners": { "base": null, "refs": { - "ListRepositoryAssociationsRequest$Owners": "

List of owners to use as a filter. For AWS CodeCommit, the owner is the AWS account id. For GitHub, it is the GitHub account name.

" + "ListRepositoryAssociationsRequest$Owners": "

List of owners to use as a filter. For GitHub, this is name of the GitHub account that was used to associate the repository. For AWS CodeCommit, it is the name of the CodeCommit account that was used to associate the repository.

" } }, "ProviderType": { "base": null, "refs": { + "CodeReview$ProviderType": "

The provider type of the repository association.

", + "CodeReviewSummary$ProviderType": "

The provider type of the repository association.

", "ProviderTypes$member": null, "RepositoryAssociation$ProviderType": "

The provider type of the repository association.

", "RepositoryAssociationSummary$ProviderType": "

The provider type of the repository association.

" @@ -160,9 +332,88 @@ "ProviderTypes": { "base": null, "refs": { + "ListCodeReviewsRequest$ProviderTypes": "

List of provider types for filtering that needs to be applied before displaying the result. For example, \"providerTypes=[GitHub]\" will list code reviews from GitHub.

", "ListRepositoryAssociationsRequest$ProviderTypes": "

List of provider types to use as a filter.

" } }, + "PullRequestId": { + "base": null, + "refs": { + "CodeReview$PullRequestId": "

The pull request ID for the code review.

", + "CodeReviewSummary$PullRequestId": "

The pull request ID for the code review.

" + } + }, + "PutRecommendationFeedbackRequest": { + "base": null, + "refs": { + } + }, + "PutRecommendationFeedbackResponse": { + "base": null, + "refs": { + } + }, + "Reaction": { + "base": null, + "refs": { + "Reactions$member": null + } + }, + "Reactions": { + "base": null, + "refs": { + "PutRecommendationFeedbackRequest$Reactions": "

List for storing reactions. Reactions are utf-8 text code for emojis. If you send an empty list it clears all your feedback.

", + "RecommendationFeedback$Reactions": "

List for storing reactions. Reactions are utf-8 text code for emojis. You can send an empty list to clear off all your feedback.

", + "RecommendationFeedbackSummary$Reactions": "

List for storing reactions. Reactions are utf-8 text code for emojis.

" + } + }, + "RecommendationFeedback": { + "base": "

Information about the recommendation feedback.

", + "refs": { + "DescribeRecommendationFeedbackResponse$RecommendationFeedback": "

The recommendation feedback given by the user.

" + } + }, + "RecommendationFeedbackSummaries": { + "base": null, + "refs": { + "ListRecommendationFeedbackResponse$RecommendationFeedbackSummaries": "

Recommendation feedback summaries corresponding to the code review ARN.

" + } + }, + "RecommendationFeedbackSummary": { + "base": "

Information about recommendation feedback summaries.

", + "refs": { + "RecommendationFeedbackSummaries$member": null + } + }, + "RecommendationId": { + "base": null, + "refs": { + "DescribeRecommendationFeedbackRequest$RecommendationId": "

The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.

", + "PutRecommendationFeedbackRequest$RecommendationId": "

The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.

", + "RecommendationFeedback$RecommendationId": "

The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

", + "RecommendationFeedbackSummary$RecommendationId": "

The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

", + "RecommendationIds$member": null, + "RecommendationSummary$RecommendationId": "

The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

" + } + }, + "RecommendationIds": { + "base": null, + "refs": { + "ListRecommendationFeedbackRequest$RecommendationIds": "

Filter on recommendationIds that need to be applied before displaying the result. This can be used to query all the recommendation feedback for a given recommendation.

" + } + }, + "RecommendationSummaries": { + "base": null, + "refs": { + "ListRecommendationsResponse$RecommendationSummaries": "

List of recommendations for the requested code review.

" + } + }, + "RecommendationSummary": { + "base": "

Information about recommendations.

", + "refs": { + "RecommendationSummaries$member": null + } + }, "Repository": { "base": "

Information about a repository.

", "refs": { @@ -182,7 +433,7 @@ "refs": { "RepositoryAssociation$State": "

The state of the repository association.

", "RepositoryAssociationStates$member": null, - "RepositoryAssociationSummary$State": "

The state of the repository association.

Associated

Amazon CodeGuru Reviewer is associated with the repository.

Associating

The association is in progress.

Failed

The association failed. For more information about troubleshooting (or why it failed), see [troubleshooting topic].

Disassociating

Amazon CodeGuru Reviewer is in the process of disassociating with the repository.

" + "RepositoryAssociationSummary$State": "

The state of the repository association.

Associated

Amazon CodeGuru Reviewer is associated with the repository.

Associating

The association is in progress.

Failed

The association failed.

Disassociating

Amazon CodeGuru Reviewer is in the process of disassociating with the repository.

" } }, "RepositoryAssociationStates": { @@ -203,12 +454,36 @@ "RepositoryAssociationSummaries$member": null } }, + "RepositoryNames": { + "base": null, + "refs": { + "ListCodeReviewsRequest$RepositoryNames": "

List of repository names for filtering that needs to be applied before displaying the result.

" + } + }, + "ResourceNotFoundException": { + "base": "

The resource specified in the request was not found.

", + "refs": { + } + }, + "SourceCodeType": { + "base": "

Information about the source code type.

", + "refs": { + "CodeReview$SourceCodeType": "

The type of the source code for the code review.

" + } + }, "StateReason": { "base": null, "refs": { + "CodeReview$StateReason": "

The reason for the state of the code review.

", "RepositoryAssociation$StateReason": "

A description of why the repository association is in the current state.

" } }, + "Text": { + "base": null, + "refs": { + "RecommendationSummary$Description": "

A description of the recommendation generated by CodeGuru Reviewer for the lines of code between the start line and the end line.

" + } + }, "ThrottlingException": { "base": "

The request was denied due to request throttling.

", "refs": { @@ -217,11 +492,40 @@ "TimeStamp": { "base": null, "refs": { + "CodeReview$CreatedTimeStamp": "

The time, in milliseconds since the epoch, when the code review was created.

", + "CodeReview$LastUpdatedTimeStamp": "

The time, in milliseconds since the epoch, when the code review was last updated.

", + "CodeReviewSummary$CreatedTimeStamp": "

The time, in milliseconds since the epoch, when the code review was created.

", + "CodeReviewSummary$LastUpdatedTimeStamp": "

The time, in milliseconds since the epoch, when the code review was last updated.

", + "RecommendationFeedback$CreatedTimeStamp": "

The time at which the feedback was created.

", + "RecommendationFeedback$LastUpdatedTimeStamp": "

The time at which the feedback was last updated.

", "RepositoryAssociation$LastUpdatedTimeStamp": "

The time, in milliseconds since the epoch, when the repository association was last updated.

", "RepositoryAssociation$CreatedTimeStamp": "

The time, in milliseconds since the epoch, when the repository association was created.

", "RepositoryAssociationSummary$LastUpdatedTimeStamp": "

The time, in milliseconds since the epoch, since the repository association was last updated.

" } }, + "Type": { + "base": null, + "refs": { + "CodeReview$Type": "

The type of code review.

", + "CodeReviewSummary$Type": "

The type of the code review.

", + "ListCodeReviewsRequest$Type": "

The type of code reviews to list in the response.

" + } + }, + "UserId": { + "base": null, + "refs": { + "DescribeRecommendationFeedbackRequest$UserId": "

Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the user making the request.

", + "RecommendationFeedback$UserId": "

The user principal that made the API call.

", + "RecommendationFeedbackSummary$UserId": "

The identifier for the user that gave the feedback.

", + "UserIds$member": null + } + }, + "UserIds": { + "base": null, + "refs": { + "ListRecommendationFeedbackRequest$UserIds": "

Filter on userIds that need to be applied before displaying the result. This can be used to query all the recommendation feedback for a code review from a given user.

" + } + }, "ValidationException": { "base": "

The input fails to satisfy the specified constraints.

", "refs": { diff --git a/models/apis/codeguru-reviewer/2019-09-19/paginators-1.json b/models/apis/codeguru-reviewer/2019-09-19/paginators-1.json index bbc1f584fdd..a9c76f5b737 100644 --- a/models/apis/codeguru-reviewer/2019-09-19/paginators-1.json +++ b/models/apis/codeguru-reviewer/2019-09-19/paginators-1.json @@ -1,5 +1,20 @@ { "pagination": { + "ListCodeReviews": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRecommendationFeedback": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRecommendations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListRepositoryAssociations": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/dataexchange/2017-07-25/api-2.json b/models/apis/dataexchange/2017-07-25/api-2.json index 9c4e91b4c58..b404b96338a 100644 --- a/models/apis/dataexchange/2017-07-25/api-2.json +++ b/models/apis/dataexchange/2017-07-25/api-2.json @@ -1107,6 +1107,9 @@ "DataSetId": { "shape": "Id" }, + "Encryption": { + "shape": "ExportServerSideEncryption" + }, "RevisionId": { "shape": "Id" } @@ -1126,6 +1129,9 @@ "DataSetId": { "shape": "Id" }, + "Encryption": { + "shape": "ExportServerSideEncryption" + }, "RevisionId": { "shape": "Id" } @@ -1136,6 +1142,21 @@ "RevisionId" ] }, + "ExportServerSideEncryption": { + "type": "structure", + "members": { + "KmsKeyArn": { + "shape": "__string" + }, + "Type": { + "shape": "ServerSideEncryptionTypes" + } + }, + "required": [ + "Type", + "KmsKeyArn" + ] + }, "GetAssetRequest": { "type": "structure", "members": { @@ -1872,6 +1893,13 @@ "Size" ] }, + "ServerSideEncryptionTypes": { + "type": "string", + "enum": [ + "aws:kms", + "AES256" + ] + }, "ServiceLimitExceededException": { "type": "structure", "members": { @@ -2239,25 +2267,7 @@ "type": "string", "min": 24, "max": 24, - "pattern": 
"/^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$/" - } - }, - "authorizers": { - "create_job_authorizer": { - "name": "create_job_authorizer", - "type": "provided", - "placement": { - "location": "header", - "name": "Authorization" - } - }, - "start_cancel_get_job_authorizer": { - "name": "start_cancel_get_job_authorizer", - "type": "provided", - "placement": { - "location": "header", - "name": "Authorization" - } + "pattern": "/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/" } } } \ No newline at end of file diff --git a/models/apis/dataexchange/2017-07-25/docs-2.json b/models/apis/dataexchange/2017-07-25/docs-2.json index 2b6b9fc23f6..056aaac5f22 100644 --- a/models/apis/dataexchange/2017-07-25/docs-2.json +++ b/models/apis/dataexchange/2017-07-25/docs-2.json @@ -168,6 +168,13 @@ "ResponseDetails$ExportAssetsToS3" : "

Details for the export to Amazon S3 response.

" } }, + "ExportServerSideEncryption" : { + "base" : "

Encryption configuration of the export job. Includes the encryption type as well as the AWS KMS key. The KMS key is only necessary if you chose the KMS encryption type.

", + "refs" : { + "ExportAssetsToS3RequestDetails$Encryption" : "

Encryption configuration for the export job.

", + "ExportAssetsToS3ResponseDetails$Encryption" : "

Encryption configuration of the export job.

" + } + }, "Id" : { "base" : "

A unique identifier.

", "refs" : { @@ -391,6 +398,12 @@ "AssetDetails$S3SnapshotAsset" : null } }, + "ServerSideEncryptionTypes" : { + "base" : "

The types of encryption supported in export jobs to Amazon S3.

", + "refs" : { + "ExportServerSideEncryption$Type" : "

The type of server side encryption used for encrypting the objects in Amazon S3.

" + } + }, "ServiceLimitExceededException" : { "base" : "

The request has exceeded the quotas imposed by the service.

", "refs" : { } @@ -547,10 +560,11 @@ "ConflictException$Message" : "

The request couldn't be completed because it conflicted with the current state of the resource.

", "ConflictException$ResourceId" : "

The unique identifier for the resource with the conflict.

", "ExportAssetToSignedUrlResponseDetails$SignedUrl" : "

The signed URL for the export request.

", + "ExportServerSideEncryption$KmsKeyArn" : "

The Amazon Resource Name (ARN) of the AWS KMS key you want to use to encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms as an encryption type.

", "ImportAssetFromSignedUrlResponseDetails$SignedUrl" : "

The signed URL.

", "InternalServerException$Message" : "The message identifying the service exception that occurred.", "JobError$Message" : "The message related to the job error.", - "JobError$ResourceId" : "The unqiue identifier for the resource related to the error.", + "JobError$ResourceId" : "The unique identifier for the resource related to the error.", "OriginDetails$ProductId" : null, "ResourceNotFoundException$Message" : "

The resource couldn't be found.

", "ResourceNotFoundException$ResourceId" : "

The unique identifier for the resource that couldn't be found.

", @@ -578,4 +592,4 @@ } } } -} \ No newline at end of file +} diff --git a/models/apis/dlm/2018-01-12/docs-2.json b/models/apis/dlm/2018-01-12/docs-2.json index 19644fa99b3..4857ef336fc 100644 --- a/models/apis/dlm/2018-01-12/docs-2.json +++ b/models/apis/dlm/2018-01-12/docs-2.json @@ -173,7 +173,7 @@ "Interval": { "base": null, "refs": { - "CreateRule$Interval": "

The interval between snapshots. The supported values are 2, 3, 4, 6, 8, 12, and 24.

", + "CreateRule$Interval": "

The interval between snapshots. The supported values are 1, 2, 3, 4, 6, 8, 12, and 24.

", "CrossRegionCopyRetainRule$Interval": "

The amount of time to retain each snapshot. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days.

", "FastRestoreRule$Interval": "

The amount of time to enable fast snapshot restore. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days.

", "RetainRule$Interval": "

The amount of time to retain each snapshot. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days.

" diff --git a/models/apis/dms/2016-01-01/api-2.json b/models/apis/dms/2016-01-01/api-2.json index 59b8a5034c4..3b3f9a05be5 100644 --- a/models/apis/dms/2016-01-01/api-2.json +++ b/models/apis/dms/2016-01-01/api-2.json @@ -801,6 +801,7 @@ "KinesisSettings":{"shape":"KinesisSettings"}, "KafkaSettings":{"shape":"KafkaSettings"}, "ElasticsearchSettings":{"shape":"ElasticsearchSettings"}, + "NeptuneSettings":{"shape":"NeptuneSettings"}, "RedshiftSettings":{"shape":"RedshiftSettings"} } }, @@ -902,7 +903,8 @@ "CdcStartTime":{"shape":"TStamp"}, "CdcStartPosition":{"shape":"String"}, "CdcStopPosition":{"shape":"String"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "TaskData":{"shape":"String"} } }, "CreateReplicationTaskResponse":{ @@ -1368,6 +1370,7 @@ "KinesisSettings":{"shape":"KinesisSettings"}, "KafkaSettings":{"shape":"KafkaSettings"}, "ElasticsearchSettings":{"shape":"ElasticsearchSettings"}, + "NeptuneSettings":{"shape":"NeptuneSettings"}, "RedshiftSettings":{"shape":"RedshiftSettings"} } }, @@ -1608,6 +1611,7 @@ "KinesisSettings":{"shape":"KinesisSettings"}, "KafkaSettings":{"shape":"KafkaSettings"}, "ElasticsearchSettings":{"shape":"ElasticsearchSettings"}, + "NeptuneSettings":{"shape":"NeptuneSettings"}, "RedshiftSettings":{"shape":"RedshiftSettings"} } }, @@ -1686,7 +1690,8 @@ "ReplicationTaskSettings":{"shape":"String"}, "CdcStartTime":{"shape":"TStamp"}, "CdcStartPosition":{"shape":"String"}, - "CdcStopPosition":{"shape":"String"} + "CdcStopPosition":{"shape":"String"}, + "TaskData":{"shape":"String"} } }, "ModifyReplicationTaskResponse":{ @@ -1712,6 +1717,22 @@ "KmsKeyId":{"shape":"String"} } }, + "NeptuneSettings":{ + "type":"structure", + "required":[ + "S3BucketName", + "S3BucketFolder" + ], + "members":{ + "ServiceAccessRoleArn":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3BucketFolder":{"shape":"String"}, + "ErrorRetryDuration":{"shape":"IntegerOptional"}, + "MaxFileSize":{"shape":"IntegerOptional"}, + 
"MaxRetryCount":{"shape":"IntegerOptional"}, + "IamAuthEnabled":{"shape":"BooleanOptional"} + } + }, "NestingLevelValue":{ "type":"string", "enum":[ @@ -2001,7 +2022,8 @@ "CdcStopPosition":{"shape":"String"}, "RecoveryCheckpoint":{"shape":"String"}, "ReplicationTaskArn":{"shape":"String"}, - "ReplicationTaskStats":{"shape":"ReplicationTaskStats"} + "ReplicationTaskStats":{"shape":"ReplicationTaskStats"}, + "TaskData":{"shape":"String"} } }, "ReplicationTaskAssessmentResult":{ @@ -2217,6 +2239,7 @@ "EngineName":{"shape":"String"}, "SupportsCDC":{"shape":"Boolean"}, "EndpointType":{"shape":"ReplicationEndpointTypeValue"}, + "ReplicationInstanceEngineMinimumVersion":{"shape":"String"}, "EngineDisplayName":{"shape":"String"} } }, diff --git a/models/apis/dms/2016-01-01/docs-2.json b/models/apis/dms/2016-01-01/docs-2.json index f8d4c067357..c55fa1e71d5 100644 --- a/models/apis/dms/2016-01-01/docs-2.json +++ b/models/apis/dms/2016-01-01/docs-2.json @@ -140,6 +140,7 @@ "ModifyEventSubscriptionMessage$Enabled": "

A Boolean value; set to true to activate the subscription.

", "ModifyReplicationInstanceMessage$MultiAZ": "

Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

", "ModifyReplicationInstanceMessage$AutoMinorVersionUpgrade": "

A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case dsecribed following. The change is asynchronously applied as soon as possible.

An outage does result if these factors apply:

", + "NeptuneSettings$IamAuthEnabled": "

If you want IAM authorization enabled for this endpoint, set this parameter to true and attach the appropriate role policy document to your service role specified by ServiceAccessRoleArn. The default is false.

", "RebootReplicationInstanceMessage$ForceFailover": "

If this parameter is true, the reboot is conducted through a Multi-AZ failover. (If the instance isn't configured for Multi-AZ, then you can't specify true.)

", "RedshiftSettings$AcceptAnyDate": "

A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be loaded without generating an error. You can choose true or false (the default).

This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a NULL value into that field.

", "RedshiftSettings$EmptyAsNull": "

A value that specifies whether AWS DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of true sets empty CHAR and VARCHAR fields to null. The default is false.

", @@ -655,7 +656,7 @@ "DescribeEventsMessage$Filters": "

Filters applied to the action.

", "DescribePendingMaintenanceActionsMessage$Filters": "

", "DescribeReplicationInstancesMessage$Filters": "

Filters applied to the describe action.

Valid filter names: replication-instance-arn | replication-instance-id | replication-instance-class | engine-version

", - "DescribeReplicationSubnetGroupsMessage$Filters": "

Filters applied to the describe action.

", + "DescribeReplicationSubnetGroupsMessage$Filters": "

Filters applied to the describe action.

Valid filter names: replication-subnet-group-id

", "DescribeReplicationTasksMessage$Filters": "

Filters applied to the describe action.

Valid filter names: replication-task-arn | replication-task-id | migration-type | endpoint-arn | replication-instance-arn

", "DescribeTableStatisticsMessage$Filters": "

Filters applied to the describe table statistics action.

Valid filter names: schema-name | table-name | table-state

A combination of filters creates an AND condition where each record matches all specified filters.

" } @@ -724,6 +725,9 @@ "ModifyEndpointMessage$Port": "

The port used by the endpoint database.

", "ModifyReplicationInstanceMessage$AllocatedStorage": "

The amount of storage (in gigabytes) to be allocated for the replication instance.

", "MongoDbSettings$Port": "

The port value for the MongoDB source endpoint.

", + "NeptuneSettings$ErrorRetryDuration": "

The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 250.

", + "NeptuneSettings$MaxFileSize": "

The maximum size in KB of migrated graph data stored in a CSV file before AWS DMS bulk-loads the data to the Neptune target database. The default is 1048576 KB. If successful, AWS DMS clears the bucket, ready to store the next batch of migrated graph data.

", + "NeptuneSettings$MaxRetryCount": "

The number of times for AWS DMS to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 5.

", "RedshiftSettings$ConnectionTimeout": "

A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you initially establish a connection.

", "RedshiftSettings$FileTransferUploadStreams": "

The number of threads used to upload a single file. This parameter accepts a value from 1 through 64. It defaults to 10.

", "RedshiftSettings$LoadTimeout": "

The amount of time to wait (in milliseconds) before timing out, beginning from when you begin loading.

", @@ -784,9 +788,9 @@ "KafkaSettings": { "base": "

Provides information that describes an Apache Kafka endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.

", "refs": { - "CreateEndpointMessage$KafkaSettings": "

Settings in JSON format for the target Apache Kafka endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to Apache Kafka in the AWS Database Migration User Guide.

", + "CreateEndpointMessage$KafkaSettings": "

Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration User Guide.

", "Endpoint$KafkaSettings": "

The settings for the Apache Kafka target endpoint. For more information, see the KafkaSettings structure.

", - "ModifyEndpointMessage$KafkaSettings": "

Settings in JSON format for the target Apache Kafka endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to Apache Kafka in the AWS Database Migration User Guide.

" + "ModifyEndpointMessage$KafkaSettings": "

Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration User Guide.

" } }, "KeyList": { @@ -798,9 +802,9 @@ "KinesisSettings": { "base": "

Provides information that describes an Amazon Kinesis Data Stream endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.

", "refs": { - "CreateEndpointMessage$KinesisSettings": "

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For information about other available settings, see Using Object Mapping to Migrate Data to a Kinesis Data Stream in the AWS Database Migration User Guide.

", + "CreateEndpointMessage$KinesisSettings": "

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration User Guide.

", "Endpoint$KinesisSettings": "

The settings for the Amazon Kinesis target endpoint. For more information, see the KinesisSettings structure.

", - "ModifyEndpointMessage$KinesisSettings": "

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For information about other available settings, see Using Object Mapping to Migrate Data to a Kinesis Data Stream in the AWS Database Migration User Guide.

" + "ModifyEndpointMessage$KinesisSettings": "

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration User Guide.

" } }, "ListTagsForResourceMessage": { @@ -899,11 +903,19 @@ "MongoDbSettings": { "base": "

Provides information that defines a MongoDB endpoint.

", "refs": { - "CreateEndpointMessage$MongoDbSettings": "

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

", + "CreateEndpointMessage$MongoDbSettings": "

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

", "Endpoint$MongoDbSettings": "

The settings for the MongoDB source endpoint. For more information, see the MongoDbSettings structure.

", "ModifyEndpointMessage$MongoDbSettings": "

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" } }, + "NeptuneSettings": { + "base": "

Provides information that defines an Amazon Neptune endpoint.

", + "refs": { + "CreateEndpointMessage$NeptuneSettings": "

Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings in the AWS Database Migration Service User Guide.

", + "Endpoint$NeptuneSettings": "

The settings for the Amazon Neptune target endpoint. For more information, see the NeptuneSettings structure.

", + "ModifyEndpointMessage$NeptuneSettings": "

Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings in the AWS Database Migration Service User Guide.

" + } + }, "NestingLevelValue": { "base": null, "refs": { @@ -1294,10 +1306,11 @@ "CreateReplicationTaskMessage$SourceEndpointArn": "

An Amazon Resource Name (ARN) that uniquely identifies the source endpoint.

", "CreateReplicationTaskMessage$TargetEndpointArn": "

An Amazon Resource Name (ARN) that uniquely identifies the target endpoint.

", "CreateReplicationTaskMessage$ReplicationInstanceArn": "

The Amazon Resource Name (ARN) of a replication instance.

", - "CreateReplicationTaskMessage$TableMappings": "

The table mappings for the task, in JSON format. For more information, see Table Mapping in the AWS Database Migration User Guide.

", - "CreateReplicationTaskMessage$ReplicationTaskSettings": "

Overall settings for the task, in JSON format. For more information, see Task Settings in the AWS Database Migration User Guide.

", + "CreateReplicationTaskMessage$TableMappings": "

The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the AWS Database Migration User Guide.

", + "CreateReplicationTaskMessage$ReplicationTaskSettings": "

Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for AWS Database Migration Service Tasks in the AWS Database Migration User Guide.

", "CreateReplicationTaskMessage$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.

", "CreateReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 “

", + "CreateReplicationTaskMessage$TaskData": "

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration User Guide.

", "DeleteCertificateMessage$CertificateArn": "

The Amazon Resource Name (ARN) of the deleted certificate.

", "DeleteConnectionMessage$EndpointArn": "

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

", "DeleteConnectionMessage$ReplicationInstanceArn": "

The Amazon Resource Name (ARN) of the replication instance.

", @@ -1411,9 +1424,10 @@ "ModifyReplicationTaskMessage$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", "ModifyReplicationTaskMessage$ReplicationTaskIdentifier": "

The replication task identifier.

Constraints:

", "ModifyReplicationTaskMessage$TableMappings": "

When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://. When working with the DMS API, provide the JSON as the parameter value, for example: --table-mappings file://mappingfile.json

", - "ModifyReplicationTaskMessage$ReplicationTaskSettings": "

JSON file that contains settings for the task, such as target metadata settings.

", + "ModifyReplicationTaskMessage$ReplicationTaskSettings": "

JSON file that contains settings for the task, such as task metadata settings.

", "ModifyReplicationTaskMessage$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.

", "ModifyReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 “

", + "ModifyReplicationTaskMessage$TaskData": "

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration User Guide.

", "MongoDbSettings$Username": "

The user name you use to access the MongoDB source endpoint.

", "MongoDbSettings$ServerName": "

The name of the server on the MongoDB source endpoint.

", "MongoDbSettings$DatabaseName": "

The database name on the MongoDB source endpoint.

", @@ -1421,6 +1435,9 @@ "MongoDbSettings$DocsToInvestigate": "

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to ONE.

Must be a positive value greater than 0. Default value is 1000.

", "MongoDbSettings$AuthSource": "

The MongoDB database name. This setting isn't used when authType=NO.

The default is admin.

", "MongoDbSettings$KmsKeyId": "

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

", + "NeptuneSettings$ServiceAccessRoleArn": "

The ARN of the service role you have created for the Neptune target endpoint. For more information, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.ServiceRole in the AWS Database Migration Service User Guide.

", + "NeptuneSettings$S3BucketName": "

The name of the S3 bucket for AWS DMS to temporarily store migrated graph data in CSV files before bulk-loading it to the Neptune target database. AWS DMS maps the SQL source data to graph data before storing it in these CSV files.

", + "NeptuneSettings$S3BucketFolder": "

A folder path where you want AWS DMS to store migrated graph data in the S3 bucket specified by S3BucketName.

", "OrderableReplicationInstance$EngineVersion": "

The version of the replication engine.

", "OrderableReplicationInstance$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance.

Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

", "OrderableReplicationInstance$StorageType": "

The type of storage used by the replication instance.

", @@ -1483,6 +1500,7 @@ "ReplicationTask$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 “

", "ReplicationTask$RecoveryCheckpoint": "

Indicates the last checkpoint that occurred during a change data capture (CDC) operation. You can provide this value to the CdcStartPosition parameter to start a CDC operation that begins at that checkpoint.

", "ReplicationTask$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", + "ReplicationTask$TaskData": "

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration User Guide.

", "ReplicationTaskAssessmentResult$ReplicationTaskIdentifier": "

The replication task identifier of the task on which the task assessment was run.

", "ReplicationTaskAssessmentResult$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", "ReplicationTaskAssessmentResult$AssessmentStatus": "

The status of the task assessment.

", @@ -1509,6 +1527,7 @@ "Subnet$SubnetStatus": "

The status of the subnet.

", "SubnetIdentifierList$member": null, "SupportedEndpointType$EngineName": "

The database engine name. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", and \"sqlserver\".

", + "SupportedEndpointType$ReplicationInstanceEngineMinimumVersion": "

The earliest AWS DMS engine version that supports this endpoint engine. Note that endpoint engines released with AWS DMS versions earlier than 3.1.1 do not return a value for this parameter.

", "SupportedEndpointType$EngineDisplayName": "

The expanded name for the engine name. For example, if the EngineName parameter is \"aurora,\" this value would be \"Amazon Aurora MySQL.\"

", "TableStatistics$SchemaName": "

The schema name.

", "TableStatistics$TableName": "

The name of the table.

", diff --git a/models/apis/elastic-inference/2017-07-25/api-2.json b/models/apis/elastic-inference/2017-07-25/api-2.json index 6b942c3a2ae..7b0d5b54390 100644 --- a/models/apis/elastic-inference/2017-07-25/api-2.json +++ b/models/apis/elastic-inference/2017-07-25/api-2.json @@ -2,7 +2,7 @@ "version":"2.0", "metadata":{ "apiVersion":"2017-07-25", - "endpointPrefix":"elastic-inference", + "endpointPrefix":"api.elastic-inference", "jsonVersion":"1.1", "protocol":"rest-json", "serviceAbbreviation":"Amazon Elastic Inference", @@ -13,6 +13,46 @@ "uid":"elastic-inference-2017-07-25" }, "operations":{ + "DescribeAcceleratorOfferings":{ + "name":"DescribeAcceleratorOfferings", + "http":{ + "method":"POST", + "requestUri":"/describe-accelerator-offerings" + }, + "input":{"shape":"DescribeAcceleratorOfferingsRequest"}, + "output":{"shape":"DescribeAcceleratorOfferingsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ] + }, + "DescribeAcceleratorTypes":{ + "name":"DescribeAcceleratorTypes", + "http":{ + "method":"GET", + "requestUri":"/describe-accelerator-types" + }, + "input":{"shape":"DescribeAcceleratorTypesRequest"}, + "output":{"shape":"DescribeAcceleratorTypesResponse"}, + "errors":[ + {"shape":"InternalServerException"} + ] + }, + "DescribeAccelerators":{ + "name":"DescribeAccelerators", + "http":{ + "method":"POST", + "requestUri":"/describe-accelerators" + }, + "input":{"shape":"DescribeAcceleratorsRequest"}, + "output":{"shape":"DescribeAcceleratorsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -57,6 +97,68 @@ } }, "shapes":{ + "AcceleratorHealthStatus":{ + "type":"string", + "max":256, + "min":1 + }, + "AcceleratorId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^eia-[0-9a-f]+$" + }, + 
"AcceleratorIdList":{ + "type":"list", + "member":{"shape":"AcceleratorId"}, + "max":1000, + "min":0 + }, + "AcceleratorType":{ + "type":"structure", + "members":{ + "acceleratorTypeName":{"shape":"AcceleratorTypeName"}, + "memoryInfo":{"shape":"MemoryInfo"}, + "throughputInfo":{"shape":"ThroughputInfoList"} + } + }, + "AcceleratorTypeList":{ + "type":"list", + "member":{"shape":"AcceleratorType"}, + "max":100, + "min":0 + }, + "AcceleratorTypeName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\S+$" + }, + "AcceleratorTypeNameList":{ + "type":"list", + "member":{"shape":"AcceleratorTypeName"}, + "max":100, + "min":0 + }, + "AcceleratorTypeOffering":{ + "type":"structure", + "members":{ + "acceleratorType":{"shape":"AcceleratorTypeName"}, + "locationType":{"shape":"LocationType"}, + "location":{"shape":"Location"} + } + }, + "AcceleratorTypeOfferingList":{ + "type":"list", + "member":{"shape":"AcceleratorTypeOffering"}, + "max":100, + "min":0 + }, + "AvailabilityZone":{ + "type":"string", + "max":256, + "min":1 + }, "BadRequestException":{ "type":"structure", "members":{ @@ -65,6 +167,87 @@ "error":{"httpStatusCode":400}, "exception":true }, + "DescribeAcceleratorOfferingsRequest":{ + "type":"structure", + "required":["locationType"], + "members":{ + "locationType":{"shape":"LocationType"}, + "acceleratorTypes":{"shape":"AcceleratorTypeNameList"} + } + }, + "DescribeAcceleratorOfferingsResponse":{ + "type":"structure", + "members":{ + "acceleratorTypeOfferings":{"shape":"AcceleratorTypeOfferingList"} + } + }, + "DescribeAcceleratorTypesRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeAcceleratorTypesResponse":{ + "type":"structure", + "members":{ + "acceleratorTypes":{"shape":"AcceleratorTypeList"} + } + }, + "DescribeAcceleratorsRequest":{ + "type":"structure", + "members":{ + "acceleratorIds":{"shape":"AcceleratorIdList"}, + "filters":{"shape":"FilterList"}, + "maxResults":{"shape":"MaxResults"}, + "nextToken":{"shape":"NextToken"} 
+ } + }, + "DescribeAcceleratorsResponse":{ + "type":"structure", + "members":{ + "acceleratorSet":{"shape":"ElasticInferenceAcceleratorSet"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ElasticInferenceAccelerator":{ + "type":"structure", + "members":{ + "acceleratorHealth":{"shape":"ElasticInferenceAcceleratorHealth"}, + "acceleratorType":{"shape":"AcceleratorTypeName"}, + "acceleratorId":{"shape":"AcceleratorId"}, + "availabilityZone":{"shape":"AvailabilityZone"}, + "attachedResource":{"shape":"ResourceArn"} + } + }, + "ElasticInferenceAcceleratorHealth":{ + "type":"structure", + "members":{ + "status":{"shape":"AcceleratorHealthStatus"} + } + }, + "ElasticInferenceAcceleratorSet":{ + "type":"list", + "member":{"shape":"ElasticInferenceAccelerator"} + }, + "Filter":{ + "type":"structure", + "members":{ + "name":{"shape":"FilterName"}, + "values":{"shape":"ValueStringList"} + } + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":100, + "min":0 + }, + "FilterName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^\\S+$" + }, + "Integer":{"type":"integer"}, "InternalServerException":{ "type":"structure", "members":{ @@ -73,6 +256,19 @@ "error":{"httpStatusCode":500}, "exception":true }, + "Key":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\S+$" + }, + "KeyValuePair":{ + "type":"structure", + "members":{ + "key":{"shape":"Key"}, + "value":{"shape":"Value"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -90,9 +286,47 @@ "tags":{"shape":"TagMap"} } }, + "Location":{ + "type":"string", + "max":256, + "min":1 + }, + "LocationType":{ + "type":"string", + "enum":[ + "region", + "availability-zone", + "availability-zone-id" + ], + "max":256, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "MemoryInfo":{ + "type":"structure", + "members":{ + "sizeInMiB":{"shape":"Integer"} + } + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + 
"pattern":"^[A-Za-z0-9+/]+={0,2}$" + }, "ResourceARN":{ "type":"string", "max":1011, + "min":1, + "pattern":"^arn:aws\\S*:elastic-inference:\\S+:\\d{12}:elastic-inference-accelerator/eia-[0-9a-f]+$" + }, + "ResourceArn":{ + "type":"string", + "max":1283, "min":1 }, "ResourceNotFoundException":{ @@ -103,11 +337,16 @@ "error":{"httpStatusCode":404}, "exception":true }, - "String":{"type":"string"}, + "String":{ + "type":"string", + "max":500000, + "pattern":"^.*$" + }, "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"^\\S$" }, "TagKeyList":{ "type":"list", @@ -146,6 +385,12 @@ "type":"string", "max":256 }, + "ThroughputInfoList":{ + "type":"list", + "member":{"shape":"KeyValuePair"}, + "max":100, + "min":0 + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -169,6 +414,13 @@ "type":"structure", "members":{ } + }, + "Value":{"type":"integer"}, + "ValueStringList":{ + "type":"list", + "member":{"shape":"String"}, + "max":100, + "min":0 } } } diff --git a/models/apis/elastic-inference/2017-07-25/docs-2.json b/models/apis/elastic-inference/2017-07-25/docs-2.json index efd130ef8cf..d66b666fb59 100644 --- a/models/apis/elastic-inference/2017-07-25/docs-2.json +++ b/models/apis/elastic-inference/2017-07-25/docs-2.json @@ -1,20 +1,171 @@ { "version": "2.0", - "service": "Elastic Inference public APIs.", + "service": "

Elastic Inference public APIs.

", "operations": { - "ListTagsForResource": "Returns all tags of an Elastic Inference Accelerator.", - "TagResource": "Adds the specified tag(s) to an Elastic Inference Accelerator.", - "UntagResource": "Removes the specified tag(s) from an Elastic Inference Accelerator." + "DescribeAcceleratorOfferings": "

Describes the locations in which a given accelerator type or set of types is present in a given region.

", + "DescribeAcceleratorTypes": "

Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput.

", + "DescribeAccelerators": "

Describes information over a provided set of accelerators belonging to an account.

", + "ListTagsForResource": "

Returns all tags of an Elastic Inference Accelerator.

", + "TagResource": "

Adds the specified tags to an Elastic Inference Accelerator.

", + "UntagResource": "

Removes the specified tags from an Elastic Inference Accelerator.

" }, "shapes": { + "AcceleratorHealthStatus": { + "base": null, + "refs": { + "ElasticInferenceAcceleratorHealth$status": "

The health status of the Elastic Inference Accelerator.

" + } + }, + "AcceleratorId": { + "base": null, + "refs": { + "AcceleratorIdList$member": null, + "ElasticInferenceAccelerator$acceleratorId": "

The ID of the Elastic Inference Accelerator.

" + } + }, + "AcceleratorIdList": { + "base": null, + "refs": { + "DescribeAcceleratorsRequest$acceleratorIds": "

The IDs of the accelerators to describe.

" + } + }, + "AcceleratorType": { + "base": "

The details of an Elastic Inference Accelerator type.

", + "refs": { + "AcceleratorTypeList$member": null + } + }, + "AcceleratorTypeList": { + "base": null, + "refs": { + "DescribeAcceleratorTypesResponse$acceleratorTypes": "

The available accelerator types.

" + } + }, + "AcceleratorTypeName": { + "base": null, + "refs": { + "AcceleratorType$acceleratorTypeName": "

The name of the Elastic Inference Accelerator type.

", + "AcceleratorTypeNameList$member": null, + "AcceleratorTypeOffering$acceleratorType": "

The name of the Elastic Inference Accelerator type.

", + "ElasticInferenceAccelerator$acceleratorType": "

The type of the Elastic Inference Accelerator.

" + } + }, + "AcceleratorTypeNameList": { + "base": null, + "refs": { + "DescribeAcceleratorOfferingsRequest$acceleratorTypes": "

The list of accelerator types to describe.

" + } + }, + "AcceleratorTypeOffering": { + "base": "

The offering for an Elastic Inference Accelerator type.

", + "refs": { + "AcceleratorTypeOfferingList$member": null + } + }, + "AcceleratorTypeOfferingList": { + "base": null, + "refs": { + "DescribeAcceleratorOfferingsResponse$acceleratorTypeOfferings": "

The list of accelerator type offerings for a specific location.

" + } + }, + "AvailabilityZone": { + "base": null, + "refs": { + "ElasticInferenceAccelerator$availabilityZone": "

The availability zone where the Elastic Inference Accelerator is present.

" + } + }, "BadRequestException": { - "base": "Raised when a malformed input has been provided to the API.", + "base": "

Raised when a malformed input has been provided to the API.

", + "refs": { + } + }, + "DescribeAcceleratorOfferingsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAcceleratorOfferingsResponse": { + "base": null, + "refs": { + } + }, + "DescribeAcceleratorTypesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAcceleratorTypesResponse": { + "base": null, + "refs": { + } + }, + "DescribeAcceleratorsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAcceleratorsResponse": { + "base": null, + "refs": { + } + }, + "ElasticInferenceAccelerator": { + "base": "

The details of an Elastic Inference Accelerator.

", "refs": { + "ElasticInferenceAcceleratorSet$member": null + } + }, + "ElasticInferenceAcceleratorHealth": { + "base": "

The health details of an Elastic Inference Accelerator.

", + "refs": { + "ElasticInferenceAccelerator$acceleratorHealth": "

The health of the Elastic Inference Accelerator.

" + } + }, + "ElasticInferenceAcceleratorSet": { + "base": null, + "refs": { + "DescribeAcceleratorsResponse$acceleratorSet": "

The details of the Elastic Inference Accelerators.

" + } + }, + "Filter": { + "base": "

A filter expression for the Elastic Inference Accelerator list.

", + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeAcceleratorsRequest$filters": "

One or more filters. Filter names and values are case-sensitive. Valid filter names are: accelerator-types: can provide a list of accelerator type names to filter for. instance-id: can provide a list of EC2 instance ids to filter for.

" + } + }, + "FilterName": { + "base": null, + "refs": { + "Filter$name": "

The filter name for the Elastic Inference Accelerator list. It can assume the following values: accelerator-type: the type of Elastic Inference Accelerator to filter for. instance-id: an EC2 instance id to filter for.

" + } + }, + "Integer": { + "base": null, + "refs": { + "MemoryInfo$sizeInMiB": "

The size in mebibytes of the Elastic Inference Accelerator type.

" } }, "InternalServerException": { - "base": "Raised when an unexpected error occurred during request processing.", + "base": "

Raised when an unexpected error occurred during request processing.

", + "refs": { + } + }, + "Key": { + "base": null, + "refs": { + "KeyValuePair$key": "

The throughput value of the Elastic Inference Accelerator type. It can assume the following values: TFLOPS16bit: the throughput expressed in 16bit TeraFLOPS. TFLOPS32bit: the throughput expressed in 32bit TeraFLOPS.

" + } + }, + "KeyValuePair": { + "base": "

A throughput entry for an Elastic Inference Accelerator type.

", "refs": { + "ThroughputInfoList$member": null } }, "ListTagsForResourceRequest": { @@ -27,16 +178,54 @@ "refs": { } }, + "Location": { + "base": null, + "refs": { + "AcceleratorTypeOffering$location": "

The location for the offering. It will return either the region, availability zone or availability zone id for the offering depending on the locationType value.

" + } + }, + "LocationType": { + "base": null, + "refs": { + "AcceleratorTypeOffering$locationType": "

The location type for the offering. It can assume the following values: region: defines that the offering is at the regional level. availability-zone: defines that the offering is at the availability zone level. availability-zone-id: defines that the offering is at the availability zone level, defined by the availability zone id.

", + "DescribeAcceleratorOfferingsRequest$locationType": "

The location type that you want to describe accelerator type offerings for. It can assume the following values: region: will return the accelerator type offering at the regional level. availability-zone: will return the accelerator type offering at the availability zone level. availability-zone-id: will return the accelerator type offering at the availability zone level returning the availability zone id.

" + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeAcceleratorsRequest$maxResults": "

The total number of items to return in the command's output. If the total number of items available is more than the value specified, a NextToken is provided in the command's output. To resume pagination, provide the NextToken value in the starting-token argument of a subsequent command. Do not use the NextToken response element directly outside of the AWS CLI.

" + } + }, + "MemoryInfo": { + "base": "

The memory information of an Elastic Inference Accelerator type.

", + "refs": { + "AcceleratorType$memoryInfo": "

The memory information of the Elastic Inference Accelerator type.

" + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeAcceleratorsRequest$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

", + "DescribeAcceleratorsResponse$nextToken": "

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + } + }, "ResourceARN": { "base": null, "refs": { - "ListTagsForResourceRequest$resourceArn": "The ARN of the Elastic Inference Accelerator to list the tags for.", - "TagResourceRequest$resourceArn": "The ARN of the Elastic Inference Accelerator to tag.", - "UntagResourceRequest$resourceArn": "The ARN of the Elastic Inference Accelerator to untag." + "ListTagsForResourceRequest$resourceArn": "

The ARN of the Elastic Inference Accelerator to list the tags for.

", + "TagResourceRequest$resourceArn": "

The ARN of the Elastic Inference Accelerator to tag.

", + "UntagResourceRequest$resourceArn": "

The ARN of the Elastic Inference Accelerator to untag.

" + } + }, + "ResourceArn": { + "base": null, + "refs": { + "ElasticInferenceAccelerator$attachedResource": "

The ARN of the resource that the Elastic Inference Accelerator is attached to.

" } }, "ResourceNotFoundException": { - "base": "Raised when the requested resource cannot be found.", + "base": "

Raised when the requested resource cannot be found.

", "refs": { } }, @@ -45,7 +234,8 @@ "refs": { "BadRequestException$message": null, "InternalServerException$message": null, - "ResourceNotFoundException$message": null + "ResourceNotFoundException$message": null, + "ValueStringList$member": null } }, "TagKey": { @@ -58,14 +248,14 @@ "TagKeyList": { "base": null, "refs": { - "UntagResourceRequest$tagKeys": "The list of tags to remove from the Elastic Inference Accelerator." + "UntagResourceRequest$tagKeys": "

The list of tags to remove from the Elastic Inference Accelerator.

" } }, "TagMap": { "base": null, "refs": { - "ListTagsForResourceResult$tags": "The tags of the Elastic Inference Accelerator.", - "TagResourceRequest$tags": "The tags to add to the Elastic Inference Accelerator." + "ListTagsForResourceResult$tags": "

The tags of the Elastic Inference Accelerator.

", + "TagResourceRequest$tags": "

The tags to add to the Elastic Inference Accelerator.

" } }, "TagResourceRequest": { @@ -84,6 +274,12 @@ "TagMap$value": null } }, + "ThroughputInfoList": { + "base": null, + "refs": { + "AcceleratorType$throughputInfo": "

The throughput information of the Elastic Inference Accelerator type.

" + } + }, "UntagResourceRequest": { "base": null, "refs": { @@ -93,6 +289,18 @@ "base": null, "refs": { } + }, + "Value": { + "base": null, + "refs": { + "KeyValuePair$value": "

The throughput value of the Elastic Inference Accelerator type.

" + } + }, + "ValueStringList": { + "base": null, + "refs": { + "Filter$values": "

The values for the filter of the Elastic Inference Accelerator list.

" + } } } } diff --git a/models/apis/elastic-inference/2017-07-25/paginators-1.json b/models/apis/elastic-inference/2017-07-25/paginators-1.json index 5677bd8e4a2..909b792bacb 100644 --- a/models/apis/elastic-inference/2017-07-25/paginators-1.json +++ b/models/apis/elastic-inference/2017-07-25/paginators-1.json @@ -1,4 +1,10 @@ { "pagination": { + "DescribeAccelerators": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "acceleratorSet" + } } } diff --git a/models/apis/es/2015-01-01/api-2.json b/models/apis/es/2015-01-01/api-2.json index 855eb8bb9e4..51029316121 100644 --- a/models/apis/es/2015-01-01/api-2.json +++ b/models/apis/es/2015-01-01/api-2.json @@ -1786,7 +1786,8 @@ "Cancellable":{"shape":"Boolean"}, "UpdateStatus":{"shape":"DeploymentStatus"}, "Description":{"shape":"String"}, - "AutomatedUpdateDate":{"shape":"DeploymentCloseDateTimeStamp"} + "AutomatedUpdateDate":{"shape":"DeploymentCloseDateTimeStamp"}, + "OptionalDeployment":{"shape":"Boolean"} } }, "ServiceUrl":{"type":"string"}, diff --git a/models/apis/es/2015-01-01/docs-2.json b/models/apis/es/2015-01-01/docs-2.json index f57214672ef..1c6bb144b00 100644 --- a/models/apis/es/2015-01-01/docs-2.json +++ b/models/apis/es/2015-01-01/docs-2.json @@ -145,6 +145,7 @@ "OptionStatus$PendingDeletion": "

Indicates whether the Elasticsearch domain is being deleted.

", "ServiceSoftwareOptions$UpdateAvailable": "

True if you are able to update you service software version. False if you are not able to update your service software version.

", "ServiceSoftwareOptions$Cancellable": "

True if you are able to cancel your service software version update. False if you are not able to cancel your service software version.

", + "ServiceSoftwareOptions$OptionalDeployment": "

True if a service software is never automatically updated. False if a service software is automatically updated after AutomatedUpdateDate.

", "UpgradeElasticsearchDomainRequest$PerformCheckOnly": "

This flag, when set to True, indicates that an Upgrade Eligibility Check needs to be performed. This will not actually perform the Upgrade.

", "UpgradeElasticsearchDomainResponse$PerformCheckOnly": "

This flag, when set to True, indicates that an Upgrade Eligibility Check needs to be performed. This will not actually perform the Upgrade.

" } diff --git a/models/apis/firehose/2015-08-04/api-2.json b/models/apis/firehose/2015-08-04/api-2.json index 15be5f99c08..c09a0aef5d8 100644 --- a/models/apis/firehose/2015-08-04/api-2.json +++ b/models/apis/firehose/2015-08-04/api-2.json @@ -218,6 +218,7 @@ }, "ClusterJDBCURL":{ "type":"string", + "max":512, "min":1, "pattern":"jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.

", "PutRecord": "

Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", "PutRecordBatch": "

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before 64-bit encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", - "StartDeliveryStreamEncryption": "

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. In this case, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement and creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again.

You can only enable SSE for a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", + "StartDeliveryStreamEncryption": "

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", "StopDeliveryStreamEncryption": "

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", "TagDeliveryStream": "

Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

Each delivery stream can have up to 50 tags.

This operation has a limit of five transactions per second per account.

", "UntagDeliveryStream": "

Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.

If you specify a tag that doesn't exist, the operation ignores it.

This operation has a limit of five transactions per second per account.

", @@ -207,7 +207,7 @@ } }, "DeliveryStreamEncryptionConfigurationInput": { - "base": "

Used to specify the type and Amazon Resource Name (ARN) of the CMK needed for Server-Side Encryption (SSE).

", + "base": "

Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).

", "refs": { "CreateDeliveryStreamInput$DeliveryStreamEncryptionConfigurationInput": "

Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).

", "StartDeliveryStreamEncryptionInput$DeliveryStreamEncryptionConfigurationInput": "

Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).

" @@ -518,9 +518,9 @@ } }, "InputFormatConfiguration": { - "base": "

Specifies the deserializer you want to use to convert the format of the input data.

", + "base": "

Specifies the deserializer you want to use to convert the format of the input data. This parameter is required if Enabled is set to true.

", "refs": { - "DataFormatConversionConfiguration$InputFormatConfiguration": "

Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON.

" + "DataFormatConversionConfiguration$InputFormatConfiguration": "

Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.

" } }, "IntervalInSeconds": { @@ -549,7 +549,7 @@ "base": null, "refs": { "DeliveryStreamEncryptionConfiguration$KeyType": "

Indicates the type of customer master key (CMK) that is used for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs).

", - "DeliveryStreamEncryptionConfigurationInput$KeyType": "

Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is already encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

" + "DeliveryStreamEncryptionConfigurationInput$KeyType": "

Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException.

To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the AWS Key Management Service developer guide.

" } }, "KinesisStreamARN": { @@ -662,7 +662,10 @@ "SchemaConfiguration$DatabaseName": "

Specifies the name of the AWS Glue database that contains the schema for the output data.

", "SchemaConfiguration$TableName": "

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

", "SchemaConfiguration$Region": "

If you don't specify an AWS Region, the default is the current Region.

", - "SchemaConfiguration$VersionId": "

Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

" + "SchemaConfiguration$VersionId": "

Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

", + "SecurityGroupIdList$member": null, + "SubnetIdList$member": null, + "VpcConfigurationDescription$VpcId": "

The ID of the Amazon ES destination's VPC.

" } }, "NonNegativeIntegerObject": { @@ -709,9 +712,9 @@ } }, "OutputFormatConfiguration": { - "base": "

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3.

", + "base": "

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

", "refs": { - "DataFormatConversionConfiguration$OutputFormatConfiguration": "

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format.

" + "DataFormatConversionConfiguration$OutputFormatConfiguration": "

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.

" } }, "ParquetCompression": { @@ -941,7 +944,9 @@ "RedshiftDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

", "S3DestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

", "S3DestinationDescription$RoleARN": "

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

", - "S3DestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "S3DestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

", + "VpcConfiguration$RoleARN": "

The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC.

", + "VpcConfigurationDescription$RoleARN": "

The ARN of the IAM role that you want the delivery stream uses to create endpoints in the destination VPC.

" } }, "S3BackupMode": { @@ -986,9 +991,16 @@ } }, "SchemaConfiguration": { - "base": "

Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3.

", + "base": "

Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

", "refs": { - "DataFormatConversionConfiguration$SchemaConfiguration": "

Specifies the AWS Glue Data Catalog table that contains the column information.

" + "DataFormatConversionConfiguration$SchemaConfiguration": "

Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.

" + } + }, + "SecurityGroupIdList": { + "base": null, + "refs": { + "VpcConfiguration$SecurityGroupIds": "

The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination.

", + "VpcConfigurationDescription$SecurityGroupIds": "

The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination.

" } }, "Serializer": { @@ -1074,6 +1086,13 @@ "refs": { } }, + "SubnetIdList": { + "base": null, + "refs": { + "VpcConfiguration$SubnetIds": "

The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

", + "VpcConfigurationDescription$SubnetIds": "

The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" + } + }, "Tag": { "base": "

Metadata that you can assign to a delivery stream, consisting of a key-value pair.

", "refs": { @@ -1152,6 +1171,18 @@ "RedshiftDestinationDescription$Username": "

The name of the user.

", "RedshiftDestinationUpdate$Username": "

The name of the user.

" } + }, + "VpcConfiguration": { + "base": "

The details of the VPC of the Amazon ES destination.

", + "refs": { + "ElasticsearchDestinationConfiguration$VpcConfiguration": "

The details of the VPC of the Amazon ES destination.

" + } + }, + "VpcConfigurationDescription": { + "base": "

The details of the VPC of the Amazon ES destination.

", + "refs": { + "ElasticsearchDestinationDescription$VpcConfigurationDescription": "

The details of the VPC of the Amazon ES destination.

" + } } } } diff --git a/models/apis/fms/2018-01-01/api-2.json b/models/apis/fms/2018-01-01/api-2.json index 3c7b711245f..1ee5569b2e9 100644 --- a/models/apis/fms/2018-01-01/api-2.json +++ b/models/apis/fms/2018-01-01/api-2.json @@ -303,7 +303,10 @@ }, "CustomerPolicyScopeIdType":{ "type":"string", - "enum":["ACCOUNT"] + "enum":[ + "ACCOUNT", + "ORG_UNIT" + ] }, "CustomerPolicyScopeMap":{ "type":"map", diff --git a/models/apis/fms/2018-01-01/docs-2.json b/models/apis/fms/2018-01-01/docs-2.json index 19fb2bf51bf..27727247701 100644 --- a/models/apis/fms/2018-01-01/docs-2.json +++ b/models/apis/fms/2018-01-01/docs-2.json @@ -16,7 +16,7 @@ "ListPolicies": "

Returns an array of PolicySummary objects in the response.

", "ListTagsForResource": "

Retrieves the list of tags for the specified AWS resource.

", "PutNotificationChannel": "

Designates the IAM role and Amazon Simple Notification Service (SNS) topic that AWS Firewall Manager uses to record SNS logs.

", - "PutPolicy": "

Creates an AWS Firewall Manager policy.

Firewall Manager provides the following types of policies:

Each policy is specific to one of the three types. If you want to enforce more than one policy type across accounts, you can create multiple policies. You can create multiple policies for each type.

You must be subscribed to Shield Advanced to create a Shield Advanced policy. For more information about subscribing to Shield Advanced, see CreateSubscription.

", + "PutPolicy": "

Creates an AWS Firewall Manager policy.

Firewall Manager provides the following types of policies:

Each policy is specific to one of the types. If you want to enforce more than one policy type across accounts, create multiple policies. You can create multiple policies for each type.

You must be subscribed to Shield Advanced to create a Shield Advanced policy. For more information about subscribing to Shield Advanced, see CreateSubscription.

", "TagResource": "

Adds one or more tags to an AWS resource.

", "UntagResource": "

Removes one or more tags from an AWS resource.

" }, @@ -91,8 +91,8 @@ "CustomerPolicyScopeMap": { "base": null, "refs": { - "Policy$IncludeMap": "

Specifies the AWS account IDs to include in the policy. If IncludeMap is null, all accounts in the organization in AWS Organizations are included in the policy. If IncludeMap is not null, only values listed in IncludeMap are included in the policy.

The key to the map is ACCOUNT. For example, a valid IncludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

", - "Policy$ExcludeMap": "

Specifies the AWS account IDs to exclude from the policy. The IncludeMap values are evaluated first, with all the appropriate account IDs added to the policy. Then the accounts listed in ExcludeMap are removed, resulting in the final list of accounts to add to the policy.

The key to the map is ACCOUNT. For example, a valid ExcludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" + "Policy$IncludeMap": "

Specifies the AWS account IDs and AWS Organizations organizational units (OUs) to include in the policy. Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time.

You can specify inclusions or exclusions, but not both. If you specify an IncludeMap, AWS Firewall Manager applies the policy to all accounts specified by the IncludeMap, and does not evaluate any ExcludeMap specifications. If you do not specify an IncludeMap, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap.

You can specify account IDs, OUs, or a combination:

", + "Policy$ExcludeMap": "

Specifies the AWS account IDs and AWS Organizations organizational units (OUs) to exclude from the policy. Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time.

You can specify inclusions or exclusions, but not both. If you specify an IncludeMap, AWS Firewall Manager applies the policy to all accounts specified by the IncludeMap, and does not evaluate any ExcludeMap specifications. If you do not specify an IncludeMap, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap.

You can specify account IDs, OUs, or a combination:

" } }, "DeleteNotificationChannelRequest": { @@ -270,7 +270,7 @@ "ManagedServiceData": { "base": null, "refs": { - "SecurityServicePolicyData$ManagedServiceData": "

Details about the service that are specific to the service type, in JSON format. For service type SHIELD_ADVANCED, this is an empty string.

" + "SecurityServicePolicyData$ManagedServiceData": "

Details about the service that are specific to the service type, in JSON format. For service type SHIELD_ADVANCED, this is an empty string.

" } }, "MemberAccounts": { diff --git a/models/apis/iot/2015-05-28/api-2.json b/models/apis/iot/2015-05-28/api-2.json index 9e40dce2e09..0c97a349e17 100644 --- a/models/apis/iot/2015-05-28/api-2.json +++ b/models/apis/iot/2015-05-28/api-2.json @@ -2807,7 +2807,8 @@ {"shape":"InternalException"}, {"shape":"NotConfiguredException"}, {"shape":"InvalidRequestException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"LimitExceededException"} ] }, "SetV2LoggingOptions":{ diff --git a/models/apis/mediapackage-vod/2018-11-07/api-2.json b/models/apis/mediapackage-vod/2018-11-07/api-2.json index 000203613ee..d38eeb4b140 100644 --- a/models/apis/mediapackage-vod/2018-11-07/api-2.json +++ b/models/apis/mediapackage-vod/2018-11-07/api-2.json @@ -419,6 +419,45 @@ "output": { "shape": "ListPackagingGroupsResponse" } + }, + "ListTagsForResource": { + "errors": [], + "http": { + "method": "GET", + "requestUri": "/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "name": "ListTagsForResource", + "output": { + "shape": "ListTagsForResourceResponse" + } + }, + "TagResource": { + "errors": [], + "http": { + "method": "POST", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "name": "TagResource" + }, + "UntagResource": { + "errors": [], + "http": { + "method": "DELETE", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "name": "UntagResource" } }, "shapes": { @@ -463,6 +502,10 @@ "SourceRoleArn": { "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -488,6 +531,10 @@ "SourceRoleArn": { "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -540,6 +587,10 @@ 
"SourceRoleArn": { "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -597,6 +648,10 @@ "SourceRoleArn": { "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -640,6 +695,10 @@ "SourceRoleArn": { "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -669,6 +728,10 @@ "PackagingGroupId": { "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -706,6 +769,10 @@ "PackagingGroupId": { "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -715,6 +782,10 @@ "Id": { "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -735,6 +806,10 @@ "Id": { "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -901,6 +976,10 @@ "SourceRoleArn": { "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -947,6 +1026,10 @@ "PackagingGroupId": { "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -977,6 +1060,10 @@ "Id": { "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1194,6 +1281,28 @@ }, "type": "structure" }, + "ListTagsForResourceRequest": { + "members": { + "ResourceArn": { + "location": "uri", + "locationName": "resource-arn", + "shape": "__string" + } + }, + "required": [ + "ResourceArn" + ], + "type": "structure" + }, + 
"ListTagsForResourceResponse": { + "members": { + "Tags": { + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, "ManifestLayout": { "enum": [ "FULL", @@ -1293,6 +1402,10 @@ "PackagingGroupId": { "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1322,6 +1435,10 @@ "PackagingGroupId": { "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -1356,6 +1473,10 @@ "Id": { "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1365,6 +1486,10 @@ "Id": { "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -1460,6 +1585,45 @@ }, "type": "structure" }, + "TagResourceRequest": { + "members": { + "ResourceArn": { + "location": "uri", + "locationName": "resource-arn", + "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "ResourceArn", + "Tags" + ], + "type": "structure" + }, + "Tags": { + "key": { + "shape": "__string" + }, + "type": "map", + "value": { + "shape": "__string" + } + }, + "TagsModel": { + "members": { + "Tags": { + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "Tags" + ], + "type": "structure" + }, "TooManyRequestsException": { "error": { "httpStatusCode": 429 @@ -1486,6 +1650,25 @@ }, "type": "structure" }, + "UntagResourceRequest": { + "members": { + "ResourceArn": { + "location": "uri", + "locationName": "resource-arn", + "shape": "__string" + }, + "TagKeys": { + "location": "querystring", + "locationName": "tagKeys", + "shape": "__listOf__string" + } + }, + "required": [ + "TagKeys", + "ResourceArn" + ], + "type": "structure" + }, "__PeriodTriggersElement": { "enum": [ "ADS" @@ -1558,6 +1741,15 @@ 
"__long": { "type": "long" }, + "__mapOf__string": { + "key": { + "shape": "__string" + }, + "type": "map", + "value": { + "shape": "__string" + } + }, "__string": { "type": "string" } diff --git a/models/apis/mediapackage-vod/2018-11-07/docs-2.json b/models/apis/mediapackage-vod/2018-11-07/docs-2.json index c4424d219e1..382072efb5b 100644 --- a/models/apis/mediapackage-vod/2018-11-07/docs-2.json +++ b/models/apis/mediapackage-vod/2018-11-07/docs-2.json @@ -13,7 +13,10 @@ "DescribePackagingGroup" : "Returns a description of a MediaPackage VOD PackagingGroup resource.", "ListAssets" : "Returns a collection of MediaPackage VOD Asset resources.", "ListPackagingConfigurations" : "Returns a collection of MediaPackage VOD PackagingConfiguration resources.", - "ListPackagingGroups" : "Returns a collection of MediaPackage VOD PackagingGroup resources." + "ListPackagingGroups" : "Returns a collection of MediaPackage VOD PackagingGroup resources.", + "ListTagsForResource" : null, + "TagResource" : null, + "UntagResource" : null }, "shapes" : { "AdMarkers" : { @@ -191,6 +194,22 @@ "MssManifest$StreamSelection" : null } }, + "Tags" : { + "base" : "A collection of tags associated with a resource", + "refs" : { + "Asset$Tags" : null, + "AssetCreateParameters$Tags" : null, + "AssetShallow$Tags" : null, + "PackagingConfiguration$Tags" : null, + "PackagingConfigurationCreateParameters$Tags" : null, + "PackagingGroup$Tags" : null, + "PackagingGroupCreateParameters$Tags" : null + } + }, + "TagsModel" : { + "base" : null, + "refs" : { } + }, "__PeriodTriggersElement" : { "base" : null, "refs" : { @@ -273,6 +292,12 @@ "SpekeKeyProvider$SystemIds" : "The system IDs to include in key requests." 
} }, + "__mapOf__string" : { + "base" : null, + "refs" : { + "TagsModel$Tags" : null + } + }, "__string" : { "base" : null, "refs" : { @@ -315,7 +340,9 @@ "PackagingGroupList$NextToken" : "A token that can be used to resume pagination from the end of the collection.", "SpekeKeyProvider$RoleArn" : "An Amazon Resource Name (ARN) of an IAM role that AWS Elemental\nMediaPackage will assume when accessing the key provider service.\n", "SpekeKeyProvider$Url" : "The URL of the external key provider service.", - "__listOf__string$member" : null + "Tags$member" : null, + "__listOf__string$member" : null, + "__mapOf__string$member" : null } } } diff --git a/models/apis/pinpoint/2016-12-01/api-2.json b/models/apis/pinpoint/2016-12-01/api-2.json index 9801ce28d3e..76dfbcd4139 100644 --- a/models/apis/pinpoint/2016-12-01/api-2.json +++ b/models/apis/pinpoint/2016-12-01/api-2.json @@ -4952,6 +4952,14 @@ "Rows" ] }, + "CampaignCustomMessage": { + "type": "structure", + "members": { + "Data": { + "shape": "__string" + } + } + }, "CampaignDateRangeKpiResponse": { "type": "structure", "members": { @@ -5064,6 +5072,9 @@ "CreationDate": { "shape": "__string" }, + "CustomDeliveryConfiguration": { + "shape": "CustomDeliveryConfiguration" + }, "DefaultState": { "shape": "CampaignState" }, @@ -5650,6 +5661,20 @@ ], "payload": "CreateTemplateMessageBody" }, + "CustomDeliveryConfiguration": { + "type": "structure", + "members": { + "DeliveryUri": { + "shape": "__string" + }, + "EndpointTypes": { + "shape": "ListOf__EndpointTypesElement" + } + }, + "required": [ + "DeliveryUri" + ] + }, "DefaultMessage": { "type": "structure", "members": { @@ -9318,6 +9343,9 @@ "BaiduMessage": { "shape": "Message" }, + "CustomMessage": { + "shape": "CampaignCustomMessage" + }, "DefaultMessage": { "shape": "Message" }, @@ -10765,6 +10793,9 @@ "TreatmentResource": { "type": "structure", "members": { + "CustomDeliveryConfiguration": { + "shape": "CustomDeliveryConfiguration" + }, "Id": { "shape": "__string" }, 
@@ -11815,6 +11846,9 @@ "AdditionalTreatments": { "shape": "ListOfWriteTreatmentResource" }, + "CustomDeliveryConfiguration": { + "shape": "CustomDeliveryConfiguration" + }, "Description": { "shape": "__string" }, @@ -11940,6 +11974,9 @@ "WriteTreatmentResource": { "type": "structure", "members": { + "CustomDeliveryConfiguration": { + "shape": "CustomDeliveryConfiguration" + }, "MessageConfiguration": { "shape": "MessageConfiguration" }, @@ -11963,6 +12000,22 @@ "SizePercent" ] }, + "__EndpointTypesElement": { + "type": "string", + "enum": [ + "GCM", + "APNS", + "APNS_SANDBOX", + "APNS_VOIP", + "APNS_VOIP_SANDBOX", + "ADM", + "SMS", + "VOICE", + "EMAIL", + "BAIDU", + "CUSTOM" + ] + }, "__boolean": { "type": "boolean" }, @@ -12104,6 +12157,12 @@ "shape": "WriteTreatmentResource" } }, + "ListOf__EndpointTypesElement": { + "type": "list", + "member": { + "shape": "__EndpointTypesElement" + } + }, "ListOf__string": { "type": "list", "member": { diff --git a/models/apis/pinpoint/2016-12-01/docs-2.json b/models/apis/pinpoint/2016-12-01/docs-2.json index 6dbd297d602..85c48f18504 100644 --- a/models/apis/pinpoint/2016-12-01/docs-2.json +++ b/models/apis/pinpoint/2016-12-01/docs-2.json @@ -101,8 +101,8 @@ "UpdateCampaign" : "

Updates the configuration and other settings for a campaign.

", "UpdateEmailChannel" : "

Enables the email channel for an application or updates the status and settings of the email channel for an application.

", "UpdateEmailTemplate" : "

Updates an existing message template for messages that are sent through the email channel.

", - "UpdateEndpoint" : "

Creates a new endpoint for an application or updates the settings and attributes of an existing endpoint for an application. You can also use this operation to define custom attributes (Attributes, Metrics, and UserAttributes properties) for an endpoint.

", - "UpdateEndpointsBatch" : "

Creates a new batch of endpoints for an application or updates the settings and attributes of a batch of existing endpoints for an application. You can also use this operation to define custom attributes (Attributes, Metrics, and UserAttributes properties) for a batch of endpoints.

", + "UpdateEndpoint" : "

Creates a new endpoint for an application or updates the settings and attributes of an existing endpoint for an application. You can also use this operation to define custom attributes for an endpoint. If an update includes one or more values for a custom attribute, Amazon Pinpoint replaces (overwrites) any existing values with the new values.

", + "UpdateEndpointsBatch" : "

Creates a new batch of endpoints for an application or updates the settings and attributes of a batch of existing endpoints for an application. You can also use this operation to define custom attributes for a batch of endpoints. If an update includes one or more values for a custom attribute, Amazon Pinpoint replaces (overwrites) any existing values with the new values.

", "UpdateGcmChannel" : "

Enables the GCM channel for an application or updates the status and settings of the GCM channel for an application.

", "UpdateJourney" : "

Updates the configuration and other settings for a journey.

", "UpdateJourneyState" : "

Cancels (stops) an active journey.

", @@ -282,6 +282,12 @@ "JourneyDateRangeKpiResponse$KpiResult" : "

An array of objects that contains the results of the query. Each object contains the value for the metric and metadata about that value.

" } }, + "CampaignCustomMessage" : { + "base" : "

Specifies the contents of a message that's sent through a custom channel to recipients of a campaign.

", + "refs" : { + "MessageConfiguration$CustomMessage" : "

The message that the campaign sends through a custom channel, as specified by the delivery configuration (CustomDeliveryConfiguration) settings for the campaign. If specified, this message overrides the default message.

" + } + }, "CampaignDateRangeKpiResponse" : { "base" : "

Provides the results of a query that retrieved the data for a standard metric that applies to a campaign, and provides information about that query.

", "refs" : { } @@ -289,7 +295,7 @@ "CampaignEmailMessage" : { "base" : "

Specifies the content and \"From\" address for an email message that's sent to recipients of a campaign.

", "refs" : { - "MessageConfiguration$EmailMessage" : "

The message that the campaign sends through the email channel.

" + "MessageConfiguration$EmailMessage" : "

The message that the campaign sends through the email channel. If specified, this message overrides the default message.

" } }, "CampaignEventFilter" : { @@ -299,20 +305,20 @@ } }, "CampaignHook" : { - "base" : "

Specifies the AWS Lambda function to use as a code hook for a campaign.

", + "base" : "

Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign.

", "refs" : { - "ApplicationSettingsResource$CampaignHook" : "

The settings for the AWS Lambda function to use by default as a code hook for campaigns in the application.

", - "CampaignResponse$Hook" : "

The settings for the AWS Lambda function to use as a code hook for the campaign.

", - "WriteApplicationSettingsRequest$CampaignHook" : "

The settings for the AWS Lambda function to use by default as a code hook for campaigns in the application. To override these settings for a specific campaign, use the Campaign resource to define custom Lambda function settings for the campaign.

", - "WriteCampaignRequest$Hook" : "

The settings for the AWS Lambda function to use as a code hook for the campaign.

" + "ApplicationSettingsResource$CampaignHook" : "

The settings for the AWS Lambda function to invoke by default as a code hook for campaigns in the application. You can use this hook to customize segments that are used by campaigns in the application.

", + "CampaignResponse$Hook" : "

The settings for the AWS Lambda function to use as a code hook for the campaign. You can use this hook to customize the segment that's used by the campaign.

", + "WriteApplicationSettingsRequest$CampaignHook" : "

The settings for the AWS Lambda function to invoke by default as a code hook for campaigns in the application. You can use this hook to customize segments that are used by campaigns in the application.

To override these settings and define custom settings for a specific campaign, use the CampaignHook object of the Campaign resource.

", + "WriteCampaignRequest$Hook" : "

The settings for the AWS Lambda function to invoke as a code hook for the campaign. You can use this hook to customize the segment that's used by the campaign.

" } }, "CampaignLimits" : { - "base" : "

Specifies limits on the messages that a campaign can send.

", + "base" : "

For a campaign, specifies limits on the messages that the campaign can send. For an application, specifies the default limits for messages that campaigns and journeys in the application can send.

", "refs" : { - "ApplicationSettingsResource$Limits" : "

The default sending limits for campaigns in the application.

", + "ApplicationSettingsResource$Limits" : "

The default sending limits for campaigns and journeys in the application.

", "CampaignResponse$Limits" : "

The messaging limits for the campaign.

", - "WriteApplicationSettingsRequest$Limits" : "

The default sending limits for campaigns in the application. To override these limits for a specific campaign, use the Campaign resource to define custom limits for the campaign.

", + "WriteApplicationSettingsRequest$Limits" : "

The default sending limits for campaigns and journeys in the application. To override these limits and define custom limits for a specific campaign or journey, use the Campaign resource or the Journey resource, respectively.

", "WriteCampaignRequest$Limits" : "

The messaging limits for the campaign.

" } }, @@ -325,13 +331,13 @@ "CampaignSmsMessage" : { "base" : "

Specifies the content and settings for an SMS message that's sent to recipients of a campaign.

", "refs" : { - "MessageConfiguration$SMSMessage" : "

The message that the campaign sends through the SMS channel.

" + "MessageConfiguration$SMSMessage" : "

The message that the campaign sends through the SMS channel. If specified, this message overrides the default message.

" } }, "CampaignState" : { "base" : "

Provides information about the status of a campaign.

", "refs" : { - "CampaignResponse$DefaultState" : "

The current status of the campaign's default treatment. This value exists only for campaigns that have more than one treatment, to support A/B testing.

", + "CampaignResponse$DefaultState" : "

The current status of the campaign's default treatment. This value exists only for campaigns that have more than one treatment.

", "CampaignResponse$State" : "

The current status of the campaign.

", "TreatmentResource$State" : "

The current status of the treatment.

" } @@ -339,7 +345,7 @@ "CampaignStatus" : { "base" : null, "refs" : { - "CampaignState$CampaignStatus" : "

The current status of the campaign, or the current status of a treatment that belongs to an A/B test campaign. If a campaign uses A/B testing, the campaign has a status of COMPLETED only if all campaign treatments have a status of COMPLETED.

" + "CampaignState$CampaignStatus" : "

The current status of the campaign, or the current status of a treatment that belongs to an A/B test campaign.

If a campaign uses A/B testing, the campaign has a status of COMPLETED only if all campaign treatments have a status of COMPLETED. If you delete the segment that's associated with a campaign, the campaign fails and has a status of DELETED.

" } }, "CampaignsResponse" : { @@ -390,6 +396,15 @@ "base" : "

Provides information about a request to create a message template.

", "refs" : { } }, + "CustomDeliveryConfiguration" : { + "base" : "

Specifies the delivery configuration settings for sending a campaign or campaign treatment through a custom channel. This object is required if you use the CampaignCustomMessage object to define the message to send for the campaign or campaign treatment.

", + "refs" : { + "CampaignResponse$CustomDeliveryConfiguration" : "

The delivery configuration settings for sending the campaign through a custom channel.

", + "TreatmentResource$CustomDeliveryConfiguration" : "

The delivery configuration settings for sending the treatment through a custom channel. This object is required if the MessageConfiguration object for the treatment specifies a CustomMessage object.

", + "WriteCampaignRequest$CustomDeliveryConfiguration" : "

The delivery configuration settings for sending the campaign through a custom channel. This object is required if the MessageConfiguration object for the campaign specifies a CustomMessage object.

", + "WriteTreatmentResource$CustomDeliveryConfiguration" : "

The delivery configuration settings for sending the treatment through a custom channel. This object is required if the MessageConfiguration object for the treatment specifies a CustomMessage object.

" + } + }, "DefaultMessage" : { "base" : "

Specifies the default message for all channels.

", "refs" : { @@ -522,8 +537,8 @@ "EndpointUser" : { "base" : "

Specifies data for one or more attributes that describe the user who's associated with an endpoint.

", "refs" : { - "EndpointBatchItem$User" : "

One or more custom user attributes that describe the user who's associated with the endpoint.

", - "EndpointRequest$User" : "

One or more custom user attributes that describe the user who's associated with the endpoint.

", + "EndpointBatchItem$User" : "

One or more custom attributes that describe the user who's associated with the endpoint.

", + "EndpointRequest$User" : "

One or more custom attributes that describe the user who's associated with the endpoint.

", "EndpointResponse$User" : "

One or more custom user attributes that your app reports to Amazon Pinpoint for the user who's associated with the endpoint.

", "PublicEndpoint$User" : "

One or more custom user attributes that your app reports to Amazon Pinpoint for the user who's associated with the endpoint.

" } @@ -747,11 +762,11 @@ "Message" : { "base" : "

Specifies the content and settings for a push notification that's sent to recipients of a campaign.

", "refs" : { - "MessageConfiguration$ADMMessage" : "

The message that the campaign sends through the ADM (Amazon Device Messaging) channel. This message overrides the default message.

", - "MessageConfiguration$APNSMessage" : "

The message that the campaign sends through the APNs (Apple Push Notification service) channel. This message overrides the default message.

", - "MessageConfiguration$BaiduMessage" : "

The message that the campaign sends through the Baidu (Baidu Cloud Push) channel. This message overrides the default message.

", + "MessageConfiguration$ADMMessage" : "

The message that the campaign sends through the ADM (Amazon Device Messaging) channel. If specified, this message overrides the default message.

", + "MessageConfiguration$APNSMessage" : "

The message that the campaign sends through the APNs (Apple Push Notification service) channel. If specified, this message overrides the default message.

", + "MessageConfiguration$BaiduMessage" : "

The message that the campaign sends through the Baidu (Baidu Cloud Push) channel. If specified, this message overrides the default message.

", "MessageConfiguration$DefaultMessage" : "

The default message that the campaign sends through all the channels that are configured for the campaign.

", - "MessageConfiguration$GCMMessage" : "

The message that the campaign sends through the GCM channel, which enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service. This message overrides the default message.

" + "MessageConfiguration$GCMMessage" : "

The message that the campaign sends through the GCM channel, which enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service. If specified, this message overrides the default message.

" } }, "MessageBody" : { @@ -801,7 +816,7 @@ "Mode" : { "base" : null, "refs" : { - "CampaignHook$Mode" : "

Specifies which Lambda mode to use when invoking the AWS Lambda function.

" + "CampaignHook$Mode" : "

The mode that Amazon Pinpoint uses to invoke the AWS Lambda function. Possible values are:

" } }, "MultiConditionalBranch" : { @@ -1148,7 +1163,7 @@ "refs" : { } }, "TreatmentResource" : { - "base" : "

Specifies the settings for a campaign treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", + "base" : "

Specifies the settings for a campaign treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", "refs" : { "ListOfTreatmentResource$member" : null } @@ -1224,11 +1239,17 @@ "refs" : { } }, "WriteTreatmentResource" : { - "base" : "

Specifies the settings for a campaign treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", + "base" : "

Specifies the settings for a campaign treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", "refs" : { "ListOfWriteTreatmentResource$member" : null } }, + "__EndpointTypesElement" : { + "base" : null, + "refs" : { + "ListOf__EndpointTypesElement$member" : null + } + }, "__boolean" : { "base" : null, "refs" : { @@ -1293,7 +1314,7 @@ "VoiceChannelResponse$HasCredential" : "

(Not used) This property is retained only for backward compatibility.

", "VoiceChannelResponse$IsArchived" : "

Specifies whether the voice channel is archived.

", "WriteApplicationSettingsRequest$CloudWatchMetricsEnabled" : "

Specifies whether to enable application-related alarms in Amazon CloudWatch.

", - "WriteCampaignRequest$IsPaused" : "

Specifies whether to pause the campaign. A paused campaign doesn't run unless you resume it by setting this value to false.

", + "WriteCampaignRequest$IsPaused" : "

Specifies whether to pause the campaign. A paused campaign doesn't run unless you resume it by changing this value to false.

", "WriteJourneyRequest$LocalTime" : "

Specifies whether the journey's scheduled start and end times use each participant's local time. To base the schedule on each participant's local time, set this value to true.

" } }, @@ -1325,16 +1346,16 @@ "ActivityResponse$TotalEndpointCount" : "

The total number of endpoints that the campaign attempted to deliver messages to.

", "BaiduChannelResponse$Version" : "

The current version of the Baidu channel.

", "BaiduMessage$TimeToLive" : "

The amount of time, in seconds, that the Baidu Cloud Push service should store the message if the recipient's device is offline. The default value and maximum supported time is 604,800 seconds (7 days).

", - "CampaignLimits$Daily" : "

The maximum number of messages that a campaign can send to a single endpoint during a 24-hour period. The maximum value is 100.

", + "CampaignLimits$Daily" : "

The maximum number of messages that a campaign can send to a single endpoint during a 24-hour period. For an application, this value specifies the default limit for the number of messages that campaigns and journeys can send to a single endpoint during a 24-hour period. The maximum value is 100.

", "CampaignLimits$MaximumDuration" : "

The maximum amount of time, in seconds, that a campaign can attempt to deliver a message after the scheduled start time for the campaign. The minimum value is 60 seconds.

", - "CampaignLimits$MessagesPerSecond" : "

The maximum number of messages that a campaign can send each second. The minimum value is 50. The maximum value is 20,000.

", - "CampaignLimits$Total" : "

The maximum number of messages that a campaign can send to a single endpoint during the course of the campaign. The maximum value is 100.

", + "CampaignLimits$MessagesPerSecond" : "

The maximum number of messages that a campaign can send each second. For an application, this value specifies the default limit for the number of messages that campaigns and journeys can send each second. The minimum value is 50. The maximum value is 20,000.

", + "CampaignLimits$Total" : "

The maximum number of messages that a campaign can send to a single endpoint during the course of the campaign. If a campaign recurs, this setting applies to all runs of the campaign. The maximum value is 100.

", "CampaignResponse$HoldoutPercent" : "

The allocated percentage of users (segment members) who shouldn't receive messages from the campaign.

", "CampaignResponse$SegmentVersion" : "

The version number of the segment that's associated with the campaign.

", "CampaignResponse$Version" : "

The version number of the campaign.

", "ChannelResponse$Version" : "

The current version of the channel.

", - "CreateRecommenderConfiguration$RecommendationsPerMessage" : "

The number of recommended items to retrieve from the model for each endpoint or user, depending on the value for the RecommenderUserIdType property. This number determines how many recommended attributes are available for use as message variables in message templates. The minimum value is 1. The maximum value is 5. The default value is 5.

To use multiple recommended items and custom attributes with message variables, you have to use an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

", - "EmailChannelResponse$MessagesPerSecond" : "

The maximum number of emails that you can send through the channel each second.

", + "CreateRecommenderConfiguration$RecommendationsPerMessage" : "

The number of recommended items to retrieve from the model for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This number determines how many recommended items are available for use in message variables. The minimum value is 1. The maximum value is 5. The default value is 5.

To use multiple recommended items and custom attributes with message variables, you have to use an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

", + "EmailChannelResponse$MessagesPerSecond" : "

The maximum number of emails that can be sent through the channel each second.

", "EmailChannelResponse$Version" : "

The current version of the email channel.

", "EndpointItemResponse$StatusCode" : "

The status code that's returned in the response as a result of processing the endpoint data.

", "EndpointMessageResult$StatusCode" : "

The downstream service status code for delivering the message.

", @@ -1361,7 +1382,7 @@ "MessageResult$StatusCode" : "

The downstream service status code for delivering the message.

", "NumberValidateResponse$PhoneTypeCode" : "

The phone type, represented by an integer. Valid values are: 0 (mobile), 1 (landline), 2 (VoIP), 3 (invalid), 4 (other), and 5 (prepaid).

", "RandomSplitEntry$Percentage" : "

The percentage of participants to send down the activity path.

To determine which participants are sent down each path, Amazon Pinpoint applies a probability-based algorithm to the percentages that you specify for the paths. Therefore, the actual percentage of participants who are sent down a path may not be equal to the percentage that you specify.

", - "RecommenderConfigurationResponse$RecommendationsPerMessage" : "

The number of recommended items that are retrieved from the model for each endpoint or user, depending on the value for the RecommenderUserIdType property. This number determines how many recommended attributes are available for use as message variables in message templates.

", + "RecommenderConfigurationResponse$RecommendationsPerMessage" : "

The number of recommended items that are retrieved from the model for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This number determines how many recommended items are available for use in message variables.

", "SMSChannelResponse$PromotionalMessagesPerSecond" : "

The maximum number of promotional messages that you can send through the SMS channel each second.

", "SMSChannelResponse$TransactionalMessagesPerSecond" : "

The maximum number of transactional messages that you can send through the SMS channel each second.

", "SMSChannelResponse$Version" : "

The current version of the SMS channel.

", @@ -1370,7 +1391,7 @@ "SegmentResponse$Version" : "

The version number of the segment.

", "Session$Duration" : "

The duration of the session, in milliseconds.

", "TreatmentResource$SizePercent" : "

The allocated percentage of users (segment members) that the treatment is sent to.

", - "UpdateRecommenderConfiguration$RecommendationsPerMessage" : "

The number of recommended items to retrieve from the model for each endpoint or user, depending on the value for the RecommenderUserIdType property. This number determines how many recommended attributes are available for use as message variables in message templates. The minimum value is 1. The maximum value is 5. The default value is 5.

To use multiple recommended items and custom attributes with message variables, you have to use an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

", + "UpdateRecommenderConfiguration$RecommendationsPerMessage" : "

The number of recommended items to retrieve from the model for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This number determines how many recommended items are available for use in message variables. The minimum value is 1. The maximum value is 5. The default value is 5.

To use multiple recommended items and custom attributes with message variables, you have to use an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

", "VoiceChannelResponse$Version" : "

The current version of the voice channel.

", "WriteCampaignRequest$HoldoutPercent" : "

The allocated percentage of users (segment members) who shouldn't receive messages from the campaign.

", "WriteCampaignRequest$SegmentVersion" : "

The version of the segment to associate with the campaign.

", @@ -1511,6 +1532,12 @@ "WriteCampaignRequest$AdditionalTreatments" : "

An array of requests that defines additional treatments for the campaign, in addition to the default treatment for the campaign.

" } }, + "ListOf__EndpointTypesElement" : { + "base" : null, + "refs" : { + "CustomDeliveryConfiguration$EndpointTypes" : "

The types of endpoints to send the campaign or treatment to. Each valid value maps to a type of channel that you can associate with an endpoint by using the ChannelType property of an endpoint.

" + } + }, "ListOf__string" : { "base" : null, "refs" : { @@ -1655,7 +1682,7 @@ "BaiduMessage$Data" : "

The JSON data payload to use for the push notification, if the notification is a silent push notification. This payload is added to the data.pinpoint.jsonBody object of the notification.

", "CampaignResponse$tags" : "

A string-to-string map of key-value pairs that identifies the tags that are associated with the campaign. Each tag consists of a required tag key and an associated tag value.

", "CreateApplicationRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the application. Each tag consists of a required tag key and an associated tag value.

", - "CreateRecommenderConfiguration$Attributes" : "

A map of key-value pairs that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommenderUserIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

In the map, the key is the name of a custom attribute and the value is a custom display name for that attribute. The display name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console. The following restrictions apply to these names:

This object is required if the configuration invokes an AWS Lambda function (LambdaFunctionArn) to process recommendation data. Otherwise, don't include this object in your request.

", + "CreateRecommenderConfiguration$Attributes" : "

A map of key-value pairs that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommendationProviderIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

In the map, the key is the name of a custom attribute and the value is a custom display name for that attribute. The display name appears in the Attribute finder of the template editor on the Amazon Pinpoint console. The following restrictions apply to these names:

This object is required if the configuration invokes an AWS Lambda function (RecommendationTransformerUri) to process recommendation data. Otherwise, don't include this object in your request.

", "DefaultPushNotificationMessage$Data" : "

The JSON data payload to use for the default push notification, if the notification is a silent push notification. This payload is added to the data.pinpoint.jsonBody object of the notification.

", "EmailTemplateRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

", "EmailTemplateResponse$tags" : "

A string-to-string map of key-value pairs that identifies the tags that are associated with the message template. Each tag consists of a required tag key and an associated tag value.

", @@ -1668,14 +1695,14 @@ "MessageRequest$Context" : "

A map of custom attributes to attach to the message. For a push notification, this payload is added to the data.pinpoint object. For an email or text message, this payload is added to email/SMS delivery receipt event attributes.

", "PushNotificationTemplateRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

", "PushNotificationTemplateResponse$tags" : "

A string-to-string map of key-value pairs that identifies the tags that are associated with the message template. Each tag consists of a required tag key and an associated tag value.

", - "RecommenderConfigurationResponse$Attributes" : "

A map that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommenderUserIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

This value is null if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

", + "RecommenderConfigurationResponse$Attributes" : "

A map that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommendationProviderIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

This value is null if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

", "SMSTemplateRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

", "SMSTemplateResponse$tags" : "

A string-to-string map of key-value pairs that identifies the tags that are associated with the message template. Each tag consists of a required tag key and an associated tag value.

", "SegmentResponse$tags" : "

A string-to-string map of key-value pairs that identifies the tags that are associated with the segment. Each tag consists of a required tag key and an associated tag value.

", "SendUsersMessageRequest$Context" : "

A map of custom attribute-value pairs. For a push notification, Amazon Pinpoint adds these attributes to the data.pinpoint object in the body of the notification payload. Amazon Pinpoint also provides these attributes in the events that it generates for users-messages deliveries.

", "TagsModel$tags" : "

A string-to-string map of key-value pairs that defines the tags for an application, campaign, message template, or segment. Each of these resources can have a maximum of 50 tags.

Each tag consists of a required tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

", "TemplateResponse$tags" : "

A map of key-value pairs that identifies the tags that are associated with the message template. This object isn't included in a TemplateResponse object. To retrieve this object for a template, use the GetEmailTemplate, GetPushTemplate, GetSmsTemplate, or GetVoiceTemplate operation, depending on the type of template that you want to retrieve the object for.

", - "UpdateRecommenderConfiguration$Attributes" : "

A map of key-value pairs that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommenderUserIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

In the map, the key is the name of a custom attribute and the value is a custom display name for that attribute. The display name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console. The following restrictions apply to these names:

This object is required if the configuration invokes an AWS Lambda function (LambdaFunctionArn) to process recommendation data. Otherwise, don't include this object in your request.

", + "UpdateRecommenderConfiguration$Attributes" : "

A map of key-value pairs that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommendationProviderIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

In the map, the key is the name of a custom attribute and the value is a custom display name for that attribute. The display name appears in the Attribute finder of the template editor on the Amazon Pinpoint console. The following restrictions apply to these names:

This object is required if the configuration invokes an AWS Lambda function (RecommendationTransformerUri) to process recommendation data. Otherwise, don't include this object in your request.

", "VoiceTemplateRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

", "VoiceTemplateResponse$tags" : "

A string-to-string map of key-value pairs that identifies the tags that are associated with the message template. Each tag consists of a required tag key and an associated tag value.

", "WriteCampaignRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the campaign. Each tag consists of a required tag key and an associated tag value.

", @@ -1803,7 +1830,7 @@ "AndroidPushNotificationTemplate$Url" : "

The URL to open in a recipient's default mobile browser, if a recipient taps a push notification that's based on the message template and the value of the Action property is URL.

", "ApplicationDateRangeKpiResponse$ApplicationId" : "

The unique identifier for the application that the metric applies to.

", "ApplicationDateRangeKpiResponse$EndTime" : "

The last date and time of the date range that was used to filter the query results, in extended ISO 8601 format. The date range is inclusive.

", - "ApplicationDateRangeKpiResponse$KpiName" : "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

", + "ApplicationDateRangeKpiResponse$KpiName" : "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

", "ApplicationDateRangeKpiResponse$NextToken" : "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null for the Application Metrics resource because the resource returns all results in a single page.

", "ApplicationDateRangeKpiResponse$StartTime" : "

The first date and time of the date range that was used to filter the query results, in extended ISO 8601 format. The date range is inclusive.

", "ApplicationResponse$Arn" : "

The Amazon Resource Name (ARN) of the application.

", @@ -1832,17 +1859,18 @@ "BaiduMessage$Sound" : "

The sound to play when the recipient receives the push notification. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.

", "BaiduMessage$Title" : "

The title to display above the notification message on the recipient's device.

", "BaiduMessage$Url" : "

The URL to open in the recipient's default mobile browser, if a recipient taps the push notification and the value of the Action property is URL.

", + "CampaignCustomMessage$Data" : "

The raw, JSON-formatted string to use as the payload for the message. The maximum size is 5 KB.

", "CampaignDateRangeKpiResponse$ApplicationId" : "

The unique identifier for the application that the metric applies to.

", "CampaignDateRangeKpiResponse$CampaignId" : "

The unique identifier for the campaign that the metric applies to.

", "CampaignDateRangeKpiResponse$EndTime" : "

The last date and time of the date range that was used to filter the query results, in extended ISO 8601 format. The date range is inclusive.

", - "CampaignDateRangeKpiResponse$KpiName" : "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

", + "CampaignDateRangeKpiResponse$KpiName" : "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

", "CampaignDateRangeKpiResponse$NextToken" : "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null for the Campaign Metrics resource because the resource returns all results in a single page.

", "CampaignDateRangeKpiResponse$StartTime" : "

The first date and time of the date range that was used to filter the query results, in extended ISO 8601 format. The date range is inclusive.

", "CampaignEmailMessage$Body" : "

The body of the email for recipients whose email clients don't render HTML content.

", "CampaignEmailMessage$FromAddress" : "

The verified email address to send the email from. The default address is the FromAddress specified for the email channel for the application.

", "CampaignEmailMessage$HtmlBody" : "

The body of the email, in HTML format, for recipients whose email clients render HTML content.

", "CampaignEmailMessage$Title" : "

The subject line, or title, of the email.

", - "CampaignHook$LambdaFunctionName" : "

The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Pinpoint invokes to send messages for a campaign.

", + "CampaignHook$LambdaFunctionName" : "

The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Pinpoint invokes to customize a segment for a campaign.

", "CampaignHook$WebUrl" : "

The web URL that Amazon Pinpoint calls to invoke the AWS Lambda function over HTTPS.

", "CampaignResponse$ApplicationId" : "

The unique identifier for the application that the campaign applies to.

", "CampaignResponse$Arn" : "

The Amazon Resource Name (ARN) of the campaign.

", @@ -1852,8 +1880,8 @@ "CampaignResponse$LastModifiedDate" : "

The date, in ISO 8601 format, when the campaign was last modified.

", "CampaignResponse$Name" : "

The name of the campaign.

", "CampaignResponse$SegmentId" : "

The unique identifier for the segment that's associated with the campaign.

", - "CampaignResponse$TreatmentDescription" : "

The custom description of a variation of the campaign that's used for A/B testing.

", - "CampaignResponse$TreatmentName" : "

The custom name of a variation of the campaign that's used for A/B testing.

", + "CampaignResponse$TreatmentDescription" : "

The custom description of the default treatment for the campaign.

", + "CampaignResponse$TreatmentName" : "

The custom name of the default treatment for the campaign, if the campaign has multiple treatments. A treatment is a variation of a campaign that's used for A/B testing.

", "CampaignSmsMessage$Body" : "

The body of the SMS message.

", "CampaignSmsMessage$SenderId" : "

The sender ID to display on recipients' devices when they receive the SMS message.

", "CampaignsResponse$NextToken" : "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

", @@ -1865,16 +1893,17 @@ "ConditionalSplitActivity$FalseActivity" : "

The unique identifier for the activity to perform if the conditions aren't met.

", "ConditionalSplitActivity$TrueActivity" : "

The unique identifier for the activity to perform if the conditions are met.

", "CreateApplicationRequest$Name" : "

The display name of the application. This name is displayed as the Project name on the Amazon Pinpoint console.

", - "CreateRecommenderConfiguration$Description" : "

A custom description of the configuration for the recommender model. The description can contain up to 128 characters.

", + "CreateRecommenderConfiguration$Description" : "

A custom description of the configuration for the recommender model. The description can contain up to 128 characters. The characters can be letters, numbers, spaces, or the following symbols: _ ; () , -.

", "CreateRecommenderConfiguration$Name" : "

A custom name of the configuration for the recommender model. The name must start with a letter or number and it can contain up to 128 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-).

", - "CreateRecommenderConfiguration$RecommendationProviderIdType" : "

The type of Amazon Pinpoint ID to associate with unique user IDs in the recommender model. This value enables the model to use attribute and event data that’s specific to a particular endpoint or user in an Amazon Pinpoint application. Valid values are:

", + "CreateRecommenderConfiguration$RecommendationProviderIdType" : "

The type of Amazon Pinpoint ID to associate with unique user IDs in the recommender model. This value enables the model to use attribute and event data that's specific to a particular endpoint or user in an Amazon Pinpoint application. Valid values are:

", "CreateRecommenderConfiguration$RecommendationProviderRoleArn" : "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon Pinpoint to retrieve recommendation data from the recommender model.

", "CreateRecommenderConfiguration$RecommendationProviderUri" : "

The Amazon Resource Name (ARN) of the recommender model to retrieve recommendation data from. This value must match the ARN of an Amazon Personalize campaign.

", "CreateRecommenderConfiguration$RecommendationTransformerUri" : "

The name or Amazon Resource Name (ARN) of the AWS Lambda function to invoke for additional processing of recommendation data that's retrieved from the recommender model.

", - "CreateRecommenderConfiguration$RecommendationsDisplayName" : "

A custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores a recommended item for each endpoint or user, depending on the value for the RecommenderUserIdType property. This value is required if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

This name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console. The name can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions don't apply to attribute values.

", + "CreateRecommenderConfiguration$RecommendationsDisplayName" : "

A custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores recommended items for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This value is required if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

This name appears in the Attribute finder of the template editor on the Amazon Pinpoint console. The name can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions don't apply to attribute values.

", "CreateTemplateMessageBody$Arn" : "

The Amazon Resource Name (ARN) of the message template that was created.

", "CreateTemplateMessageBody$Message" : "

The message that's returned from the API for the request to create the message template.

", "CreateTemplateMessageBody$RequestID" : "

The unique identifier for the request to create the message template.

", + "CustomDeliveryConfiguration$DeliveryUri" : "

The destination to send the campaign or treatment to. This value can be one of the following:

", "DefaultMessage$Body" : "

The default body of the message.

", "DefaultPushNotificationMessage$Body" : "

The default body of the notification message.

", "DefaultPushNotificationMessage$Title" : "

The default title to display above the notification message on a recipient's device.

", @@ -1883,16 +1912,16 @@ "DefaultPushNotificationTemplate$Sound" : "

The sound to play when a recipient receives a push notification that's based on the message template. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.

For an iOS platform, this value is the key for the name of a sound file in your app's main bundle or the Library/Sounds folder in your app's data container. If the sound file can't be found or you specify default for the value, the system plays the default alert sound.

", "DefaultPushNotificationTemplate$Title" : "

The title to use in push notifications that are based on the message template. This title appears above the notification message on a recipient's device.

", "DefaultPushNotificationTemplate$Url" : "

The URL to open in a recipient's default mobile browser, if a recipient taps a push notification that's based on the message template and the value of the Action property is URL.

", - "EmailChannelRequest$ConfigurationSet" : "

The configuration set that you want to apply to email that you send through the channel by using the Amazon Pinpoint Email API.

", + "EmailChannelRequest$ConfigurationSet" : "

The Amazon SES configuration set that you want to apply to messages that you send through the channel.

", "EmailChannelRequest$FromAddress" : "

The verified email address that you want to send email from when you send email through the channel.

", "EmailChannelRequest$Identity" : "

The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple Email Service (Amazon SES), that you want to use when you send email through the channel.

", "EmailChannelRequest$RoleArn" : "

The ARN of the AWS Identity and Access Management (IAM) role that you want Amazon Pinpoint to use when it submits email-related event data for the channel.

", "EmailChannelResponse$ApplicationId" : "

The unique identifier for the application that the email channel applies to.

", - "EmailChannelResponse$ConfigurationSet" : "

The configuration set that's applied to email that's sent through the channel by using the Amazon Pinpoint Email API.

", + "EmailChannelResponse$ConfigurationSet" : "

The Amazon SES configuration set that's applied to messages that are sent through the channel.

", "EmailChannelResponse$CreationDate" : "

The date and time, in ISO 8601 format, when the email channel was enabled.

", - "EmailChannelResponse$FromAddress" : "

The verified email address that you send email from when you send email through the channel.

", + "EmailChannelResponse$FromAddress" : "

The verified email address that email is sent from when you send email through the channel.

", "EmailChannelResponse$Id" : "

(Deprecated) An identifier for the email channel. This property is retained only for backward compatibility.

", - "EmailChannelResponse$Identity" : "

The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple Email Service (Amazon SES), that you use when you send email through the channel.

", + "EmailChannelResponse$Identity" : "

The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple Email Service (Amazon SES), that's used when you send email through the channel.

", "EmailChannelResponse$LastModifiedBy" : "

The user who last modified the email channel.

", "EmailChannelResponse$LastModifiedDate" : "

The date and time, in ISO 8601 format, when the email channel was last modified.

", "EmailChannelResponse$Platform" : "

The type of messaging or notification platform for the channel. For the email channel, this value is EMAIL.

", @@ -2028,7 +2057,7 @@ "JourneyDateRangeKpiResponse$ApplicationId" : "

The unique identifier for the application that the metric applies to.

", "JourneyDateRangeKpiResponse$EndTime" : "

The last date and time of the date range that was used to filter the query results, in extended ISO 8601 format. The date range is inclusive.

", "JourneyDateRangeKpiResponse$JourneyId" : "

The unique identifier for the journey that the metric applies to.

", - "JourneyDateRangeKpiResponse$KpiName" : "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

", + "JourneyDateRangeKpiResponse$KpiName" : "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

", "JourneyDateRangeKpiResponse$NextToken" : "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null for the Journey Engagement Metrics resource because the resource returns all results in a single page.

", "JourneyDateRangeKpiResponse$StartTime" : "

The first date and time of the date range that was used to filter the query results, in extended ISO 8601 format. The date range is inclusive.

", "JourneyEmailMessage$FromAddress" : "

The verified email address to send the email message from. The default address is the FromAddress specified for the email channel for the application.

", @@ -2116,7 +2145,7 @@ "RecommenderConfigurationResponse$RecommendationProviderRoleArn" : "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon Pinpoint to retrieve recommendation data from the recommender model.

", "RecommenderConfigurationResponse$RecommendationProviderUri" : "

The Amazon Resource Name (ARN) of the recommender model that Amazon Pinpoint retrieves the recommendation data from. This value is the ARN of an Amazon Personalize campaign.

", "RecommenderConfigurationResponse$RecommendationTransformerUri" : "

The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Pinpoint invokes to perform additional processing of recommendation data that it retrieves from the recommender model.

", - "RecommenderConfigurationResponse$RecommendationsDisplayName" : "

The custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores a recommended item for each endpoint or user, depending on the value for the RecommenderUserIdType property. This name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console.

This value is null if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

", + "RecommenderConfigurationResponse$RecommendationsDisplayName" : "

The custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores recommended items for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This name appears in the Attribute finder of the template editor on the Amazon Pinpoint console.

This value is null if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

", "ResultRowValue$Key" : "

The friendly name of the metric whose value is specified by the Value property.

", "ResultRowValue$Type" : "

The data type of the value specified by the Value property.

", "ResultRowValue$Value" : "

In a Values object, the value for the metric that the query retrieved data for. In a GroupedBys object, the value for the field that was used to group data in a result set that contains multiple results (Values objects).

", @@ -2195,14 +2224,14 @@ "TemplatesResponse$NextToken" : "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

", "TreatmentResource$Id" : "

The unique identifier for the treatment.

", "TreatmentResource$TreatmentDescription" : "

The custom description of the treatment.

", - "TreatmentResource$TreatmentName" : "

The custom name of the treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", - "UpdateRecommenderConfiguration$Description" : "

A custom description of the configuration for the recommender model. The description can contain up to 128 characters.

", + "TreatmentResource$TreatmentName" : "

The custom name of the treatment.

", + "UpdateRecommenderConfiguration$Description" : "

A custom description of the configuration for the recommender model. The description can contain up to 128 characters. The characters can be letters, numbers, spaces, or the following symbols: _ ; () , ‐.

", "UpdateRecommenderConfiguration$Name" : "

A custom name of the configuration for the recommender model. The name must start with a letter or number and it can contain up to 128 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-).

", - "UpdateRecommenderConfiguration$RecommendationProviderIdType" : "

The type of Amazon Pinpoint ID to associate with unique user IDs in the recommender model. This value enables the model to use attribute and event data that’s specific to a particular endpoint or user in an Amazon Pinpoint application. Valid values are:

", + "UpdateRecommenderConfiguration$RecommendationProviderIdType" : "

The type of Amazon Pinpoint ID to associate with unique user IDs in the recommender model. This value enables the model to use attribute and event data that’s specific to a particular endpoint or user in an Amazon Pinpoint application. Valid values are:

", "UpdateRecommenderConfiguration$RecommendationProviderRoleArn" : "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon Pinpoint to retrieve recommendation data from the recommender model.

", "UpdateRecommenderConfiguration$RecommendationProviderUri" : "

The Amazon Resource Name (ARN) of the recommender model to retrieve recommendation data from. This value must match the ARN of an Amazon Personalize campaign.

", "UpdateRecommenderConfiguration$RecommendationTransformerUri" : "

The name or Amazon Resource Name (ARN) of the AWS Lambda function to invoke for additional processing of recommendation data that's retrieved from the recommender model.

", - "UpdateRecommenderConfiguration$RecommendationsDisplayName" : "

A custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores a recommended item for each endpoint or user, depending on the value for the RecommenderUserIdType property. This value is required if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

This name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console. The name can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions don't apply to attribute values.

", + "UpdateRecommenderConfiguration$RecommendationsDisplayName" : "

A custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores recommended items for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This value is required if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

This name appears in the Attribute finder of the template editor on the Amazon Pinpoint console. The name can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions don't apply to attribute values.

", "VoiceChannelResponse$ApplicationId" : "

The unique identifier for the application that the voice channel applies to.

", "VoiceChannelResponse$CreationDate" : "

The date and time, in ISO 8601 format, when the voice channel was enabled.

", "VoiceChannelResponse$Id" : "

(Deprecated) An identifier for the voice channel. This property is retained only for backward compatibility.

", @@ -2234,8 +2263,8 @@ "WriteCampaignRequest$Description" : "

A custom description of the campaign.

", "WriteCampaignRequest$Name" : "

A custom name for the campaign.

", "WriteCampaignRequest$SegmentId" : "

The unique identifier for the segment to associate with the campaign.

", - "WriteCampaignRequest$TreatmentDescription" : "

A custom description of a variation of the campaign to use for A/B testing.

", - "WriteCampaignRequest$TreatmentName" : "

A custom name for a variation of the campaign to use for A/B testing.

", + "WriteCampaignRequest$TreatmentDescription" : "

A custom description of the default treatment for the campaign.

", + "WriteCampaignRequest$TreatmentName" : "

A custom name of the default treatment for the campaign, if the campaign has multiple treatments. A treatment is a variation of a campaign that's used for A/B testing.

", "WriteEventStream$DestinationStreamArn" : "

The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.

For a Kinesis data stream, the ARN format is: arn:aws:kinesis:region:account-id:stream/stream_name\n

For a Kinesis Data Firehose delivery stream, the ARN format is: arn:aws:firehose:region:account-id:deliverystream/stream_name\n

", "WriteEventStream$RoleArn" : "

The AWS Identity and Access Management (IAM) role that authorizes Amazon Pinpoint to publish event data to the stream in your AWS account.

", "WriteJourneyRequest$CreationDate" : "

The date, in ISO 8601 format, when the journey was created.

", @@ -2245,10 +2274,10 @@ "WriteJourneyRequest$StartActivity" : "

The unique identifier for the first activity in the journey. The identifier for this activity can contain a maximum of 128 characters. The characters must be alphanumeric characters.

", "WriteSegmentRequest$Name" : "

The name of the segment.

", "WriteTreatmentResource$TreatmentDescription" : "

A custom description of the treatment.

", - "WriteTreatmentResource$TreatmentName" : "

A custom name for the treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", + "WriteTreatmentResource$TreatmentName" : "

A custom name for the treatment.

", "ListOf__string$member" : null, "MapOf__string$member" : null } } } -} +} \ No newline at end of file diff --git a/models/apis/ram/2018-01-04/api-2.json b/models/apis/ram/2018-01-04/api-2.json index bdc35630257..27a0208d457 100644 --- a/models/apis/ram/2018-01-04/api-2.json +++ b/models/apis/ram/2018-01-04/api-2.json @@ -198,6 +198,7 @@ {"shape":"MalformedArnException"}, {"shape":"InvalidNextTokenException"}, {"shape":"InvalidParameterException"}, + {"shape":"ResourceArnNotFoundException"}, {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ] @@ -232,6 +233,7 @@ {"shape":"ResourceShareInvitationArnNotFoundException"}, {"shape":"InvalidMaxResultsException"}, {"shape":"MalformedArnException"}, + {"shape":"UnknownResourceException"}, {"shape":"InvalidNextTokenException"}, {"shape":"InvalidParameterException"}, {"shape":"ServerInternalException"}, @@ -326,6 +328,21 @@ {"shape":"OperationNotPermittedException"} ] }, + "ListResourceTypes":{ + "name":"ListResourceTypes", + "http":{ + "method":"POST", + "requestUri":"/listresourcetypes" + }, + "input":{"shape":"ListResourceTypesRequest"}, + "output":{"shape":"ListResourceTypesResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServerInternalException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "ListResources":{ "name":"ListResources", "http":{ @@ -358,7 +375,8 @@ {"shape":"InvalidParameterException"}, {"shape":"MissingRequiredParameterException"}, {"shape":"ServerInternalException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"UnknownResourceException"} ] }, "RejectResourceShareInvitation":{ @@ -798,6 +816,20 @@ "nextToken":{"shape":"String"} } }, + "ListResourceTypesRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListResourceTypesResponse":{ + "type":"structure", + "members":{ + 
"resourceTypes":{"shape":"ServiceNameAndResourceTypeList"}, + "nextToken":{"shape":"String"} + } + }, "ListResourcesRequest":{ "type":"structure", "required":["resourceOwner"], @@ -1150,6 +1182,17 @@ "error":{"httpStatusCode":500}, "exception":true }, + "ServiceNameAndResourceType":{ + "type":"structure", + "members":{ + "resourceType":{"shape":"String"}, + "serviceName":{"shape":"String"} + } + }, + "ServiceNameAndResourceTypeList":{ + "type":"list", + "member":{"shape":"ServiceNameAndResourceType"} + }, "ServiceUnavailableException":{ "type":"structure", "required":["message"], diff --git a/models/apis/ram/2018-01-04/docs-2.json b/models/apis/ram/2018-01-04/docs-2.json index f80299b0095..b2cbb314851 100644 --- a/models/apis/ram/2018-01-04/docs-2.json +++ b/models/apis/ram/2018-01-04/docs-2.json @@ -19,6 +19,7 @@ "ListPermissions": "

Lists the AWS RAM permissions.

", "ListPrincipals": "

Lists the principals that you have shared resources with or that have shared resources with you.

", "ListResourceSharePermissions": "

Lists the AWS RAM permissions that are associated with a resource share.

", + "ListResourceTypes": "

Lists the shareable resource types supported by AWS RAM.

", "ListResources": "

Lists the resources that you added to a resource share or the resources that are shared with you.

", "PromoteResourceShareCreatedFromPolicy": "

Resource shares that were created by attaching a policy to a resource are visible only to the resource share owner, and the resource share cannot be modified in AWS RAM.

Use this API action to promote the resource share. When you promote the resource share, it becomes:

", "RejectResourceShareInvitation": "

Rejects an invitation to a resource share from another AWS account.

", @@ -274,6 +275,16 @@ "refs": { } }, + "ListResourceTypesRequest": { + "base": null, + "refs": { + } + }, + "ListResourceTypesResponse": { + "base": null, + "refs": { + } + }, "ListResourcesRequest": { "base": null, "refs": { @@ -300,6 +311,7 @@ "ListPermissionsRequest$maxResults": "

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

", "ListPrincipalsRequest$maxResults": "

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

", "ListResourceSharePermissionsRequest$maxResults": "

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

", + "ListResourceTypesRequest$maxResults": "

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

", "ListResourcesRequest$maxResults": "

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" } }, @@ -555,6 +567,18 @@ "refs": { } }, + "ServiceNameAndResourceType": { + "base": "

Information about the shareable resource types and the AWS services to which they belong.

", + "refs": { + "ServiceNameAndResourceTypeList$member": null + } + }, + "ServiceNameAndResourceTypeList": { + "base": null, + "refs": { + "ListResourceTypesResponse$resourceTypes": "

The shareable resource types supported by AWS RAM.

" + } + }, "ServiceUnavailableException": { "base": "

The service is not available.

", "refs": { @@ -613,14 +637,16 @@ "ListPermissionsRequest$nextToken": "

The token for the next page of results.

", "ListPermissionsResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "ListPrincipalsRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource.

", - "ListPrincipalsRequest$resourceType": "

The resource type.

Valid values: ec2:CapacityReservation | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | license-manager:LicenseConfiguration | rds:Cluster | route53resolver:ResolverRule I resource-groups:Group

", + "ListPrincipalsRequest$resourceType": "

The resource type.

Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | license-manager:LicenseConfiguration | resource-groups:Group | rds:Cluster | route53resolver:ResolverRule

", "ListPrincipalsRequest$nextToken": "

The token for the next page of results.

", "ListPrincipalsResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "ListResourceSharePermissionsRequest$resourceShareArn": "

The Amazon Resource Name (ARN) of the resource share.

", "ListResourceSharePermissionsRequest$nextToken": "

The token for the next page of results.

", "ListResourceSharePermissionsResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListResourceTypesRequest$nextToken": "

The token for the next page of results.

", + "ListResourceTypesResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "ListResourcesRequest$principal": "

The principal.

", - "ListResourcesRequest$resourceType": "

The resource type.

Valid values: ec2:CapacityReservation | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | license-manager:LicenseConfiguration | rds:Cluster | route53resolver:ResolverRule | resource-groups:Group

", + "ListResourcesRequest$resourceType": "

The resource type.

Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | license-manager:LicenseConfiguration | resource-groups:Group | rds:Cluster | route53resolver:ResolverRule

", "ListResourcesRequest$nextToken": "

The token for the next page of results.

", "ListResourcesResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "MalformedArnException$message": null, @@ -672,6 +698,8 @@ "ResourceSharePermissionSummary$resourceType": "

The type of resource to which the permission applies.

", "ResourceSharePermissionSummary$status": "

The current status of the permission.

", "ServerInternalException$message": null, + "ServiceNameAndResourceType$resourceType": "

The shareable resource types.

", + "ServiceNameAndResourceType$serviceName": "

The name of the AWS service to which the resources belong.

", "ServiceUnavailableException$message": null, "TagLimitExceededException$message": null, "TagPolicyViolationException$message": null, diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index fb376f11289..899ddb8509d 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -4937,6 +4937,7 @@ "EngineVersion":{"shape":"String"}, "DBInstanceClass":{"shape":"String"}, "LicenseModel":{"shape":"String"}, + "AvailabilityZoneGroup":{"shape":"String"}, "Vpc":{"shape":"BooleanOptional"}, "Filters":{"shape":"FilterList"}, "MaxRecords":{"shape":"IntegerOptional"}, @@ -6384,6 +6385,7 @@ "EngineVersion":{"shape":"String"}, "DBInstanceClass":{"shape":"String"}, "LicenseModel":{"shape":"String"}, + "AvailabilityZoneGroup":{"shape":"String"}, "AvailabilityZones":{"shape":"AvailabilityZoneList"}, "MultiAZCapable":{"shape":"Boolean"}, "ReadReplicaCapable":{"shape":"Boolean"}, diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 907799892ef..68524aeb573 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -118,8 +118,8 @@ "RemoveTagsFromResource": "

Removes metadata tags from an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

", "ResetDBClusterParameterGroup": "

Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

", "ResetDBParameterGroup": "

Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters, provide a list of the following: ParameterName and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

", - "RestoreDBClusterFromS3": "

Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using the Percona XtraBackup utility as described in Migrating Data to an Amazon Aurora MySQL DB Cluster in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

", - "RestoreDBClusterFromSnapshot": "

Creates a new DB cluster from a DB snapshot or DB cluster snapshot. This action only applies to Aurora DB clusters.

The target DB cluster is created from the source snapshot with a default configuration. If you don't specify a security group, the new DB cluster is associated with the default security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterFromSnapshot action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

", + "RestoreDBClusterFromS3": "

Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using the Percona XtraBackup utility as described in Migrating Data to an Amazon Aurora MySQL DB Cluster in the Amazon Aurora User Guide.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterFromS3 action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

", + "RestoreDBClusterFromSnapshot": "

Creates a new DB cluster from a DB snapshot or DB cluster snapshot. This action only applies to Aurora DB clusters.

The target DB cluster is created from the source snapshot with a default configuration. If you don't specify a security group, the new DB cluster is associated with the default security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterFromSnapshot action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

", "RestoreDBClusterToPointInTime": "

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

", "RestoreDBInstanceFromDBSnapshot": "

Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the original configuration, with the default security group and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment.

If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS doesn't allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.

If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot.

", "RestoreDBInstanceFromS3": "

Amazon Relational Database Service (Amazon RDS) supports importing MySQL databases by using backup files. You can create a backup of your on-premises database, store it on Amazon Simple Storage Service (Amazon S3), and then restore the backup file onto a new Amazon RDS DB instance running MySQL. For more information, see Importing Data into an Amazon RDS MySQL DB Instance in the Amazon RDS User Guide.

", @@ -440,7 +440,7 @@ "ModifyEventSubscriptionMessage$Enabled": "

A value that indicates whether to activate the subscription.

", "ModifyGlobalClusterMessage$DeletionProtection": "

Indicates if the global database cluster has deletion protection enabled. The global database cluster can't be deleted when deletion protection is enabled.

", "OptionGroupOption$SupportsOptionVersionDowngrade": "

If true, you can change the option to an earlier version of the option. This only applies to options that have different versions available.

", - "OrderableDBInstanceOption$SupportsStorageAutoscaling": "

Whether or not Amazon RDS can automatically scale storage for DB instances that use the specified instance class.

", + "OrderableDBInstanceOption$SupportsStorageAutoscaling": "

Whether Amazon RDS can automatically scale storage for DB instances that use the specified DB instance class.

", "OrderableDBInstanceOption$SupportsKerberosAuthentication": "

Whether a DB instance supports Kerberos Authentication.

", "PendingModifiedValues$MultiAZ": "

Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

", "RebootDBInstanceMessage$ForceFailover": "

A value that indicates whether the reboot is conducted through a Multi-AZ failover.

Constraint: You can't enable force failover if the instance isn't configured for Multi-AZ.

", @@ -3818,6 +3818,7 @@ "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

", "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

", "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

", + "DescribeOrderableDBInstanceOptionsMessage$AvailabilityZoneGroup": "

The Availability Zone group associated with a Local Zone. Specify this parameter to retrieve available offerings for the Local Zones in the group.

Omit this parameter to show the available offerings in the specified AWS Region.

", "DescribeOrderableDBInstanceOptionsMessage$Marker": "

An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

", "DescribePendingMaintenanceActionsMessage$ResourceIdentifier": "

The ARN of a resource to return pending maintenance actions for.

", "DescribePendingMaintenanceActionsMessage$Marker": "

An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords.

", @@ -4013,6 +4014,7 @@ "OrderableDBInstanceOption$EngineVersion": "

The engine version of a DB instance.

", "OrderableDBInstanceOption$DBInstanceClass": "

The DB instance class for a DB instance.

", "OrderableDBInstanceOption$LicenseModel": "

The license model for a DB instance.

", + "OrderableDBInstanceOption$AvailabilityZoneGroup": "

The Availability Zone group for a DB instance.

", "OrderableDBInstanceOption$StorageType": "

Indicates the storage type for a DB instance.

", "OrderableDBInstanceOptionsMessage$Marker": "

An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

", "Parameter$ParameterName": "

Specifies the name of the parameter.

", diff --git a/models/apis/redshift/2012-12-01/api-2.json b/models/apis/redshift/2012-12-01/api-2.json index 109482ce9ce..2826d0ec020 100644 --- a/models/apis/redshift/2012-12-01/api-2.json +++ b/models/apis/redshift/2012-12-01/api-2.json @@ -386,6 +386,27 @@ {"shape":"InvalidTagFault"} ] }, + "CreateUsageLimit":{ + "name":"CreateUsageLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUsageLimitMessage"}, + "output":{ + "shape":"UsageLimit", + "resultWrapper":"CreateUsageLimitResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"InvalidClusterStateFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"UsageLimitAlreadyExistsFault"}, + {"shape":"InvalidUsageLimitFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"UnsupportedOperationFault"} + ] + }, "DeleteCluster":{ "name":"DeleteCluster", "http":{ @@ -542,6 +563,18 @@ {"shape":"InvalidTagFault"} ] }, + "DeleteUsageLimit":{ + "name":"DeleteUsageLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUsageLimitMessage"}, + "errors":[ + {"shape":"UsageLimitNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ] + }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", "http":{ @@ -959,6 +992,22 @@ {"shape":"InvalidTagFault"} ] }, + "DescribeUsageLimits":{ + "name":"DescribeUsageLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUsageLimitsMessage"}, + "output":{ + "shape":"UsageLimitList", + "resultWrapper":"DescribeUsageLimitsResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ] + }, "DisableLogging":{ "name":"DisableLogging", "http":{ @@ -1299,6 +1348,23 @@ {"shape":"SnapshotScheduleUpdateInProgressFault"} ] }, + "ModifyUsageLimit":{ + "name":"ModifyUsageLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyUsageLimitMessage"}, + "output":{ + "shape":"UsageLimit", + 
"resultWrapper":"ModifyUsageLimitResult" + }, + "errors":[ + {"shape":"InvalidUsageLimitFault"}, + {"shape":"UsageLimitNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ] + }, "PauseCluster":{ "name":"PauseCluster", "http":{ @@ -2533,6 +2599,24 @@ "Tags":{"shape":"TagList"} } }, + "CreateUsageLimitMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "FeatureType", + "LimitType", + "Amount" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "FeatureType":{"shape":"UsageLimitFeatureType"}, + "LimitType":{"shape":"UsageLimitLimitType"}, + "Amount":{"shape":"Long"}, + "Period":{"shape":"UsageLimitPeriod"}, + "BreachAction":{"shape":"UsageLimitBreachAction"}, + "Tags":{"shape":"TagList"} + } + }, "CustomerStorageMessage":{ "type":"structure", "members":{ @@ -2693,6 +2777,13 @@ "TagKeys":{"shape":"TagKeyList"} } }, + "DeleteUsageLimitMessage":{ + "type":"structure", + "required":["UsageLimitId"], + "members":{ + "UsageLimitId":{"shape":"String"} + } + }, "DependentServiceRequestThrottlingFault":{ "type":"structure", "members":{ @@ -2992,6 +3083,18 @@ "TagValues":{"shape":"TagValueList"} } }, + "DescribeUsageLimitsMessage":{ + "type":"structure", + "members":{ + "UsageLimitId":{"shape":"String"}, + "ClusterIdentifier":{"shape":"String"}, + "FeatureType":{"shape":"UsageLimitFeatureType"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, "DisableLoggingMessage":{ "type":"structure", "required":["ClusterIdentifier"], @@ -3682,6 +3785,17 @@ }, "exception":true }, + "InvalidUsageLimitFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidUsageLimit", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidVPCNetworkStateFault":{ "type":"structure", "members":{ @@ -3931,6 +4045,15 @@ "ScheduleDefinitions":{"shape":"ScheduleDefinitionList"} } }, + "ModifyUsageLimitMessage":{ + 
"type":"structure", + "required":["UsageLimitId"], + "members":{ + "UsageLimitId":{"shape":"String"}, + "Amount":{"shape":"LongOptional"}, + "BreachAction":{"shape":"UsageLimitBreachAction"} + } + }, "NodeConfigurationOption":{ "type":"structure", "members":{ @@ -5295,6 +5418,82 @@ "SupportedOperations":{"shape":"SupportedOperationList"} } }, + "UsageLimit":{ + "type":"structure", + "members":{ + "UsageLimitId":{"shape":"String"}, + "ClusterIdentifier":{"shape":"String"}, + "FeatureType":{"shape":"UsageLimitFeatureType"}, + "LimitType":{"shape":"UsageLimitLimitType"}, + "Amount":{"shape":"Long"}, + "Period":{"shape":"UsageLimitPeriod"}, + "BreachAction":{"shape":"UsageLimitBreachAction"}, + "Tags":{"shape":"TagList"} + } + }, + "UsageLimitAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UsageLimitAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UsageLimitBreachAction":{ + "type":"string", + "enum":[ + "log", + "emit-metric", + "disable" + ] + }, + "UsageLimitFeatureType":{ + "type":"string", + "enum":[ + "spectrum", + "concurrency-scaling" + ] + }, + "UsageLimitLimitType":{ + "type":"string", + "enum":[ + "time", + "data-scanned" + ] + }, + "UsageLimitList":{ + "type":"structure", + "members":{ + "UsageLimits":{"shape":"UsageLimits"}, + "Marker":{"shape":"String"} + } + }, + "UsageLimitNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UsageLimitNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "UsageLimitPeriod":{ + "type":"string", + "enum":[ + "daily", + "weekly", + "monthly" + ] + }, + "UsageLimits":{ + "type":"list", + "member":{"shape":"UsageLimit"} + }, "ValueStringList":{ "type":"list", "member":{ diff --git a/models/apis/redshift/2012-12-01/docs-2.json b/models/apis/redshift/2012-12-01/docs-2.json index b08b4e765ae..c5903dc4f80 100644 --- a/models/apis/redshift/2012-12-01/docs-2.json +++ 
b/models/apis/redshift/2012-12-01/docs-2.json @@ -21,6 +21,7 @@ "CreateSnapshotCopyGrant": "

Creates a snapshot copy grant that permits Amazon Redshift to use a customer master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied snapshots in a destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

", "CreateSnapshotSchedule": "

Create a snapshot schedule that can be associated to a cluster and which overrides the default system backup schedule.

", "CreateTags": "

Adds tags to a cluster.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

", + "CreateUsageLimit": "

Creates a usage limit for a specified Amazon Redshift feature on a cluster. The usage limit is identified by the returned usage limit identifier.

", "DeleteCluster": "

Deletes a previously provisioned cluster without its final snapshot being created. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be \"final-snapshot\" while the snapshot is being taken, then it's \"deleting\" once Amazon Redshift begins deleting the cluster.

For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

", "DeleteClusterParameterGroup": "

Deletes a specified Amazon Redshift parameter group.

You cannot delete a parameter group if it is associated with a cluster.

", "DeleteClusterSecurityGroup": "

Deletes an Amazon Redshift security group.

You cannot delete a security group that is associated with any clusters. You cannot delete the default security group.

For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

", @@ -33,6 +34,7 @@ "DeleteSnapshotCopyGrant": "

Deletes the specified snapshot copy grant.

", "DeleteSnapshotSchedule": "

Deletes a snapshot schedule.

", "DeleteTags": "

Deletes tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

", + "DeleteUsageLimit": "

Deletes a usage limit from a cluster.

", "DescribeAccountAttributes": "

Returns a list of attributes attached to an account.

", "DescribeClusterDbRevisions": "

Returns an array of ClusterDbRevision objects.

", "DescribeClusterParameterGroups": "

Returns a list of Amazon Redshift parameter groups, including parameter groups you created and the default parameter group. For each parameter group, the response includes the parameter group name, description, and parameter group family name. You can optionally specify a name to retrieve the description of a specific parameter group.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all parameter groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all parameter groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, parameter groups are returned regardless of whether they have tag keys or values associated with them.

", @@ -61,6 +63,7 @@ "DescribeStorage": "

Returns account level backups storage size and provisional storage.

", "DescribeTableRestoreStatus": "

Lists the status of one or more table restore requests made using the RestoreTableFromClusterSnapshot API action. If you don't specify a value for the TableRestoreRequestId parameter, then DescribeTableRestoreStatus returns the status of all table restore requests ordered by the date and time of the request in ascending order. Otherwise DescribeTableRestoreStatus returns the status of the table specified by TableRestoreRequestId.

", "DescribeTags": "

Returns a list of tags. You can return tags from a specific resource by specifying an ARN, or you can return all tags for a given type of resource, such as clusters, snapshots, and so on.

The following are limitations for DescribeTags:

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all resources that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all resources that have any combination of those values are returned.

If both tag keys and values are omitted from the request, resources are returned regardless of whether they have tag keys or values associated with them.

", + "DescribeUsageLimits": "

Shows usage limits on a cluster. Results are filtered based on the combination of input usage limit identifier, cluster identifier, and feature type parameters:

", "DisableLogging": "

Stops logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

", "DisableSnapshotCopy": "

Disables the automatic copying of snapshots from one region to another region for a specified cluster.

If your cluster and its snapshots are encrypted using a customer master key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that grants Amazon Redshift permission to the CMK in the destination region.

", "EnableLogging": "

Starts logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

", @@ -79,6 +82,7 @@ "ModifyScheduledAction": "

Modifies a scheduled action.

", "ModifySnapshotCopyRetentionPeriod": "

Modifies the number of days to retain snapshots in the destination AWS Region after they are copied from the source AWS Region. By default, this operation only changes the retention period of copied automated snapshots. The retention periods for both new and existing copied automated snapshots are updated with the new retention period. You can set the manual option to change only the retention periods of copied manual snapshots. If you set this option, only newly copied manual snapshots have the new retention period.

", "ModifySnapshotSchedule": "

Modifies a snapshot schedule. Any schedule associated with a cluster is modified asynchronously.

", + "ModifyUsageLimit": "

Modifies a usage limit in a cluster. You can't modify the feature type or period of a usage limit.

", "PauseCluster": "

Pauses a cluster.

", "PurchaseReservedNodeOffering": "

Allows you to purchase reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. You can purchase one or more of the offerings. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings. You can call this API by providing a specific reserved node offering and the number of nodes you want to reserve.

For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

", "RebootCluster": "

Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to rebooting. A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster) are applied at this reboot. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

", @@ -733,6 +737,11 @@ "refs": { } }, + "CreateUsageLimitMessage": { + "base": null, + "refs": { + } + }, "CustomerStorageMessage": { "base": null, "refs": { @@ -845,6 +854,11 @@ "refs": { } }, + "DeleteUsageLimitMessage": { + "base": null, + "refs": { + } + }, "DependentServiceRequestThrottlingFault": { "base": "

The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.

", "refs": { @@ -1000,6 +1014,11 @@ "refs": { } }, + "DescribeUsageLimitsMessage": { + "base": null, + "refs": { + } + }, "DisableLoggingMessage": { "base": "

", "refs": { @@ -1366,6 +1385,7 @@ "DescribeSnapshotSchedulesMessage$MaxRecords": "

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

", "DescribeTableRestoreStatusMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

", "DescribeTagsMessage$MaxRecords": "

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

", + "DescribeUsageLimitsMessage$MaxRecords": "

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

", "EnableSnapshotCopyMessage$RetentionPeriod": "

The number of days to retain automated snapshots in the destination region after they are copied from the source region.

Default: 7.

Constraints: Must be at least 1 and no more than 35.

", "EnableSnapshotCopyMessage$ManualSnapshotRetentionPeriod": "

The number of days to retain newly copied snapshots in the destination AWS Region after they are copied from the source AWS Region. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

", "GetClusterCredentialsMessage$DurationSeconds": "

The number of seconds until the returned temporary password expires.

Constraint: minimum 900, maximum 3600.

Default: 900

", @@ -1503,6 +1523,11 @@ "refs": { } }, + "InvalidUsageLimitFault": { + "base": "

The usage limit is not valid.

", + "refs": { + } + }, "InvalidVPCNetworkStateFault": { "base": "

The cluster subnet group does not cover all Availability Zones.

", "refs": { @@ -1522,6 +1547,7 @@ "base": null, "refs": { "ClusterSnapshotCopyStatus$RetentionPeriod": "

The number of days that automated snapshots are retained in the destination region after they are copied from a source region.

", + "CreateUsageLimitMessage$Amount": "

The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB). The value must be a positive number.

", "DataTransferProgress$TotalDataInMegaBytes": "

Describes the total amount of data to be transferred in megabytes.

", "DataTransferProgress$DataTransferredInMegaBytes": "

Describes the total amount of data that has been transferred in megabytes.

", "RestoreStatus$SnapshotSizeInMegaBytes": "

The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.

", @@ -1529,7 +1555,8 @@ "RestoreStatus$ElapsedTimeInSeconds": "

The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.

", "RestoreStatus$EstimatedTimeToCompletionInSeconds": "

The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.

", "Snapshot$EstimatedSecondsToCompletion": "

The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.

", - "Snapshot$ElapsedTimeInSeconds": "

The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.

" + "Snapshot$ElapsedTimeInSeconds": "

The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.

", + "UsageLimit$Amount": "

The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB).

" } }, "LongOptional": { @@ -1537,6 +1564,7 @@ "refs": { "DataTransferProgress$EstimatedTimeToCompletionInSeconds": "

Describes the estimated number of seconds remaining to complete the transfer.

", "DataTransferProgress$ElapsedTimeInSeconds": "

Describes the number of seconds that have elapsed during the data transfer.

", + "ModifyUsageLimitMessage$Amount": "

The new limit amount. For more information about this parameter, see UsageLimit.

", "ResizeProgressMessage$TotalResizeDataInMegaBytes": "

The estimated total amount of data, in megabytes, on the cluster before the resize operation began.

", "ResizeProgressMessage$ProgressInMegaBytes": "

While the resize operation is in progress, this value shows the current amount of data, in megabytes, that has been processed so far. When the resize operation is complete, this value shows the total amount of data, in megabytes, on the cluster, which may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data before resize).

", "ResizeProgressMessage$ElapsedTimeInSeconds": "

The amount of seconds that have elapsed since the resize operation began. After the resize operation completes, this value shows the total actual time, in seconds, for the resize operation.

", @@ -1657,6 +1685,11 @@ "refs": { } }, + "ModifyUsageLimitMessage": { + "base": null, + "refs": { + } + }, "NodeConfigurationOption": { "base": "

A list of node configurations.

", "refs": { @@ -2417,6 +2450,7 @@ "CreateSnapshotScheduleMessage$ScheduleIdentifier": "

A unique identifier for a snapshot schedule. Only alphanumeric characters are allowed for the identifier.

", "CreateSnapshotScheduleMessage$ScheduleDescription": "

The description of the snapshot schedule.

", "CreateTagsMessage$ResourceName": "

The Amazon Resource Name (ARN) to which you want to add the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

", + "CreateUsageLimitMessage$ClusterIdentifier": "

The identifier of the cluster for which you want to limit usage.

", "DataTransferProgress$Status": "

Describes the status of the cluster. While the transfer is in progress the status is transferringdata.

", "DbGroupList$member": null, "DefaultClusterParameters$ParameterGroupFamily": "

The name of the cluster parameter group family to which the engine default parameters apply.

", @@ -2436,6 +2470,7 @@ "DeleteSnapshotCopyGrantMessage$SnapshotCopyGrantName": "

The name of the snapshot copy grant to delete.

", "DeleteSnapshotScheduleMessage$ScheduleIdentifier": "

A unique identifier of the snapshot schedule to delete.

", "DeleteTagsMessage$ResourceName": "

The Amazon Resource Name (ARN) from which you want to remove the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

", + "DeleteUsageLimitMessage$UsageLimitId": "

The identifier of the usage limit to delete.

", "DescribeClusterDbRevisionsMessage$ClusterIdentifier": "

A unique identifier for a cluster whose ClusterDbRevisions you are requesting. This parameter is case sensitive. All clusters defined for an account are returned by default.

", "DescribeClusterDbRevisionsMessage$Marker": "

An optional parameter that specifies the starting point for returning a set of response records. When the results of a DescribeClusterDbRevisions request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.

Constraints: You can specify either the ClusterIdentifier parameter, or the marker parameter, but not both.

", "DescribeClusterParameterGroupsMessage$ParameterGroupName": "

The name of a specific parameter group for which to return details. By default, details about all parameter groups and the default parameter group are returned.

", @@ -2497,6 +2532,9 @@ "DescribeTagsMessage$ResourceName": "

The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

", "DescribeTagsMessage$ResourceType": "

The type of resource with which you want to view tags. Valid resource types are:

For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide.

", "DescribeTagsMessage$Marker": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the marker parameter and retrying the command. If the marker field is empty, all response records have been retrieved for the request.

", + "DescribeUsageLimitsMessage$UsageLimitId": "

The identifier of the usage limit to describe.

", + "DescribeUsageLimitsMessage$ClusterIdentifier": "

The identifier of the cluster for which you want to describe usage limits.

", + "DescribeUsageLimitsMessage$Marker": "

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeUsageLimits request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

", "DisableLoggingMessage$ClusterIdentifier": "

The identifier of the cluster on which logging is to be stopped.

Example: examplecluster

", "DisableSnapshotCopyMessage$ClusterIdentifier": "

The unique identifier of the source cluster that you want to disable copying of snapshots to a destination region.

Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

", "EC2SecurityGroup$Status": "

The status of the EC2 security group.

", @@ -2590,6 +2628,7 @@ "ModifyScheduledActionMessage$ScheduledActionDescription": "

A modified description of the scheduled action.

", "ModifySnapshotCopyRetentionPeriodMessage$ClusterIdentifier": "

The unique identifier of the cluster for which you want to change the retention period for either automated or manual snapshots that are copied to a destination AWS Region.

Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

", "ModifySnapshotScheduleMessage$ScheduleIdentifier": "

A unique alphanumeric identifier of the schedule to modify.

", + "ModifyUsageLimitMessage$UsageLimitId": "

The identifier of the usage limit to modify.

", "NodeConfigurationOption$NodeType": "

The node type, such as, \"ds2.8xlarge\".

", "NodeConfigurationOptionsMessage$Marker": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

", "OrderableClusterOption$ClusterVersion": "

The version of the orderable cluster.

", @@ -2651,7 +2690,7 @@ "RestoreFromClusterSnapshotMessage$ClusterParameterGroupName": "

The name of the parameter group to be associated with this cluster.

Default: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups.

Constraints:

", "RestoreFromClusterSnapshotMessage$PreferredMaintenanceWindow": "

The weekly time range (in UTC) during which automated cluster maintenance can occur.

Format: ddd:hh24:mi-ddd:hh24:mi

Default: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.

Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

Constraints: Minimum 30-minute window.

", "RestoreFromClusterSnapshotMessage$KmsKeyId": "

The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster that you restore from a shared snapshot.

", - "RestoreFromClusterSnapshotMessage$NodeType": "

The node type that the restored cluster will be provisioned with.

Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlareg cluster, then resize to a dc2.8large cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

", + "RestoreFromClusterSnapshotMessage$NodeType": "

The node type that the restored cluster will be provisioned with.

Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8xlarge cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

", "RestoreFromClusterSnapshotMessage$AdditionalInfo": "

Reserved.

", "RestoreFromClusterSnapshotMessage$MaintenanceTrackName": "

The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks.

", "RestoreFromClusterSnapshotMessage$SnapshotScheduleIdentifier": "

A unique identifier for the snapshot schedule.

", @@ -2733,6 +2772,9 @@ "TrackListMessage$Marker": "

The starting point to return a set of response tracklist records. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

", "UpdateTarget$MaintenanceTrackName": "

The name of the new maintenance track.

", "UpdateTarget$DatabaseVersion": "

The cluster version for the new maintenance track.

", + "UsageLimit$UsageLimitId": "

The identifier of the usage limit.

", + "UsageLimit$ClusterIdentifier": "

The identifier of the cluster with a usage limit.

", + "UsageLimitList$Marker": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

", "ValueStringList$member": null, "VpcSecurityGroupIdList$member": null, "VpcSecurityGroupMembership$VpcSecurityGroupId": "

The identifier of the VPC security group.

", @@ -2905,7 +2947,8 @@ "DescribeHsmConfigurationsMessage$TagKeys": "

A tag key or keys for which you want to return all matching HSM configurations that are associated with the specified key or keys. For example, suppose that you have HSM configurations that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag keys associated with them.

", "DescribeSnapshotCopyGrantsMessage$TagKeys": "

A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.

", "DescribeSnapshotSchedulesMessage$TagKeys": "

The key value for a snapshot schedule tag.

", - "DescribeTagsMessage$TagKeys": "

A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.

" + "DescribeTagsMessage$TagKeys": "

A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.

", + "DescribeUsageLimitsMessage$TagKeys": "

A tag key or keys for which you want to return all matching usage limit objects that are associated with the specified key or keys. For example, suppose that you have parameter groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the usage limit objects that have either or both of these tag keys associated with them.

" } }, "TagLimitExceededFault": { @@ -2931,6 +2974,7 @@ "CreateSnapshotCopyGrantMessage$Tags": "

A list of tag instances.

", "CreateSnapshotScheduleMessage$Tags": "

An optional set of tags you can use to search for the schedule.

", "CreateTagsMessage$Tags": "

One or more name/value pairs to add as tags to the specified resource. Each tag name is passed in with the parameter Key and the corresponding value is passed in with the parameter Value. The Key and Value parameters are separated by a comma (,). Separate multiple tags with a space. For example, --tags \"Key\"=\"owner\",\"Value\"=\"admin\" \"Key\"=\"environment\",\"Value\"=\"test\" \"Key\"=\"version\",\"Value\"=\"1.0\".

", + "CreateUsageLimitMessage$Tags": "

A list of tag instances.

", "EC2SecurityGroup$Tags": "

The list of tags for the EC2 security group.

", "EventSubscription$Tags": "

The list of tags for the event subscription.

", "HsmClientCertificate$Tags": "

The list of tags for the HSM client certificate.

", @@ -2938,7 +2982,8 @@ "IPRange$Tags": "

The list of tags for the IP range.

", "Snapshot$Tags": "

The list of tags for the cluster snapshot.

", "SnapshotCopyGrant$Tags": "

A list of tag instances.

", - "SnapshotSchedule$Tags": "

An optional set of tags describing the schedule.

" + "SnapshotSchedule$Tags": "

An optional set of tags describing the schedule.

", + "UsageLimit$Tags": "

A list of tag instances.

" } }, "TagValueList": { @@ -2954,7 +2999,8 @@ "DescribeHsmConfigurationsMessage$TagValues": "

A tag value or values for which you want to return all matching HSM configurations that are associated with the specified tag value or values. For example, suppose that you have HSM configurations that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag values associated with them.

", "DescribeSnapshotCopyGrantsMessage$TagValues": "

A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.

", "DescribeSnapshotSchedulesMessage$TagValues": "

The value corresponding to the key of the snapshot schedule tag.

", - "DescribeTagsMessage$TagValues": "

A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.

" + "DescribeTagsMessage$TagValues": "

A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.

", + "DescribeUsageLimitsMessage$TagValues": "

A tag value or values for which you want to return all matching usage limit objects that are associated with the specified tag value or values. For example, suppose that you have parameter groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the usage limit objects that have either or both of these tag values associated with them.

" } }, "TaggedResource": { @@ -3011,6 +3057,63 @@ "EligibleTracksToUpdateList$member": null } }, + "UsageLimit": { + "base": "

Describes a usage limit object for a cluster.

", + "refs": { + "UsageLimits$member": null + } + }, + "UsageLimitAlreadyExistsFault": { + "base": "

The usage limit already exists.

", + "refs": { + } + }, + "UsageLimitBreachAction": { + "base": null, + "refs": { + "CreateUsageLimitMessage$BreachAction": "

The action that Amazon Redshift takes when the limit is reached. The default is log. For more information about this parameter, see UsageLimit.

", + "ModifyUsageLimitMessage$BreachAction": "

The new action that Amazon Redshift takes when the limit is reached. For more information about this parameter, see UsageLimit.

", + "UsageLimit$BreachAction": "

The action that Amazon Redshift takes when the limit is reached. Possible values are:

" + } + }, + "UsageLimitFeatureType": { + "base": null, + "refs": { + "CreateUsageLimitMessage$FeatureType": "

The Amazon Redshift feature that you want to limit.

", + "DescribeUsageLimitsMessage$FeatureType": "

The feature type for which you want to describe usage limits.

", + "UsageLimit$FeatureType": "

The Amazon Redshift feature to which the limit applies.

" + } + }, + "UsageLimitLimitType": { + "base": null, + "refs": { + "CreateUsageLimitMessage$LimitType": "

The type of limit. Depending on the feature type, this can be based on a time duration or data size. If FeatureType is spectrum, then LimitType must be data-scanned. If FeatureType is concurrency-scaling, then LimitType must be time.

", + "UsageLimit$LimitType": "

The type of limit. Depending on the feature type, this can be based on a time duration or data size.

" + } + }, + "UsageLimitList": { + "base": null, + "refs": { + } + }, + "UsageLimitNotFoundFault": { + "base": "

The usage limit identifier can't be found.

", + "refs": { + } + }, + "UsageLimitPeriod": { + "base": null, + "refs": { + "CreateUsageLimitMessage$Period": "

The time period that the amount applies to. A weekly period begins on Sunday. The default is monthly.

", + "UsageLimit$Period": "

The time period that the amount applies to. A weekly period begins on Sunday. The default is monthly.

" + } + }, + "UsageLimits": { + "base": null, + "refs": { + "UsageLimitList$UsageLimits": "

Contains the output from the DescribeUsageLimits action.

" + } + }, "ValueStringList": { "base": null, "refs": { diff --git a/models/apis/redshift/2012-12-01/paginators-1.json b/models/apis/redshift/2012-12-01/paginators-1.json index e9690c816d8..b72738fbece 100644 --- a/models/apis/redshift/2012-12-01/paginators-1.json +++ b/models/apis/redshift/2012-12-01/paginators-1.json @@ -101,6 +101,12 @@ "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "ScheduledActions" + }, + "DescribeUsageLimits": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "UsageLimits" } } } \ No newline at end of file diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 74eae8b88f3..a074e3c33f2 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -4433,11 +4433,6 @@ "min":1, "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" }, - "EnvironmentArn":{ - "type":"string", - "max":256, - "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:environment/[a-z0-9]([-.]?[a-z0-9])*$" - }, "EnvironmentKey":{ "type":"string", "max":1024, @@ -7590,7 +7585,7 @@ "ResourceSpec":{ "type":"structure", "members":{ - "EnvironmentArn":{"shape":"EnvironmentArn"}, + "SageMakerImageArn":{"shape":"SageMakerImageArn"}, "InstanceType":{"shape":"AppInstanceType"} } }, @@ -7695,6 +7690,11 @@ "max":1024, "pattern":"^(https|s3)://([^/]+)/?(.*)$" }, + "SageMakerImageArn":{ + "type":"string", + "max":256, + "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image/[a-z0-9]([-.]?[a-z0-9])*$" + }, "SamplingPercentage":{ "type":"integer", "max":100, diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index d096fff48f4..3eb698adfb4 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -2171,12 +2171,6 @@ "UpdateCodeRepositoryInput$CodeRepositoryName": "

The name of the Git repository to update.

" } }, - "EnvironmentArn": { - "base": null, - "refs": { - "ResourceSpec$EnvironmentArn": "

The Amazon Resource Name (ARN) of the environment.

" - } - }, "EnvironmentKey": { "base": null, "refs": { @@ -4855,7 +4849,7 @@ } }, "ResourceSpec": { - "base": "

The instance type and quantity.

", + "base": "

The instance type and the Amazon Resource Name (ARN) of the image created on the instance. The ARN is stored as metadata in Amazon SageMaker Studio notebooks.

", "refs": { "CreateAppRequest$ResourceSpec": "

The instance type and quantity.

", "DescribeAppResponse$ResourceSpec": "

The instance type and quantity.

", @@ -5003,6 +4997,12 @@ "UiConfig$UiTemplateS3Uri": "

The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

" } }, + "SageMakerImageArn": { + "base": null, + "refs": { + "ResourceSpec$SageMakerImageArn": "

The Amazon Resource Name (ARN) of the image created on the instance.

" + } + }, "SamplingPercentage": { "base": null, "refs": { diff --git a/models/apis/storagegateway/2013-06-30/api-2.json b/models/apis/storagegateway/2013-06-30/api-2.json index acbf5a40e28..75e6676e603 100644 --- a/models/apis/storagegateway/2013-06-30/api-2.json +++ b/models/apis/storagegateway/2013-06-30/api-2.json @@ -235,6 +235,19 @@ {"shape":"InternalServerError"} ] }, + "DeleteAutomaticTapeCreationPolicy":{ + "name":"DeleteAutomaticTapeCreationPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAutomaticTapeCreationPolicyInput"}, + "output":{"shape":"DeleteAutomaticTapeCreationPolicyOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, "DeleteBandwidthRateLimit":{ "name":"DeleteBandwidthRateLimit", "http":{ @@ -612,6 +625,19 @@ {"shape":"InternalServerError"} ] }, + "ListAutomaticTapeCreationPolicies":{ + "name":"ListAutomaticTapeCreationPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAutomaticTapeCreationPoliciesInput"}, + "output":{"shape":"ListAutomaticTapeCreationPoliciesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, "ListFileShares":{ "name":"ListFileShares", "http":{ @@ -859,6 +885,19 @@ {"shape":"InternalServerError"} ] }, + "UpdateAutomaticTapeCreationPolicy":{ + "name":"UpdateAutomaticTapeCreationPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAutomaticTapeCreationPolicyInput"}, + "output":{"shape":"UpdateAutomaticTapeCreationPolicyOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, "UpdateBandwidthRateLimit":{ "name":"UpdateBandwidthRateLimit", "http":{ @@ -1149,6 +1188,38 @@ "max":15, "min":5 }, + "AutomaticTapeCreationPolicyInfo":{ + "type":"structure", + "members":{ + "AutomaticTapeCreationRules":{"shape":"AutomaticTapeCreationRules"}, + 
"GatewayARN":{"shape":"GatewayARN"} + } + }, + "AutomaticTapeCreationPolicyInfos":{ + "type":"list", + "member":{"shape":"AutomaticTapeCreationPolicyInfo"} + }, + "AutomaticTapeCreationRule":{ + "type":"structure", + "required":[ + "TapeBarcodePrefix", + "PoolId", + "TapeSizeInBytes", + "MinimumNumTapes" + ], + "members":{ + "TapeBarcodePrefix":{"shape":"TapeBarcodePrefix"}, + "PoolId":{"shape":"PoolId"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "MinimumNumTapes":{"shape":"MinimumNumTapes"} + } + }, + "AutomaticTapeCreationRules":{ + "type":"list", + "member":{"shape":"AutomaticTapeCreationRule"}, + "max":10, + "min":1 + }, "AvailabilityMonitorTestStatus":{ "type":"string", "enum":[ @@ -1480,6 +1551,19 @@ "max":6, "min":0 }, + "DeleteAutomaticTapeCreationPolicyInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DeleteAutomaticTapeCreationPolicyOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "DeleteBandwidthRateLimitInput":{ "type":"structure", "required":[ @@ -2287,6 +2371,18 @@ "max":25, "min":1 }, + "ListAutomaticTapeCreationPoliciesInput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ListAutomaticTapeCreationPoliciesOutput":{ + "type":"structure", + "members":{ + "AutomaticTapeCreationPolicyInfos":{"shape":"AutomaticTapeCreationPolicyInfos"} + } + }, "ListFileSharesInput":{ "type":"structure", "members":{ @@ -2428,6 +2524,11 @@ "max":50, "min":2 }, + "MinimumNumTapes":{ + "type":"integer", + "max":10, + "min":1 + }, "MinuteOfHour":{ "type":"integer", "max":59, @@ -2968,6 +3069,23 @@ "max":3600, "min":0 }, + "UpdateAutomaticTapeCreationPolicyInput":{ + "type":"structure", + "required":[ + "AutomaticTapeCreationRules", + "GatewayARN" + ], + "members":{ + "AutomaticTapeCreationRules":{"shape":"AutomaticTapeCreationRules"}, + "GatewayARN":{"shape":"GatewayARN"} + } + }, + 
"UpdateAutomaticTapeCreationPolicyOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "UpdateBandwidthRateLimitInput":{ "type":"structure", "required":["GatewayARN"], diff --git a/models/apis/storagegateway/2013-06-30/docs-2.json b/models/apis/storagegateway/2013-06-30/docs-2.json index 2ec5e111455..fd8c47b1638 100644 --- a/models/apis/storagegateway/2013-06-30/docs-2.json +++ b/models/apis/storagegateway/2013-06-30/docs-2.json @@ -1,36 +1,37 @@ { "version": "2.0", - "service": "AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

", + "service": "AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS Cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

", "operations": { "ActivateGateway": "

Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the AWS Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.

You must turn on the gateway VM before you can activate your gateway.

", "AddCache": "

Configures one or more gateway local disks as cache for a gateway. This operation is only supported in the cached volume, tape and file gateway type (see Storage Gateway Concepts).

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.

", "AddTagsToResource": "

Adds one or more tags to the specified resource. You use tags to add metadata to resources, which you can use to categorize these resources. For example, you can categorize resources by purpose, owner, environment, or team. Each tag consists of a key and a value, which you define. You can add tags to the following AWS Storage Gateway resources:

You can create a maximum of 50 tags for each resource. Virtual tapes and storage volumes that are recovered to a new gateway maintain their tags.

", "AddUploadBuffer": "

Configures one or more gateway local disks as upload buffer for a specified gateway. This operation is supported for the stored volume, cached volume and tape gateway types.

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add upload buffer, and one or more disk IDs that you want to configure as upload buffer.

", "AddWorkingStorage": "

Configures one or more gateway local disks as working storage for a gateway. This operation is only supported in the stored volume gateway type. This operation is deprecated in cached volume API version 20120630. Use AddUploadBuffer instead.

Working storage is also referred to as upload buffer. You can also use the AddUploadBuffer operation to add upload buffer to a stored volume gateway.

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add working storage, and one or more disk IDs that you want to configure as working storage.

", - "AssignTapePool": "

Assigns a tape to a tape pool for archiving. The tape assigned to a pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the S3 storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", + "AssignTapePool": "

Assigns a tape to a tape pool for archiving. The tape assigned to a pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the S3 storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", "AttachVolume": "

Connects a volume to an iSCSI connection and then attaches the volume to the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance.

", "CancelArchival": "

Cancels archiving of a virtual tape to the virtual tape shelf (VTS) after the archiving process is initiated. This operation is only supported in the tape gateway type.

", "CancelRetrieval": "

Cancels retrieval of a virtual tape from the virtual tape shelf (VTS) to a gateway after the retrieval process is initiated. The virtual tape is returned to the VTS. This operation is only supported in the tape gateway type.

", "CreateCachediSCSIVolume": "

Creates a cached volume on a specified cached volume gateway. This operation is only supported in the cached volume gateway type.

Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.

In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, the gateway creates the volume and returns information about it. This information includes the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

Optionally, you can provide the ARN for an existing volume as the SourceVolumeARN for this cached volume, which creates an exact copy of the existing volume’s latest recovery point. The VolumeSizeInBytes value must be equal to or larger than the size of the copied volume, in bytes.

", - "CreateNFSFileShare": "

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

", - "CreateSMBFileShare": "

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway expose file shares using a SMB interface. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

", - "CreateSnapshot": "

Initiates a snapshot of a volume.

AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage (S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. This API enables you to take ad-hoc snapshot. For more information, see Editing a Snapshot Schedule.

In the CreateSnapshot request you identify the volume by providing its Amazon Resource Name (ARN). You must also provide description for the snapshot. When AWS Storage Gateway takes the snapshot of specified volume, the snapshot and description appears in the AWS Storage Gateway Console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot. This operation is only supported in stored and cached volume gateway type.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the EC2 API reference.

Volume and snapshot IDs are changing to a longer length ID format. For more information, see the important note on the Welcome page.

", + "CreateNFSFileShare": "

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. This operation is only supported for file gateways.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

", + "CreateSMBFileShare": "

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an SMB interface. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

", + "CreateSnapshot": "

Initiates a snapshot of a volume.

AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage Service (Amazon S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. This API enables you to take an ad hoc snapshot. For more information, see Editing a Snapshot Schedule.

In the CreateSnapshot request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When AWS Storage Gateway takes the snapshot of the specified volume, the snapshot and description appear in the AWS Storage Gateway Console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot. This operation is only supported in stored and cached volume gateway types.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the EC2 API reference.

Volume and snapshot IDs are changing to a longer length ID format. For more information, see the important note on the Welcome page.

", "CreateSnapshotFromVolumeRecoveryPoint": "

Initiates a snapshot of a gateway from a volume recovery point. This operation is only supported in the cached volume gateway type.

A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To get a list of volume recovery point for cached volume gateway, use ListVolumeRecoveryPoints.

In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When the gateway takes a snapshot of the specified volume, the snapshot and its description appear in the AWS Storage Gateway console. In response, the gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the Amazon Elastic Compute Cloud API Reference.

", "CreateStorediSCSIVolume": "

Creates a volume on a specified gateway. This operation is only supported in the stored volume gateway type.

The size of the volume to create is inferred from the disk size. You can choose to preserve existing data on the disk, create volume from an existing snapshot, or create an empty volume. If you choose to create an empty gateway volume, then any existing data on the disk is erased.

In the request you must specify the gateway and the disk information on which you are creating the volume. In response, the gateway creates the volume and returns volume information such as the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

", - "CreateTapeWithBarcode": "

Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and can not be reused if it has already been used on a tape . This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

", + "CreateTapeWithBarcode": "

Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and cannot be reused if it has already been used on a tape. This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

", "CreateTapes": "

Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway.

", + "DeleteAutomaticTapeCreationPolicy": "

Deletes the automatic tape creation policy of a gateway. If you delete this policy, new virtual tapes must be created manually. Use the Amazon Resource Name (ARN) of the gateway in your request to remove the policy.

", "DeleteBandwidthRateLimit": "

Deletes the bandwidth rate limits of a gateway. You can delete either the upload and download bandwidth rate limit, or you can delete both. If you delete only one of the limits, the other limit remains unchanged. To specify which gateway to work with, use the Amazon Resource Name (ARN) of the gateway in your request. This operation is supported for the stored volume, cached volume and tape gateway types.

", "DeleteChapCredentials": "

Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair. This operation is supported in volume and tape gateway types.

", "DeleteFileShare": "

Deletes a file share from a file gateway. This operation is only supported for file gateways.

", "DeleteGateway": "

Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.

After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway, however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.

You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription.  If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway Detail Page.

", - "DeleteSnapshotSchedule": "

Deletes a snapshot of a volume.

You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Working with Snapshots. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN). This operation is only supported in stored and cached volume gateway types.

To list or delete a snapshot, you must use the Amazon EC2 API. in Amazon Elastic Compute Cloud API Reference.

", + "DeleteSnapshotSchedule": "

Deletes a snapshot of a volume.

You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Working with Snapshots. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN). This operation is only supported in stored and cached volume gateway types.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

", "DeleteTape": "

Deletes the specified virtual tape. This operation is only supported in the tape gateway type.

", "DeleteTapeArchive": "

Deletes the specified virtual tape from the virtual tape shelf (VTS). This operation is only supported in the tape gateway type.

", "DeleteVolume": "

Deletes the specified storage volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is only supported in the cached volume and stored volume types. For stored volume gateways, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

Before you delete a volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

", "DescribeAvailabilityMonitorTest": "

Returns information about the most recent High Availability monitoring test that was performed on the host in a cluster. If a test isn't performed, the status and start time in the response would be null.

", "DescribeBandwidthRateLimit": "

Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume, and tape gateway types.

This operation returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

", - "DescribeCache": "

Returns information about the cache of a gateway. This operation is only supported in the cached volume, tape and file gateway types.

The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.

", - "DescribeCachediSCSIVolumes": "

Returns a description of the gateway volumes specified in the request. This operation is only supported in the cached volume gateway types.

The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).

", + "DescribeCache": "

Returns information about the cache of a gateway. This operation is only supported in the cached volume, tape, and file gateway types.

The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.

", + "DescribeCachediSCSIVolumes": "

Returns a description of the gateway volumes specified in the request. This operation is only supported in the cached volume gateway types.

The list of gateway volumes in the request must be from one gateway. In the response, AWS Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).

", "DescribeChapCredentials": "

Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials information for a specified iSCSI target, one for each target-initiator pair. This operation is supported in the volume and tape gateway types.

", "DescribeGatewayInformation": "

Returns metadata about a gateway such as its name, network interfaces, configured time zone, and the state (whether the gateway is running or not). To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

", "DescribeMaintenanceStartTime": "

Returns your gateway's weekly maintenance start time including the day and time of the week. Note that values are in terms of the gateway's time zone.

", @@ -38,7 +39,7 @@ "DescribeSMBFileShares": "

Gets a description for one or more Server Message Block (SMB) file shares from a file gateway. This operation is only supported for file gateways.

", "DescribeSMBSettings": "

Gets a description of a Server Message Block (SMB) file share settings from a file gateway. This operation is only supported for file gateways.

", "DescribeSnapshotSchedule": "

Describes the snapshot schedule for the specified gateway volume. The snapshot schedule information includes intervals at which snapshots are automatically initiated on the volume. This operation is only supported in the cached volume and stored volume types.

", - "DescribeStorediSCSIVolumes": "

Returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume ARNs. This operation is only supported in stored volume gateway type.

", + "DescribeStorediSCSIVolumes": "

Returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response AWS Storage Gateway returns volume information sorted by volume ARNs. This operation is only supported in stored volume gateway type.

", "DescribeTapeArchives": "

Returns a description of specified virtual tapes in the virtual tape shelf (VTS). This operation is only supported in the tape gateway type.

If a specific TapeARN is not specified, AWS Storage Gateway returns a description of all virtual tapes found in the VTS associated with your account.

", "DescribeTapeRecoveryPoints": "

Returns a list of virtual tape recovery points that are available for the specified tape gateway.

A recovery point is a point-in-time view of a virtual tape at which all the data on the virtual tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway. This operation is only supported in the tape gateway type.

", "DescribeTapes": "

Returns a description of the specified Amazon Resource Name (ARN) of virtual tapes. If a TapeARN is not specified, returns a description of all virtual tapes associated with the specified gateway. This operation is only supported in the tape gateway type.

", @@ -46,8 +47,9 @@ "DescribeVTLDevices": "

Returns a description of virtual tape library (VTL) devices for the specified tape gateway. In the response, AWS Storage Gateway returns VTL device information.

This operation is only supported in the tape gateway type.

", "DescribeWorkingStorage": "

Returns information about the working storage of a gateway. This operation is only supported in the stored volumes gateway type. This operation is deprecated in cached volumes API version (20120630). Use DescribeUploadBuffer instead.

Working storage is also referred to as upload buffer. You can also use the DescribeUploadBuffer operation to add upload buffer to a stored volume gateway.

The response includes disk IDs that are configured as working storage, and it includes the amount of working storage allocated and used.

", "DetachVolume": "

Disconnects a volume from an iSCSI connection and then detaches the volume from the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance. This operation is only supported in the volume gateway type.

", - "DisableGateway": "

Disables a tape gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.

Use this operation for a tape gateway that is not reachable or not functioning. This operation is only supported in the tape gateway type.

Once a gateway is disabled it cannot be enabled.

", + "DisableGateway": "

Disables a tape gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.

Use this operation for a tape gateway that is not reachable or not functioning. This operation is only supported in the tape gateway type.

After a gateway is disabled, it cannot be enabled.

", "JoinDomain": "

Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol.

", + "ListAutomaticTapeCreationPolicies": "

Lists the automatic tape creation policies for a gateway. If there are no automatic tape creation policies for the gateway, it returns an empty list.

This operation is only supported for tape gateways.

", "ListFileShares": "

Gets a list of the file shares for a specific file gateway, or the list of file shares that belong to the calling user account. This operation is only supported for file gateways.

", "ListGateways": "

Lists gateways owned by an AWS account in an AWS Region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).

By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.

If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.

", "ListLocalDisks": "

Returns a list of the gateway's local disks. To specify which gateway to describe, you use the Amazon Resource Name (ARN) of the gateway in the body of the request.

The request returns a list of all disks, specifying which are configured as working storage, cache storage, or stored volume or not configured at all. The response includes a DiskStatus field. This field can have a value of present (the disk is available to use), missing (the disk is no longer connected to the gateway), or mismatch (the disk node is occupied by a disk that has incorrect metadata or the disk content is corrupted).

", @@ -59,14 +61,15 @@ "NotifyWhenUploaded": "

Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.

For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).

", "RefreshCache": "

Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting Notified About File Operations.

When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes.

Throttle limit: This API is asynchronous so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting Notified About File Operations.

If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server.

For more information, see \"https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification\".

", "RemoveTagsFromResource": "

Removes one or more tags from the specified resource. This operation is supported in storage gateways of all types.

", - "ResetCache": "

Resets all cache disks that have encountered a error and makes the disks available for reconfiguration as cache storage. If your cache disk encounters a error, the gateway prevents read and write operations on virtual tapes in the gateway. For example, an error can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the gateway loses its cache storage. At this point you can reconfigure the disks as cache disks. This operation is only supported in the cached volume and tape types.

If the cache disk you are resetting contains data that has not been uploaded to Amazon S3 yet, that data can be lost. After you reset cache disks, there will be no configured cache disks left in the gateway, so you must configure at least one new cache disk for your gateway to function properly.

", + "ResetCache": "

Resets all cache disks that have encountered an error and makes the disks available for reconfiguration as cache storage. If your cache disk encounters an error, the gateway prevents read and write operations on virtual tapes in the gateway. For example, an error can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the gateway loses its cache storage. At this point, you can reconfigure the disks as cache disks. This operation is only supported in the cached volume and tape types.

If the cache disk you are resetting contains data that has not been uploaded to Amazon S3 yet, that data can be lost. After you reset cache disks, there will be no configured cache disks left in the gateway, so you must configure at least one new cache disk for your gateway to function properly.

", "RetrieveTapeArchive": "

Retrieves an archived virtual tape from the virtual tape shelf (VTS) to a tape gateway. Virtual tapes archived in the VTS are not associated with any gateway. However after a tape is retrieved, it is associated with a gateway, even though it is also listed in the VTS, that is, archive. This operation is only supported in the tape gateway type.

Once a tape is successfully retrieved to a gateway, it cannot be retrieved again to another gateway. You must archive the tape again before you can retrieve it to another gateway. This operation is only supported in the tape gateway type.

", "RetrieveTapeRecoveryPoint": "

Retrieves the recovery point for the specified virtual tape. This operation is only supported in the tape gateway type.

A recovery point is a point in time view of a virtual tape at which all the data on the tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway.

The virtual tape can be retrieved to only one gateway. The retrieved tape is read-only. The virtual tape can be retrieved to only a tape gateway. There is no charge for retrieving recovery points.

", "SetLocalConsolePassword": "

Sets the password for your VM local console. When you log in to the local console for the first time, you log in to the VM with the default credentials. We recommend that you set a new password. You don't need to know the default password to set a new password.

", "SetSMBGuestPassword": "

Sets the password for the guest user smbguest. The smbguest user is the user when the authentication method for the file share is set to GuestAccess.

", - "ShutdownGateway": "

Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

The operation shuts down the gateway service component running in the gateway's virtual machine (VM) and not the host VM.

If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

If do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.

", + "ShutdownGateway": "

Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

The operation shuts down the gateway service component running in the gateway's virtual machine (VM) and not the host VM.

If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

If you do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.

", "StartAvailabilityMonitorTest": "

Start a test that verifies that the specified gateway is configured for High Availability monitoring in your host environment. This request only initiates the test; a successful response only indicates that the test was started. It doesn't indicate that the test passed. For the status of the test, invoke the DescribeAvailabilityMonitorTest API.

Starting this test will cause your gateway to go offline for a brief period.

", "StartGateway": "

Starts a gateway that you previously shut down (see ShutdownGateway). After the gateway starts, you can then make other API calls, your applications can read from or write to the gateway's storage volumes and you will be able to take snapshot backups.

When you make a request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to be ready. You should call DescribeGatewayInformation and check the status before making any additional API calls. For more information, see ActivateGateway.

To specify which gateway to start, use the Amazon Resource Name (ARN) of the gateway in your request.

", + "UpdateAutomaticTapeCreationPolicy": "

Updates the automatic tape creation policy of a gateway. Use this to update the policy with a new set of automatic tape creation rules. This is only supported for tape gateways.

By default, there is no automatic tape creation policy.

A gateway can have only one automatic tape creation policy.

", "UpdateBandwidthRateLimit": "

Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume, and tape gateway types.

By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

", "UpdateChapCredentials": "

Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it. This operation is supported in the volume and tape gateway types.

When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.

", "UpdateGatewayInformation": "

Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

For Gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN.

", @@ -138,7 +141,7 @@ } }, "AddWorkingStorageOutput": { - "base": "

A JSON object containing the of the gateway for which working storage was configured.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway for which working storage was configured.

", "refs": { } }, @@ -177,6 +180,31 @@ "SMBFileShareInfo$Authentication": null } }, + "AutomaticTapeCreationPolicyInfo": { + "base": "

Information about the gateway's automatic tape creation policies, including the automatic tape creation rules and the gateway that is using the policies.

", + "refs": { + "AutomaticTapeCreationPolicyInfos$member": null + } + }, + "AutomaticTapeCreationPolicyInfos": { + "base": null, + "refs": { + "ListAutomaticTapeCreationPoliciesOutput$AutomaticTapeCreationPolicyInfos": "

Gets a listing of information about the gateway's automatic tape creation policies, including the automatic tape creation rules and the gateway that is using the policies.

" + } + }, + "AutomaticTapeCreationRule": { + "base": "

An automatic tape creation policy consists of automatic tape creation rules where each rule defines when and how to create new tapes.

", + "refs": { + "AutomaticTapeCreationRules$member": null + } + }, + "AutomaticTapeCreationRules": { + "base": null, + "refs": { + "AutomaticTapeCreationPolicyInfo$AutomaticTapeCreationRules": "

An automatic tape creation policy consists of a list of automatic tape creation rules. This returns the rules that determine when and how to automatically create new tapes.

", + "UpdateAutomaticTapeCreationPolicyInput$AutomaticTapeCreationRules": "

An automatic tape creation policy consists of a list of automatic tape creation rules. The rules determine when and how to automatically create new tapes.

" + } + }, "AvailabilityMonitorTestStatus": { "base": null, "refs": { @@ -206,19 +234,19 @@ "Boolean": { "base": null, "refs": { - "CreateCachediSCSIVolumeInput$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", - "CreateNFSFileShareInput$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "CreateCachediSCSIVolumeInput$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "CreateNFSFileShareInput$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", "CreateNFSFileShareInput$ReadOnly": "

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

", "CreateNFSFileShareInput$GuessMIMETypeEnabled": "

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

", "CreateNFSFileShareInput$RequesterPays": "

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

", - "CreateSMBFileShareInput$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "CreateSMBFileShareInput$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", "CreateSMBFileShareInput$ReadOnly": "

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

", "CreateSMBFileShareInput$GuessMIMETypeEnabled": "

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

", "CreateSMBFileShareInput$RequesterPays": "

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

", "CreateSMBFileShareInput$SMBACLEnabled": "

Set this value to \"true\" to enable ACL (access control list) on the SMB file share. Set it to \"false\" to map file and directory permissions to the POSIX permissions.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html in the Storage Gateway User Guide.

", - "CreateStorediSCSIVolumeInput$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", - "CreateTapeWithBarcodeInput$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", - "CreateTapesInput$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "CreateStorediSCSIVolumeInput$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "CreateTapeWithBarcodeInput$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "CreateTapesInput$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", "DescribeSMBSettingsOutput$SMBGuestPasswordSet": "

This value is true if a password for the guest user “smbguest” is set, and otherwise false.

", "DetachVolumeInput$ForceDetach": "

Set to true to forcibly remove the iSCSI connection of the target volume and detach the volume. The default is false. If this value is set to false, you must manually disconnect the iSCSI connection from the target volume.

", "NFSFileShareInfo$ReadOnly": "

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

", @@ -229,11 +257,11 @@ "SMBFileShareInfo$GuessMIMETypeEnabled": "

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

", "SMBFileShareInfo$RequesterPays": "

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

", "SMBFileShareInfo$SMBACLEnabled": "

If this value is set to \"true\", indicates that ACL (access control list) is enabled on the SMB file share. If it is set to \"false\", it indicates that file and directory permissions are mapped to the POSIX permission.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html in the Storage Gateway User Guide.

", - "UpdateNFSFileShareInput$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "UpdateNFSFileShareInput$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", "UpdateNFSFileShareInput$ReadOnly": "

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

", "UpdateNFSFileShareInput$GuessMIMETypeEnabled": "

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

", "UpdateNFSFileShareInput$RequesterPays": "

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

", - "UpdateSMBFileShareInput$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "UpdateSMBFileShareInput$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", "UpdateSMBFileShareInput$ReadOnly": "

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

", "UpdateSMBFileShareInput$GuessMIMETypeEnabled": "

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

", "UpdateSMBFileShareInput$RequesterPays": "

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

", @@ -410,13 +438,23 @@ "UpdateMaintenanceStartTimeInput$DayOfWeek": "

The day of the week component of the maintenance start time week represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 Saturday.

" } }, + "DeleteAutomaticTapeCreationPolicyInput": { + "base": null, + "refs": { + } + }, + "DeleteAutomaticTapeCreationPolicyOutput": { + "base": null, + "refs": { + } + }, "DeleteBandwidthRateLimitInput": { "base": "

A JSON object containing the following fields:

", "refs": { } }, "DeleteBandwidthRateLimitOutput": { - "base": "

A JSON object containing the of the gateway whose bandwidth rate information was deleted.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway whose bandwidth rate information was deleted.

", "refs": { } }, @@ -486,7 +524,7 @@ } }, "DeleteVolumeOutput": { - "base": "

A JSON object containing the of the storage volume that was deleted

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the storage volume that was deleted

", "refs": { } }, @@ -501,7 +539,7 @@ } }, "DescribeBandwidthRateLimitInput": { - "base": "

A JSON object containing the of the gateway.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway.

", "refs": { } }, @@ -551,7 +589,7 @@ } }, "DescribeMaintenanceStartTimeInput": { - "base": "

A JSON object containing the of the gateway.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway.

", "refs": { } }, @@ -661,7 +699,7 @@ } }, "DescribeWorkingStorageInput": { - "base": "

A JSON object containing the of the gateway.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway.

", "refs": { } }, @@ -746,10 +784,10 @@ "DiskIds": { "base": null, "refs": { - "AddCacheInput$DiskIds": "

An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

", - "AddUploadBufferInput$DiskIds": "

An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

", - "AddWorkingStorageInput$DiskIds": "

An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

", - "DescribeCacheOutput$DiskIds": "

An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

", + "AddCacheInput$DiskIds": "

An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

", + "AddUploadBufferInput$DiskIds": "

An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

", + "AddWorkingStorageInput$DiskIds": "

An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

", + "DescribeCacheOutput$DiskIds": "

An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

", "DescribeUploadBufferOutput$DiskIds": "

An array of the gateway's local disk IDs that are configured as working storage. Each local disk ID is specified as a string (minimum length of 1 and maximum length of 300). If no local disks are configured as working storage, then the DiskIds array is empty.

", "DescribeWorkingStorageOutput$DiskIds": "

An array of the gateway's local disk IDs that are configured as working storage. Each local disk ID is specified as a string (minimum length of 1 and maximum length of 300). If no local disks are configured as working storage, then the DiskIds array is empty.

" } @@ -888,7 +926,7 @@ "refs": { "CreateSMBFileShareInput$AdminUserList": "

A list of users in the Active Directory that will be granted administrator privileges on the file share. These users can do all file operations as the super-user.

Use this option very carefully, because any user in this list can do anything they like on the file share, regardless of file permissions.

", "CreateSMBFileShareInput$ValidUserList": "

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

", - "CreateSMBFileShareInput$InvalidUserList": "

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

", + "CreateSMBFileShareInput$InvalidUserList": "

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example, @group1. Can only be set if Authentication is set to ActiveDirectory.

", "SMBFileShareInfo$AdminUserList": "

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

", "SMBFileShareInfo$ValidUserList": "

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

", "SMBFileShareInfo$InvalidUserList": "

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

", @@ -920,6 +958,7 @@ "AddWorkingStorageInput$GatewayARN": null, "AddWorkingStorageOutput$GatewayARN": null, "AttachVolumeInput$GatewayARN": "

The Amazon Resource Name (ARN) of the gateway that you want to attach the volume to.

", + "AutomaticTapeCreationPolicyInfo$GatewayARN": null, "CancelArchivalInput$GatewayARN": null, "CancelRetrievalInput$GatewayARN": null, "CreateCachediSCSIVolumeInput$GatewayARN": null, @@ -928,6 +967,8 @@ "CreateStorediSCSIVolumeInput$GatewayARN": null, "CreateTapeWithBarcodeInput$GatewayARN": "

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tape with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

", "CreateTapesInput$GatewayARN": "

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tapes with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

", + "DeleteAutomaticTapeCreationPolicyInput$GatewayARN": null, + "DeleteAutomaticTapeCreationPolicyOutput$GatewayARN": null, "DeleteBandwidthRateLimitInput$GatewayARN": null, "DeleteBandwidthRateLimitOutput$GatewayARN": null, "DeleteGatewayInput$GatewayARN": null, @@ -960,7 +1001,8 @@ "GatewayInfo$GatewayARN": "

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

", "JoinDomainInput$GatewayARN": "

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

", "JoinDomainOutput$GatewayARN": "

The unique Amazon Resource Name (ARN) of the gateway that joined the domain.

", - "ListFileSharesInput$GatewayARN": "

The Amazon resource Name (ARN) of the gateway whose file shares you want to list. If this field is not present, all file shares under your account are listed.

", + "ListAutomaticTapeCreationPoliciesInput$GatewayARN": null, + "ListFileSharesInput$GatewayARN": "

The Amazon Resource Name (ARN) of the gateway whose file shares you want to list. If this field is not present, all file shares under your account are listed.

", "ListLocalDisksInput$GatewayARN": null, "ListLocalDisksOutput$GatewayARN": null, "ListVolumeRecoveryPointsInput$GatewayARN": null, @@ -985,6 +1027,8 @@ "StartGatewayOutput$GatewayARN": null, "TapeArchive$RetrievedTo": "

The Amazon Resource Name (ARN) of the tape gateway that the virtual tape is being retrieved to.

The virtual tape is retrieved from the virtual tape shelf (VTS).

", "TapeInfo$GatewayARN": "

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

", + "UpdateAutomaticTapeCreationPolicyInput$GatewayARN": null, + "UpdateAutomaticTapeCreationPolicyOutput$GatewayARN": null, "UpdateBandwidthRateLimitInput$GatewayARN": null, "UpdateBandwidthRateLimitOutput$GatewayARN": null, "UpdateGatewayInformationInput$GatewayARN": null, @@ -1137,22 +1181,22 @@ } }, "KMSKey": { - "base": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "base": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", "refs": { "CachediSCSIVolume$KMSKey": null, - "CreateCachediSCSIVolumeInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", - "CreateNFSFileShareInput$KMSKey": "

The Amazon Resource Name (ARN) AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", - "CreateSMBFileShareInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", - "CreateStorediSCSIVolumeInput$KMSKey": "

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", - "CreateTapeWithBarcodeInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS Key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", - "CreateTapesInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "CreateCachediSCSIVolumeInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "CreateNFSFileShareInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "CreateSMBFileShareInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "CreateStorediSCSIVolumeInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "CreateTapeWithBarcodeInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "CreateTapesInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", "NFSFileShareInfo$KMSKey": null, "SMBFileShareInfo$KMSKey": null, "StorediSCSIVolume$KMSKey": null, "Tape$KMSKey": null, "TapeArchive$KMSKey": null, - "UpdateNFSFileShareInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", - "UpdateSMBFileShareInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "UpdateNFSFileShareInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "UpdateSMBFileShareInput$KMSKey": "

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" } }, "LastSoftwareUpdate": { @@ -1161,6 +1205,16 @@ "DescribeGatewayInformationOutput$LastSoftwareUpdate": "

The date on which the last software update was applied to the gateway. If the gateway has never been updated, this field does not return a value in the response.

" } }, + "ListAutomaticTapeCreationPoliciesInput": { + "base": null, + "refs": { + } + }, + "ListAutomaticTapeCreationPoliciesOutput": { + "base": null, + "refs": { + } + }, "ListFileSharesInput": { "base": "

ListFileShareInput

", "refs": { @@ -1182,7 +1236,7 @@ } }, "ListLocalDisksInput": { - "base": "

A JSON object containing the of the gateway.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway.

", "refs": { } }, @@ -1286,6 +1340,12 @@ "ActivateGatewayInput$MediumChangerType": "

The value that indicates the type of medium changer to use for tape gateway. This field is optional.

Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

" } }, + "MinimumNumTapes": { + "base": null, + "refs": { + "AutomaticTapeCreationRule$MinimumNumTapes": "

The minimum number of available virtual tapes that the gateway maintains at all times. If the number of tapes on the gateway goes below this value, the gateway creates as many new tapes as are needed to have MinimumNumTapes on the gateway.

" + } + }, "MinuteOfHour": { "base": null, "refs": { @@ -1399,18 +1459,19 @@ "PoolId": { "base": null, "refs": { - "AssignTapePoolInput$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", - "CreateTapeWithBarcodeInput$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", - "CreateTapesInput$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", - "Tape$PoolId": "

The ID of the pool that contains tapes that will be archived. The tapes in this pool are archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", + "AssignTapePoolInput$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", + "AutomaticTapeCreationRule$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the Amazon S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", + "CreateTapeWithBarcodeInput$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", + "CreateTapesInput$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", + "Tape$PoolId": "

The ID of the pool that contains tapes that will be archived. The tapes in this pool are archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", "TapeArchive$PoolId": "

The ID of the pool that was used to archive the tape. The tapes in this pool are archived in the S3 storage class that is associated with the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

", - "TapeInfo$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "TapeInfo$PoolId": "

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" } }, "PositiveIntObject": { "base": null, "refs": { - "DescribeTapeArchivesInput$Limit": "

Specifies that the number of virtual tapes descried be limited to the specified number.

", + "DescribeTapeArchivesInput$Limit": "

Specifies that the number of virtual tapes described be limited to the specified number.

", "DescribeTapeRecoveryPointsInput$Limit": "

Specifies that the number of virtual tape recovery points that are described be limited to the specified number.

", "DescribeTapesInput$Limit": "

Specifies that the number of virtual tapes described be limited to the specified number.

Amazon Web Services may impose its own limit, if this field is not set.

", "DescribeVTLDevicesInput$Limit": "

Specifies that the number of VTL devices described be limited to the specified number.

", @@ -1556,12 +1617,12 @@ } }, "ShutdownGatewayInput": { - "base": "

A JSON object containing the of the gateway to shut down.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway to shut down.

", "refs": { } }, "ShutdownGatewayOutput": { - "base": "

A JSON object containing the of the gateway that was shut down.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway that was shut down.

", "refs": { } }, @@ -1602,12 +1663,12 @@ } }, "StartGatewayInput": { - "base": "

A JSON object containing the of the gateway to start.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway to start.

", "refs": { } }, "StartGatewayOutput": { - "base": "

A JSON object containing the of the gateway that was restarted.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway that was restarted.

", "refs": { } }, @@ -1623,7 +1684,7 @@ } }, "StorageGatewayError": { - "base": "

Provides additional information about an error that was returned by the service as an or. See the errorCode and errorDetails members for more information about the error.

", + "base": "

Provides additional information about an error that was returned by the service. See the errorCode and errorDetails members for more information about the error.

", "refs": { "InternalServerError$error": "

A StorageGatewayError that provides more information about the cause of the error.

", "InvalidGatewayRequestException$error": "

A StorageGatewayError that provides more detail about the cause of the error.

", @@ -1658,7 +1719,7 @@ "TagKeys": { "base": null, "refs": { - "RemoveTagsFromResourceInput$TagKeys": "

The keys of the tags you want to remove from the specified resource. A tag is composed of a key/value pair.

" + "RemoveTagsFromResourceInput$TagKeys": "

The keys of the tags you want to remove from the specified resource. A tag is composed of a key-value pair.

" } }, "TagValue": { @@ -1758,6 +1819,7 @@ "TapeBarcodePrefix": { "base": null, "refs": { + "AutomaticTapeCreationRule$TapeBarcodePrefix": "

A prefix that you append to the barcode of the virtual tape that you are creating. This prefix makes the barcode unique.

The prefix must be 1-4 characters in length and must be one of the uppercase letters from A to Z.

", "CreateTapesInput$TapeBarcodePrefix": "

A prefix that you append to the barcode of the virtual tape you are creating. This prefix makes the barcode unique.

The prefix must be 1 to 4 characters in length and must be one of the uppercase letters from A to Z.

" } }, @@ -1774,7 +1836,7 @@ } }, "TapeInfos": { - "base": "

An array of TapeInfo objects, where each object describes an a single tape. If there not tapes in the tape library or VTS, then the TapeInfos is an empty array.

", + "base": "

An array of TapeInfo objects, where each object describes a single tape. If there are no tapes in the tape library or VTS, then the TapeInfos is an empty array.

", "refs": { "ListTapesOutput$TapeInfos": null } @@ -1800,8 +1862,9 @@ "TapeSize": { "base": null, "refs": { - "CreateTapeWithBarcodeInput$TapeSizeInBytes": "

The size, in bytes, of the virtual tape that you want to create.

The size must be aligned by gigabyte (1024*1024*1024 byte).

", - "CreateTapesInput$TapeSizeInBytes": "

The size, in bytes, of the virtual tapes that you want to create.

The size must be aligned by gigabyte (1024*1024*1024 byte).

", + "AutomaticTapeCreationRule$TapeSizeInBytes": "

The size, in bytes, of the virtual tape capacity.

", + "CreateTapeWithBarcodeInput$TapeSizeInBytes": "

The size, in bytes, of the virtual tape that you want to create.

The size must be aligned by gigabyte (1024*1024*1024 bytes).

", + "CreateTapesInput$TapeSizeInBytes": "

The size, in bytes, of the virtual tapes that you want to create.

The size must be aligned by gigabyte (1024*1024*1024 bytes).

", "Tape$TapeSizeInBytes": "

The size, in bytes, of the virtual tape capacity.

", "TapeArchive$TapeSizeInBytes": "

The size, in bytes, of the archived virtual tape.

", "TapeInfo$TapeSizeInBytes": "

The size, in bytes, of a virtual tape.

", @@ -1870,13 +1933,23 @@ "JoinDomainInput$TimeoutInSeconds": "

Specifies the time in seconds, in which the JoinDomain operation must complete. The default is 20 seconds.

" } }, + "UpdateAutomaticTapeCreationPolicyInput": { + "base": null, + "refs": { + } + }, + "UpdateAutomaticTapeCreationPolicyOutput": { + "base": null, + "refs": { + } + }, "UpdateBandwidthRateLimitInput": { "base": "

A JSON object containing one or more of the following fields:

", "refs": { } }, "UpdateBandwidthRateLimitOutput": { - "base": "

A JSON object containing the of the gateway whose throttle information was updated.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway whose throttle information was updated.

", "refs": { } }, @@ -1901,12 +1974,12 @@ } }, "UpdateGatewaySoftwareNowInput": { - "base": "

A JSON object containing the of the gateway to update.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway to update.

", "refs": { } }, "UpdateGatewaySoftwareNowOutput": { - "base": "

A JSON object containing the of the gateway that was updated.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway that was updated.

", "refs": { } }, @@ -1916,7 +1989,7 @@ } }, "UpdateMaintenanceStartTimeOutput": { - "base": "

A JSON object containing the of the gateway whose maintenance start time is updated.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the gateway whose maintenance start time is updated.

", "refs": { } }, @@ -1956,7 +2029,7 @@ } }, "UpdateSnapshotScheduleOutput": { - "base": "

A JSON object containing the of the updated storage volume.

", + "base": "

A JSON object containing the Amazon Resource Name (ARN) of the updated storage volume.

", "refs": { } }, @@ -2013,7 +2086,7 @@ "VTLDevices": { "base": null, "refs": { - "DescribeVTLDevicesOutput$VTLDevices": "

An array of VTL device objects composed of the Amazon Resource Name(ARN) of the VTL devices.

" + "DescribeVTLDevicesOutput$VTLDevices": "

An array of VTL device objects composed of the Amazon Resource Name (ARN) of the VTL devices.

" } }, "VolumeARN": { @@ -2049,8 +2122,8 @@ "VolumeARNs": { "base": null, "refs": { - "DescribeCachediSCSIVolumesInput$VolumeARNs": "

An array of strings where each string represents the Amazon Resource Name (ARN) of a cached volume. All of the specified cached volumes must from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

", - "DescribeStorediSCSIVolumesInput$VolumeARNs": "

An array of strings where each string represents the Amazon Resource Name (ARN) of a stored volume. All of the specified stored volumes must from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

" + "DescribeCachediSCSIVolumesInput$VolumeARNs": "

An array of strings where each string represents the Amazon Resource Name (ARN) of a cached volume. All of the specified cached volumes must be from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

", + "DescribeStorediSCSIVolumesInput$VolumeARNs": "

An array of strings where each string represents the Amazon Resource Name (ARN) of a stored volume. All of the specified stored volumes must be from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

" } }, "VolumeAttachmentStatus": { @@ -2128,7 +2201,7 @@ "CreateStorediSCSIVolumeInput$PreserveExistingData": "

Specify this field as true if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.

Valid Values: true, false

", "DeleteFileShareInput$ForceDelete": "

If this value is set to true, the operation deletes a file share immediately and aborts all data uploads to AWS. Otherwise, the file share is not deleted until all data is uploaded to AWS. This process aborts the data upload process, and the file share enters the FORCE_DELETING status.

", "DeviceiSCSIAttributes$ChapEnabled": "

Indicates whether mutual CHAP is enabled for the iSCSI target.

", - "NFSFileShareInfo$KMSEncrypted": "

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", + "NFSFileShareInfo$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", "SMBFileShareInfo$KMSEncrypted": "

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

", "StorediSCSIVolume$PreservedExistingData": "

Indicates whether, when the stored volume was created, existing data on the underlying local disk was preserved.

Valid Values: true, false

", "VolumeiSCSIAttributes$ChapEnabled": "

Indicates whether mutual CHAP is enabled for the iSCSI target.

" @@ -2162,7 +2235,7 @@ "CachediSCSIVolume$VolumeSizeInBytes": "

The size, in bytes, of the volume capacity.

", "CreateCachediSCSIVolumeInput$VolumeSizeInBytes": "

The size of the volume in bytes.

", "CreateStorediSCSIVolumeOutput$VolumeSizeInBytes": "

The size of the volume in bytes.

", - "DescribeCacheOutput$CacheAllocatedInBytes": "

The amount of cache in bytes allocated to the a gateway.

", + "DescribeCacheOutput$CacheAllocatedInBytes": "

The amount of cache in bytes allocated to a gateway.

", "DescribeUploadBufferOutput$UploadBufferUsedInBytes": "

The total number of bytes being used in the gateway's upload buffer.

", "DescribeUploadBufferOutput$UploadBufferAllocatedInBytes": "

The total number of bytes allocated in the gateway's upload buffer.

", "DescribeWorkingStorageOutput$WorkingStorageUsedInBytes": "

The total working storage in bytes in use by the gateway. If no working storage is configured for the gateway, this field returns 0.

", diff --git a/models/apis/transcribe-streaming/2017-10-26/api-2.json b/models/apis/transcribe-streaming/2017-10-26/api-2.json index e9dc6a14092..da095533399 100755 --- a/models/apis/transcribe-streaming/2017-10-26/api-2.json +++ b/models/apis/transcribe-streaming/2017-10-26/api-2.json @@ -24,7 +24,8 @@ {"shape":"BadRequestException"}, {"shape":"LimitExceededException"}, {"shape":"InternalFailureException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ServiceUnavailableException"} ] } }, @@ -148,6 +149,14 @@ "type":"list", "member":{"shape":"Result"} }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":503}, + "exception":true + }, "SessionId":{ "type":"string", "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" @@ -248,7 +257,8 @@ "BadRequestException":{"shape":"BadRequestException"}, "LimitExceededException":{"shape":"LimitExceededException"}, "InternalFailureException":{"shape":"InternalFailureException"}, - "ConflictException":{"shape":"ConflictException"} + "ConflictException":{"shape":"ConflictException"}, + "ServiceUnavailableException":{"shape":"ServiceUnavailableException"} }, "eventstream":true }, diff --git a/models/apis/transcribe-streaming/2017-10-26/docs-2.json b/models/apis/transcribe-streaming/2017-10-26/docs-2.json index 7945a1be246..49275b64180 100755 --- a/models/apis/transcribe-streaming/2017-10-26/docs-2.json +++ b/models/apis/transcribe-streaming/2017-10-26/docs-2.json @@ -131,6 +131,12 @@ "Transcript$Results": "

Result objects that contain the results of transcribing a portion of the input audio stream. The array can be empty.

" } }, + "ServiceUnavailableException": { + "base": "

Service is currently unavailable. Try your request later.

", + "refs": { + "TranscriptResultStream$ServiceUnavailableException": "

Service is currently unavailable. Try your request later.

" + } + }, "SessionId": { "base": null, "refs": { @@ -157,7 +163,8 @@ "InternalFailureException$Message": null, "Item$Content": "

The word or punctuation that was recognized in the input audio.

", "LimitExceededException$Message": null, - "Result$ResultId": "

A unique identifier for the result.

" + "Result$ResultId": "

A unique identifier for the result.

", + "ServiceUnavailableException$Message": null } }, "Transcript": { diff --git a/models/apis/transfer/2018-11-05/api-2.json b/models/apis/transfer/2018-11-05/api-2.json index bc6c361330b..5b87796b8e8 100644 --- a/models/apis/transfer/2018-11-05/api-2.json +++ b/models/apis/transfer/2018-11-05/api-2.json @@ -6,7 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceAbbreviation":"AWS Transfer", - "serviceFullName":"AWS Transfer for SFTP", + "serviceFullName":"AWS Transfer Family", "serviceId":"Transfer", "signatureVersion":"v4", "signingName":"transfer", @@ -23,10 +23,12 @@ "input":{"shape":"CreateServerRequest"}, "output":{"shape":"CreateServerResponse"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ServiceUnavailableException"}, {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"}, - {"shape":"ResourceExistsException"} + {"shape":"ResourceExistsException"}, + {"shape":"ThrottlingException"} ] }, "CreateUser":{ @@ -53,6 +55,7 @@ }, "input":{"shape":"DeleteServerRequest"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ServiceUnavailableException"}, {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"}, @@ -263,6 +266,7 @@ "input":{"shape":"UpdateServerRequest"}, "output":{"shape":"UpdateServerResponse"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ConflictException"}, {"shape":"InternalServiceError"}, @@ -290,6 +294,14 @@ } }, "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ServiceErrorMessage"} + }, + "exception":true, + "synthetic":true + }, "AddressAllocationId":{"type":"string"}, "AddressAllocationIds":{ "type":"list", @@ -301,6 +313,10 @@ "min":20, "pattern":"arn:.*" }, + "Certificate":{ + "type":"string", + "max":1600 + }, "ConflictException":{ "type":"structure", "required":["Message"], @@ -312,12 +328,14 @@ "CreateServerRequest":{ "type":"structure", "members":{ + 
"Certificate":{"shape":"Certificate"}, "EndpointDetails":{"shape":"EndpointDetails"}, "EndpointType":{"shape":"EndpointType"}, "HostKey":{"shape":"HostKey"}, "IdentityProviderDetails":{"shape":"IdentityProviderDetails"}, "IdentityProviderType":{"shape":"IdentityProviderType"}, "LoggingRole":{"shape":"Role"}, + "Protocols":{"shape":"Protocols"}, "Tags":{"shape":"Tags"} } }, @@ -431,12 +449,14 @@ "required":["Arn"], "members":{ "Arn":{"shape":"Arn"}, + "Certificate":{"shape":"Certificate"}, "EndpointDetails":{"shape":"EndpointDetails"}, "EndpointType":{"shape":"EndpointType"}, "HostKeyFingerprint":{"shape":"HostKeyFingerprint"}, "IdentityProviderDetails":{"shape":"IdentityProviderDetails"}, "IdentityProviderType":{"shape":"IdentityProviderType"}, "LoggingRole":{"shape":"Role"}, + "Protocols":{"shape":"Protocols"}, "ServerId":{"shape":"ServerId"}, "State":{"shape":"State"}, "Tags":{"shape":"Tags"}, @@ -691,6 +711,20 @@ "type":"string", "max":2048 }, + "Protocol":{ + "type":"string", + "enum":[ + "SFTP", + "FTP", + "FTPS" + ] + }, + "Protocols":{ + "type":"list", + "member":{"shape":"Protocol"}, + "max":3, + "min":1 + }, "Resource":{"type":"string"}, "ResourceExistsException":{ "type":"structure", @@ -857,7 +891,8 @@ "members":{ "ServerId":{"shape":"ServerId"}, "UserName":{"shape":"UserName"}, - "UserPassword":{"shape":"UserPassword"} + "UserPassword":{"shape":"UserPassword"}, + "ServerProtocol":{"shape":"Protocol"} } }, "TestIdentityProviderResponse":{ @@ -895,11 +930,13 @@ "type":"structure", "required":["ServerId"], "members":{ + "Certificate":{"shape":"Certificate"}, "EndpointDetails":{"shape":"EndpointDetails"}, "EndpointType":{"shape":"EndpointType"}, "HostKey":{"shape":"HostKey"}, "IdentityProviderDetails":{"shape":"IdentityProviderDetails"}, "LoggingRole":{"shape":"NullableRole"}, + "Protocols":{"shape":"Protocols"}, "ServerId":{"shape":"ServerId"} } }, diff --git a/models/apis/transfer/2018-11-05/docs-2.json b/models/apis/transfer/2018-11-05/docs-2.json index 
2d13640628e..dbd4a93f1fb 100644 --- a/models/apis/transfer/2018-11-05/docs-2.json +++ b/models/apis/transfer/2018-11-05/docs-2.json @@ -1,27 +1,32 @@ { "version": "2.0", - "service": "

AWS Transfer for SFTP is a fully managed service that enables the transfer of files directly into and out of Amazon S3 using the Secure File Transfer Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer for SFTP—by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53—so nothing changes for your customers and partners, or their applications. With your data in S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no infrastructure to buy and set up.

", + "service": "

AWS Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3). AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer Family is easy since there is no infrastructure to buy and set up.

", "operations": { - "CreateServer": "

Instantiates an autoscaling virtual server based on Secure File Transfer Protocol (SFTP) in AWS. When you make updates to your server or when you work with users, use the service-generated ServerId property that is assigned to the newly created server.

", - "CreateUser": "

Creates a user and associates them with an existing Secure File Transfer Protocol (SFTP) server. You can only create and associate users with SFTP servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.

", - "DeleteServer": "

Deletes the Secure File Transfer Protocol (SFTP) server that you specify.

No response returns from this operation.

", + "CreateServer": "

Instantiates an autoscaling virtual server based on the selected file transfer protocol in AWS. When you make updates to your file transfer protocol-enabled server or when you work with users, use the service-generated ServerId property that is assigned to the newly created server.

", + "CreateUser": "

Creates a user and associates them with an existing file transfer protocol-enabled server. You can only create and associate users with servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.

", + "DeleteServer": "

Deletes the file transfer protocol-enabled server that you specify.

No response returns from this operation.

", "DeleteSshPublicKey": "

Deletes a user's Secure Shell (SSH) public key.

No response is returned from this operation.

", - "DeleteUser": "

Deletes the user belonging to the server you specify.

No response returns from this operation.

When you delete a user from a server, the user's information is lost.

", - "DescribeServer": "

Describes the server that you specify by passing the ServerId parameter.

The response contains a description of the server's properties. When you set EndpointType to VPC, the response will contain the EndpointDetails.

", - "DescribeUser": "

Describes the user assigned to a specific server, as identified by its ServerId property.

The response from this call returns the properties of the user associated with the ServerId value that was specified.

", - "ImportSshPublicKey": "

Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to a specific server, identified by ServerId.

The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.

", - "ListServers": "

Lists the Secure File Transfer Protocol (SFTP) servers that are associated with your AWS account.

", + "DeleteUser": "

Deletes the user belonging to a file transfer protocol-enabled server you specify.

No response returns from this operation.

When you delete a user from a server, the user's information is lost.

", + "DescribeServer": "

Describes a file transfer protocol-enabled server that you specify by passing the ServerId parameter.

The response contains a description of a server's properties. When you set EndpointType to VPC, the response will contain the EndpointDetails.

", + "DescribeUser": "

Describes the user assigned to the specific file transfer protocol-enabled server, as identified by its ServerId property.

The response from this call returns the properties of the user associated with the ServerId value that was specified.

", + "ImportSshPublicKey": "

Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to the specific file transfer protocol-enabled server, identified by ServerId.

The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.

", + "ListServers": "

Lists the file transfer protocol-enabled servers that are associated with your AWS account.

", "ListTagsForResource": "

Lists all of the tags associated with the Amazon Resource Name (ARN) you specify. The resource can be a user, server, or role.

", - "ListUsers": "

Lists the users for the server that you specify by passing the ServerId parameter.

", - "StartServer": "

Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE to ONLINE. It has no impact on an SFTP server that is already ONLINE. An ONLINE server can accept and process file transfer jobs.

The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.

No response is returned from this call.

", - "StopServer": "

Changes the state of an SFTP server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server such as server and user properties are not affected by stopping your server. Stopping a server will not reduce or impact your Secure File Transfer Protocol (SFTP) endpoint billing.

The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this call.

", + "ListUsers": "

Lists the users for a file transfer protocol-enabled server that you specify by passing the ServerId parameter.

", + "StartServer": "

Changes the state of a file transfer protocol-enabled server from OFFLINE to ONLINE. It has no impact on a server that is already ONLINE. An ONLINE server can accept and process file transfer jobs.

The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.

No response is returned from this call.

", + "StopServer": "

Changes the state of a file transfer protocol-enabled server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server, such as server and user properties, are not affected by stopping your server. Stopping the server will not reduce or impact your file transfer protocol endpoint billing.

The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this call.

", "TagResource": "

Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.

There is no response returned from this call.

", - "TestIdentityProvider": "

If the IdentityProviderType of the server is API_Gateway, tests whether your API Gateway is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.

", + "TestIdentityProvider": "

If the IdentityProviderType of a file transfer protocol-enabled server is API_Gateway, tests whether your API Gateway is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.

", "UntagResource": "

Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.

No response is returned from this call.

", - "UpdateServer": "

Updates the server properties after that server has been created.

The UpdateServer call returns the ServerId of the Secure File Transfer Protocol (SFTP) server you updated.

", + "UpdateServer": "

Updates the file transfer protocol-enabled server's properties after that server has been created.

The UpdateServer call returns the ServerId of the server you updated.

", "UpdateUser": "

Assigns new properties to a user. Parameters you pass modify any or all of the following: the home directory, role, and policy for the UserName and ServerId you specify.

The response returns the ServerId and the UserName for the updated user.

" }, "shapes": { + "AccessDeniedException": { + "base": "

You do not have sufficient access to perform this action.

", + "refs": { + } + }, "AddressAllocationId": { "base": null, "refs": { @@ -31,24 +36,32 @@ "AddressAllocationIds": { "base": null, "refs": { - "EndpointDetails$AddressAllocationIds": "

A list of address allocation IDs that are required to attach an Elastic IP address to your SFTP server's endpoint. This is only valid in the UpdateServer API.

This property can only be use when EndpointType is set to VPC.

" + "EndpointDetails$AddressAllocationIds": "

A list of address allocation IDs that are required to attach an Elastic IP address to your file transfer protocol-enabled server's endpoint. This is only valid in the UpdateServer API.

This property can only be used when EndpointType is set to VPC.

" } }, "Arn": { "base": null, "refs": { - "DescribedServer$Arn": "

Specifies the unique Amazon Resource Name (ARN) for the server to be described.

", - "DescribedUser$Arn": "

This property contains the unique Amazon Resource Name (ARN) for the user that was requested to be described.

", + "DescribedServer$Arn": "

Specifies the unique Amazon Resource Name (ARN) for a file transfer protocol-enabled server to be described.

", + "DescribedUser$Arn": "

Contains the unique Amazon Resource Name (ARN) for the user that was requested to be described.

", "ListTagsForResourceRequest$Arn": "

Requests the tags associated with a particular Amazon Resource Name (ARN). An ARN is an identifier for a specific AWS resource, such as a server, user, or role.

", - "ListTagsForResourceResponse$Arn": "

This value is the ARN you specified to list the tags of.

", - "ListedServer$Arn": "

The unique Amazon Resource Name (ARN) for the server to be listed.

", - "ListedUser$Arn": "

This property is the unique Amazon Resource Name (ARN) for the user that you want to learn about.

", + "ListTagsForResourceResponse$Arn": "

The ARN you specified to list the tags of.

", + "ListedServer$Arn": "

The unique Amazon Resource Name (ARN) for a file transfer protocol-enabled server to be listed.

", + "ListedUser$Arn": "

The unique Amazon Resource Name (ARN) for the user that you want to learn about.

", "TagResourceRequest$Arn": "

An Amazon Resource Name (ARN) for a specific AWS resource, such as a server, user, or role.

", - "UntagResourceRequest$Arn": "

This is the value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user, or role.

" + "UntagResourceRequest$Arn": "

The value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user, or role.

" + } + }, + "Certificate": { + "base": null, + "refs": { + "CreateServerRequest$Certificate": "

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

", + "DescribedServer$Certificate": "

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

", + "UpdateServerRequest$Certificate": "

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

" } }, "ConflictException": { - "base": "

This exception is thrown when the UpdatServer is called for a server that has VPC as the endpoint type and the server's VpcEndpointID is not in the available state.

", + "base": "

This exception is thrown when the UpdateServer is called for a file transfer protocol-enabled server that has VPC as the endpoint type and the server's VpcEndpointID is not in the available state.

", "refs": { } }, @@ -114,9 +127,9 @@ } }, "DescribedServer": { - "base": "

Describes the properties of the server that was specified. Information returned includes the following: the server Amazon Resource Name (ARN), the authentication configuration and type, the logging role, the server ID and state, and assigned tags or metadata.

", + "base": "

Describes the properties of a file transfer protocol-enabled server that was specified. Information returned includes the following: the server Amazon Resource Name (ARN), the authentication configuration and type, the logging role, the server ID and state, and assigned tags or metadata.

", "refs": { - "DescribeServerResponse$Server": "

An array containing the properties of the server with the ServerID you specified.

" + "DescribeServerResponse$Server": "

An array containing the properties of a file transfer protocol-enabled server with the ServerID you specified.

" } }, "DescribedUser": { @@ -126,29 +139,29 @@ } }, "EndpointDetails": { - "base": "

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

", + "base": "

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

", "refs": { - "CreateServerRequest$EndpointDetails": "

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server to resources only within your VPC. To control incoming internet traffic, you will need to invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

", - "DescribedServer$EndpointDetails": "

The virtual private cloud (VPC) endpoint settings that you configured for your SFTP server.

", - "UpdateServerRequest$EndpointDetails": "

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server to resources only within your VPC. To control incoming internet traffic, you will need to associate one or more Elastic IP addresses with your server's endpoint.

" + "CreateServerRequest$EndpointDetails": "

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. When you host your endpoint within your VPC, you can make it accessible only to resources within your VPC, or you can attach Elastic IPs and make it accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.

", + "DescribedServer$EndpointDetails": "

The virtual private cloud (VPC) endpoint settings that you configured for your file transfer protocol-enabled server.

", + "UpdateServerRequest$EndpointDetails": "

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server to resources only within your VPC. To control incoming internet traffic, you will need to associate one or more Elastic IP addresses with your server's endpoint.

" } }, "EndpointType": { "base": null, "refs": { - "CreateServerRequest$EndpointType": "

The type of VPC endpoint that you want your SFTP server to connect to. You can choose to connect to the public internet or a virtual private cloud (VPC) endpoint. With a VPC endpoint, you can restrict access to your SFTP server and resources only within your VPC.

", - "DescribedServer$EndpointType": "

The type of endpoint that your SFTP server is connected to. If your SFTP server is connected to a VPC endpoint, your server isn't accessible over the public internet.

", - "ListedServer$EndpointType": "

The type of VPC endpoint that your SFTP server is connected to. If your SFTP server is connected to a VPC endpoint, your server isn't accessible over the public internet.

", - "UpdateServerRequest$EndpointType": "

The type of endpoint that you want your SFTP server to connect to. You can choose to connect to the public internet or a virtual private cloud (VPC) endpoint. With a VPC endpoint, your SFTP server isn't accessible over the public internet.

" + "CreateServerRequest$EndpointType": "

The type of VPC endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a virtual private cloud (VPC) endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

", + "DescribedServer$EndpointType": "

The type of endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

", + "ListedServer$EndpointType": "

The type of VPC endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

", + "UpdateServerRequest$EndpointType": "

The type of endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, your server isn't accessible over the public internet.

" } }, "HomeDirectory": { "base": null, "refs": { - "CreateUserRequest$HomeDirectory": "

The landing directory (folder) for a user when they log in to the server using their SFTP client.

An example is <your-Amazon-S3-bucket-name>/home/username.

", - "DescribedUser$HomeDirectory": "

This property specifies the landing directory (or folder), which is the location that files are written to or read from in an Amazon S3 bucket for the described user. An example is /your s3 bucket name/home/username .

", - "ListedUser$HomeDirectory": "

This value specifies the location that files are written to or read from an Amazon S3 bucket for the user you specify by their ARN.

", - "UpdateUserRequest$HomeDirectory": "

A parameter that specifies the landing directory (folder) for a user when they log in to the server using their client.

An example is <your-Amazon-S3-bucket-name>/home/username.

" + "CreateUserRequest$HomeDirectory": "

The landing directory (folder) for a user when they log in to the file transfer protocol-enabled server using the client.

An example is your-Amazon-S3-bucket-name/home/username.

", + "DescribedUser$HomeDirectory": "

Specifies the landing directory (or folder), which is the location that files are written to or read from in an Amazon S3 bucket for the described user. An example is /your s3 bucket name/home/username.

", + "ListedUser$HomeDirectory": "

Specifies the location that files are written to or read from an Amazon S3 bucket for the user you specify by their ARN.

", + "UpdateUserRequest$HomeDirectory": "

Specifies the landing directory (folder) for a user when they log in to the file transfer protocol-enabled server using their file transfer protocol client.

An example is your-Amazon-S3-bucket-name/home/username.

" } }, "HomeDirectoryMapEntry": { @@ -160,47 +173,47 @@ "HomeDirectoryMappings": { "base": null, "refs": { - "CreateUserRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in S3, the entry will be ignored. As a workaround, you can use the S3 api to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

", - "DescribedUser$HomeDirectoryMappings": "

Logical directory mappings that you specified for what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target.

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

", - "UpdateUserRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in S3, the entry will be ignored. As a workaround, you can use the S3 api to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "CreateUserRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in Amazon S3, the entry will be ignored. As a workaround, you can use the Amazon S3 API to create 0 byte objects as placeholders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a '/' for it to be considered a folder.

", + "DescribedUser$HomeDirectoryMappings": "

Logical directory mappings that you specified for what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target.

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

", + "UpdateUserRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in Amazon S3, the entry will be ignored. As a workaround, you can use the Amazon S3 API to create 0 byte objects as placeholders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" } }, "HomeDirectoryType": { "base": null, "refs": { - "CreateUserRequest$HomeDirectoryType": "

The type of landing directory (folder) you want your users' home directory to be when they log into the SFTP server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make S3 paths visible to your user.

", - "DescribedUser$HomeDirectoryType": "

The type of landing directory (folder) you mapped for your users' to see when they log into the SFTP server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make S3 paths visible to your user.

", - "ListedUser$HomeDirectoryType": "

The type of landing directory (folder) you mapped for your users' home directory. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make S3 paths visible to your user.

", - "UpdateUserRequest$HomeDirectoryType": "

The type of landing directory (folder) you want your users' home directory to be when they log into the SFTP serve. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make S3 paths visible to your user.

" + "CreateUserRequest$HomeDirectoryType": "

The type of landing directory (folder) you want your users' home directory to be when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

", + "DescribedUser$HomeDirectoryType": "

The type of landing directory (folder) you mapped for your users to see when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

", + "ListedUser$HomeDirectoryType": "

The type of landing directory (folder) you mapped for your users' home directory. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

", + "UpdateUserRequest$HomeDirectoryType": "

The type of landing directory (folder) you want your users' home directory to be when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" } }, "HostKey": { "base": null, "refs": { - "CreateServerRequest$HostKey": "

The RSA private key as generated by the ssh-keygen -N \"\" -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see \"https://alpha-docs-aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key\" in the AWS SFTP User Guide.

", - "UpdateServerRequest$HostKey": "

The RSA private key as generated by ssh-keygen -N \"\" -f my-new-server-key.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see \"https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key\" in the AWS SFTP User Guide.

" + "CreateServerRequest$HostKey": "

The RSA private key as generated by the ssh-keygen -N \"\" -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Changing the Host Key for Your AWS Transfer Family Server in the AWS Transfer Family User Guide.

", + "UpdateServerRequest$HostKey": "

The RSA private key as generated by ssh-keygen -N \"\" -f my-new-server-key.

If you aren't planning to migrate existing users from an existing file transfer protocol-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Changing the Host Key for Your AWS Transfer Family Server in the AWS Transfer Family User Guide.

" } }, "HostKeyFingerprint": { "base": null, "refs": { - "DescribedServer$HostKeyFingerprint": "

This value contains the message-digest algorithm (MD5) hash of the server's host key. This value is equivalent to the output of the ssh-keygen -l -E md5 -f my-new-server-key command.

" + "DescribedServer$HostKeyFingerprint": "

Contains the message-digest algorithm (MD5) hash of a file transfer protocol-enabled server's host key. This value is equivalent to the output of the ssh-keygen -l -E md5 -f my-new-server-key command.

" } }, "IdentityProviderDetails": { - "base": "

Returns information related to the type of user authentication that is in use for a server's users. A server can have only one method of authentication.

", + "base": "

Returns information related to the type of user authentication that is in use for a file transfer protocol-enabled server's users. A server can have only one method of authentication.

", "refs": { - "CreateServerRequest$IdentityProviderDetails": "

This parameter is required when the IdentityProviderType is set to API_GATEWAY. Accepts an array containing all of the information required to call a customer-supplied authentication API, including the API Gateway URL. This property is not required when the IdentityProviderType is set to SERVICE_MANAGED.

", - "DescribedServer$IdentityProviderDetails": "

Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of the server is SERVICE_MANAGED>.

", - "UpdateServerRequest$IdentityProviderDetails": "

This response parameter is an array containing all of the information required to call a customer's authentication API method.

" + "CreateServerRequest$IdentityProviderDetails": "

Required when IdentityProviderType is set to API_GATEWAY. Accepts an array containing all of the information required to call a customer-supplied authentication API, including the API Gateway URL. Not required when IdentityProviderType is set to SERVICE_MANAGED.

", + "DescribedServer$IdentityProviderDetails": "

Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of a file transfer protocol-enabled server is SERVICE_MANAGED.

", + "UpdateServerRequest$IdentityProviderDetails": "

An array containing all of the information required to call a customer's authentication API method.

" } }, "IdentityProviderType": { - "base": "

Returns information related to the type of user authentication that is in use for a server's users. For SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are stored with a user on an SFTP server instance. For API_GATEWAY authentication, your custom authentication method is implemented by using an API call. A server can have only one method of authentication.

", + "base": "

Returns information related to the type of user authentication that is in use for a file transfer protocol-enabled server's users. For SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are stored with a user on the server instance. For API_GATEWAY authentication, your custom authentication method is implemented by using an API call. The server can have only one method of authentication.

", "refs": { - "CreateServerRequest$IdentityProviderType": "

Specifies the mode of authentication for the SFTP server. The default value is SERVICE_MANAGED, which allows you to store and access SFTP user credentials within the AWS Transfer for SFTP service. Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an API Gateway endpoint URL to call for authentication using the IdentityProviderDetails parameter.

", - "DescribedServer$IdentityProviderType": "

This property defines the mode of authentication method enabled for this service. A value of SERVICE_MANAGED means that you are using this server to store and access SFTP user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

", - "ListedServer$IdentityProviderType": "

The authentication method used to validate a user for the server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" + "CreateServerRequest$IdentityProviderType": "

Specifies the mode of authentication for a file transfer protocol-enabled server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the AWS Transfer Family service. Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an API Gateway endpoint URL to call for authentication using the IdentityProviderDetails parameter.

", + "DescribedServer$IdentityProviderType": "

Defines the mode of authentication method enabled for this service. A value of SERVICE_MANAGED means that you are using this file transfer protocol-enabled server to store and access user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

", + "ListedServer$IdentityProviderType": "

The authentication method used to validate a user for a file transfer protocol-enabled server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" } }, "ImportSshPublicKeyRequest": { @@ -209,12 +222,12 @@ } }, "ImportSshPublicKeyResponse": { - "base": "

This response identifies the user, the server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.

", + "base": "

Identifies the user, the file transfer protocol-enabled server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.

", "refs": { } }, "InternalServiceError": { - "base": "

This exception is thrown when an error occurs in the AWS Transfer for SFTP service.

", + "base": "

This exception is thrown when an error occurs in the AWS Transfer Family service.

", "refs": { } }, @@ -259,7 +272,7 @@ } }, "ListedServer": { - "base": "

Returns properties of the server that was specified.

", + "base": "

Returns properties of a file transfer protocol-enabled server that was specified.

", "refs": { "ListedServers$member": null } @@ -267,7 +280,7 @@ "ListedServers": { "base": null, "refs": { - "ListServersResponse$Servers": "

An array of servers that were listed.

" + "ListServersResponse$Servers": "

An array of file transfer protocol-enabled servers that were listed.

" } }, "ListedUser": { @@ -297,7 +310,7 @@ "MaxResults": { "base": null, "refs": { - "ListServersRequest$MaxResults": "

Specifies the number of servers to return as a response to the ListServers query.

", + "ListServersRequest$MaxResults": "

Specifies the number of file transfer protocol-enabled servers to return as a response to the ListServers query.

", "ListTagsForResourceRequest$MaxResults": "

Specifies the number of tags to return as a response to the ListTagsForResource request.

", "ListUsersRequest$MaxResults": "

Specifies the number of users to return as a response to the ListUsers request.

" } @@ -311,14 +324,14 @@ "InvalidRequestException$Message": null, "ResourceExistsException$Message": null, "ResourceNotFoundException$Message": null, - "TestIdentityProviderResponse$Message": "

A message that indicates whether the test was successful or not.

" + "TestIdentityProviderResponse$Message": "

A message that indicates whether the test was successful or not.

" } }, "NextToken": { "base": null, "refs": { - "ListServersRequest$NextToken": "

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional servers.

", - "ListServersResponse$NextToken": "

When you can get additional results from the ListServers operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing additional servers.

", + "ListServersRequest$NextToken": "

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional file transfer protocol-enabled servers.

", + "ListServersResponse$NextToken": "

When you can get additional results from the ListServers operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing additional file transfer protocol-enabled servers.

", "ListTagsForResourceRequest$NextToken": "

When you request additional results from the ListTagsForResource operation, a NextToken parameter is returned in the input. You can then pass in a subsequent command to the NextToken parameter to continue listing additional tags.

", "ListTagsForResourceResponse$NextToken": "

When you can get additional results from the ListTagsForResource call, a NextToken parameter is returned in the output. You can then pass in a subsequent command to the NextToken parameter to continue listing additional tags.

", "ListUsersRequest$NextToken": "

When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass in a subsequent command to the NextToken parameter to continue listing additional users.

", @@ -328,15 +341,30 @@ "NullableRole": { "base": null, "refs": { - "UpdateServerRequest$LoggingRole": "

A value that changes the AWS Identity and Access Management (IAM) role that allows Amazon S3 events to be logged in Amazon CloudWatch, turning logging on or off.

" + "UpdateServerRequest$LoggingRole": "

Changes the AWS Identity and Access Management (IAM) role that allows Amazon S3 events to be logged in Amazon CloudWatch, turning logging on or off.

" } }, "Policy": { "base": null, "refs": { - "CreateUserRequest$Policy": "

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see \"https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\">Creating a Scope-Down Policy.

For more information, see \"https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html\" in the AWS Security Token Service API Reference.

", + "CreateUserRequest$Policy": "

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a Scope-Down Policy.

For more information, see AssumeRole in the AWS Security Token Service API Reference.

", "DescribedUser$Policy": "

Specifies the name of the policy in use for the described user.

", - "UpdateUserRequest$Policy": "

Allows you to supply a scope-down policy for your user so you can use the same AWS Identity and Access Management (IAM) role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see \"https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\">Creating a Scope-Down Policy.

For more information, see \"https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html\" in the AWS Security Token Service API Reference.

" + "UpdateUserRequest$Policy": "

Allows you to supply a scope-down policy for your user so you can use the same AWS Identity and Access Management (IAM) role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a Scope-Down Policy.

For more information, see AssumeRole in the AWS Security Token Service API Reference.

" + } + }, + "Protocol": { + "base": null, + "refs": { + "Protocols$member": null, + "TestIdentityProviderRequest$ServerProtocol": "

The type of file transfer protocol to be tested.

The available protocols are:

" + } + }, + "Protocols": { + "base": null, + "refs": { + "CreateServerRequest$Protocols": "

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

", + "DescribedServer$Protocols": "

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

", + "UpdateServerRequest$Protocols": "

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

" } }, "Resource": { @@ -352,7 +380,7 @@ } }, "ResourceNotFoundException": { - "base": "

This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.

", + "base": "

This exception is thrown when a resource is not found by the AWS Transfer Family service.

", "refs": { } }, @@ -378,56 +406,57 @@ "Role": { "base": null, "refs": { - "CreateServerRequest$LoggingRole": "

A value that allows the service to write your SFTP users' activity to your Amazon CloudWatch logs for monitoring and auditing purposes.

", - "CreateUserRequest$Role": "

The IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the SFTP server to access your resources when servicing your SFTP user's transfer requests.

", - "DescribedServer$LoggingRole": "

This property is an AWS Identity and Access Management (IAM) entity that allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

", - "DescribedUser$Role": "

This property specifies the IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the SFTP server to access your resources when servicing your SFTP user's transfer requests.

", - "IdentityProviderDetails$InvocationRole": "

The InvocationRole parameter provides the type of InvocationRole used to authenticate the user account.

", - "ListedServer$LoggingRole": "

The AWS Identity and Access Management entity that allows the server to turn on Amazon CloudWatch logging.

", - "ListedUser$Role": "

The role in use by this user. A role is an AWS Identity and Access Management (IAM) entity that, in this case, allows the SFTP server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

", - "UpdateUserRequest$Role": "

The IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the Secure File Transfer Protocol (SFTP) server to access your resources when servicing your SFTP user's transfer requests.

" + "CreateServerRequest$LoggingRole": "

Allows the service to write your users' activity to your Amazon CloudWatch logs for monitoring and auditing purposes.

", + "CreateUserRequest$Role": "

The IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

", + "DescribedServer$LoggingRole": "

An AWS Identity and Access Management (IAM) entity that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

", + "DescribedUser$Role": "

Specifies the IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows a file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

", + "IdentityProviderDetails$InvocationRole": "

Provides the type of InvocationRole used to authenticate the user account.

", + "ListedServer$LoggingRole": "

The AWS Identity and Access Management (IAM) entity that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging.

", + "ListedUser$Role": "

The role in use by this user. A role is an AWS Identity and Access Management (IAM) entity that, in this case, allows a file transfer protocol-enabled server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

", + "UpdateUserRequest$Role": "

The IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

" } }, "ServerId": { "base": null, "refs": { - "CreateServerResponse$ServerId": "

The service-assigned ID of the SFTP server that is created.

", - "CreateUserRequest$ServerId": "

A system-assigned unique identifier for an SFTP server instance. This is the specific SFTP server that you added your user to.

", - "CreateUserResponse$ServerId": "

The ID of the SFTP server that the user is attached to.

", - "DeleteServerRequest$ServerId": "

A unique system-assigned identifier for an SFTP server instance.

", - "DeleteSshPublicKeyRequest$ServerId": "

A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server instance that has the user assigned to it.

", - "DeleteUserRequest$ServerId": "

A system-assigned unique identifier for an SFTP server instance that has the user assigned to it.

", - "DescribeServerRequest$ServerId": "

A system-assigned unique identifier for an SFTP server.

", - "DescribeUserRequest$ServerId": "

A system-assigned unique identifier for an SFTP server that has this user assigned.

", - "DescribeUserResponse$ServerId": "

A system-assigned unique identifier for an SFTP server that has this user assigned.

", - "DescribedServer$ServerId": "

This property is a unique system-assigned identifier for the SFTP server that you instantiate.

", - "ImportSshPublicKeyRequest$ServerId": "

A system-assigned unique identifier for an SFTP server.

", - "ImportSshPublicKeyResponse$ServerId": "

A system-assigned unique identifier for an SFTP server.

", - "ListUsersRequest$ServerId": "

A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server that has users assigned to it.

", - "ListUsersResponse$ServerId": "

A system-assigned unique identifier for an SFTP server that the users are assigned to.

", - "ListedServer$ServerId": "

This value is the unique system assigned identifier for the SFTP servers that were listed.

", - "StartServerRequest$ServerId": "

A system-assigned unique identifier for an SFTP server that you start.

", - "StopServerRequest$ServerId": "

A system-assigned unique identifier for an SFTP server that you stopped.

", - "TestIdentityProviderRequest$ServerId": "

A system-assigned identifier for a specific server. That server's user authentication method is tested with a user name and password.

", - "UpdateServerRequest$ServerId": "

A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.

", - "UpdateServerResponse$ServerId": "

A system-assigned unique identifier for an SFTP server that the user account is assigned to.

", - "UpdateUserRequest$ServerId": "

A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.

", - "UpdateUserResponse$ServerId": "

A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.

" + "CreateServerResponse$ServerId": "

The service-assigned ID of the file transfer protocol-enabled server that is created.

", + "CreateUserRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server instance. This is the specific server that you added your user to.

", + "CreateUserResponse$ServerId": "

The ID of the file transfer protocol-enabled server that the user is attached to.

", + "DeleteServerRequest$ServerId": "

A unique system-assigned identifier for a file transfer protocol-enabled server instance.

", + "DeleteSshPublicKeyRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server instance that has the user assigned to it.

", + "DeleteUserRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server instance that has the user assigned to it.

", + "DescribeServerRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server.

", + "DescribeUserRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server that has this user assigned.

", + "DescribeUserResponse$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server that has this user assigned.

", + "DescribedServer$ServerId": "

Unique system-assigned identifier for a file transfer protocol-enabled server that you instantiate.

", + "ImportSshPublicKeyRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server.

", + "ImportSshPublicKeyResponse$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server.

", + "ListUsersRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server that has users assigned to it.

", + "ListUsersResponse$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server that the users are assigned to.

", + "ListedServer$ServerId": "

The unique system-assigned identifier for the file transfer protocol-enabled servers that were listed.

", + "StartServerRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server that you start.

", + "StopServerRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server that you stopped.

", + "TestIdentityProviderRequest$ServerId": "

A system-assigned identifier for a specific file transfer protocol-enabled server. That server's user authentication method is tested with a user name and password.

", + "UpdateServerRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

", + "UpdateServerResponse$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server that the user account is assigned to.

", + "UpdateUserRequest$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

", + "UpdateUserResponse$ServerId": "

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

" } }, "ServiceErrorMessage": { "base": null, "refs": { + "AccessDeniedException$Message": null, "ServiceUnavailableException$Message": null } }, "ServiceUnavailableException": { - "base": "

The request has failed because the AWS Transfer for SFTP service is not available.

", + "base": "

The request has failed because the AWS Transfer Family service is not available.

", "refs": { } }, "SshPublicKey": { - "base": "

Provides information about the public Secure Shell (SSH) key that is associated with a user account for a specific server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific SFTP server.

", + "base": "

Provides information about the public Secure Shell (SSH) key that is associated with a user account for the specific file transfer protocol-enabled server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific server.

", "refs": { "SshPublicKeys$member": null } @@ -435,7 +464,7 @@ "SshPublicKeyBody": { "base": null, "refs": { - "CreateUserRequest$SshPublicKeyBody": "

The public portion of the Secure Shell (SSH) key used to authenticate the user to the SFTP server.

", + "CreateUserRequest$SshPublicKeyBody": "

The public portion of the Secure Shell (SSH) key used to authenticate the user to the file transfer protocol-enabled server.

", "ImportSshPublicKeyRequest$SshPublicKeyBody": "

The public key portion of an SSH key pair.

", "SshPublicKey$SshPublicKeyBody": "

The content of the SSH public key as specified by the PublicKeyId.

" } @@ -443,21 +472,21 @@ "SshPublicKeyCount": { "base": null, "refs": { - "ListedUser$SshPublicKeyCount": "

This value is the number of SSH public keys stored for the user you specified.

" + "ListedUser$SshPublicKeyCount": "

The number of SSH public keys stored for the user you specified.

" } }, "SshPublicKeyId": { "base": null, "refs": { "DeleteSshPublicKeyRequest$SshPublicKeyId": "

A unique identifier used to reference your user's specific SSH key.

", - "ImportSshPublicKeyResponse$SshPublicKeyId": "

This identifier is the name given to a public key by the system that was imported.

", + "ImportSshPublicKeyResponse$SshPublicKeyId": "

The name given to a public key by the system that was imported.

", "SshPublicKey$SshPublicKeyId": "

The SshPublicKeyId parameter contains the identifier of the public key.

" } }, "SshPublicKeys": { "base": null, "refs": { - "DescribedUser$SshPublicKeys": "

This property contains the public key portion of the Secure Shell (SSH) keys stored for the described user.

" + "DescribedUser$SshPublicKeys": "

Contains the public key portion of the Secure Shell (SSH) keys stored for the described user.

" } }, "StartServerRequest": { @@ -466,10 +495,10 @@ } }, "State": { - "base": "

Describes the condition of the SFTP server with respect to its ability to perform file operations. There are six possible states: OFFLINE, ONLINE, STARTING, STOPPING, START_FAILED, and STOP_FAILED.

OFFLINE indicates that the SFTP server exists, but that it is not available for file operations. ONLINE indicates that the SFTP server is available to perform file operations. STARTING indicates that the SFTP server's was instantiated, but the server is not yet available to perform file operations. Under normal conditions, it can take a couple of minutes for an SFTP server to be completely operational. Both START_FAILED and STOP_FAILED are error conditions.

", + "base": "

Describes the condition of a file transfer protocol-enabled server with respect to its ability to perform file operations. There are six possible states: OFFLINE, ONLINE, STARTING, STOPPING, START_FAILED, and STOP_FAILED.

OFFLINE indicates that the server exists, but that it is not available for file operations. ONLINE indicates that the server is available to perform file operations. STARTING indicates that the server was instantiated, but it is not yet available to perform file operations. Under normal conditions, it can take a couple of minutes for the server to be completely operational. Both START_FAILED and STOP_FAILED are error conditions.

", "refs": { - "DescribedServer$State": "

The condition of the SFTP server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

", - "ListedServer$State": "

This property describes the condition of the SFTP server for the server that was described. A value of ONLINE> indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "DescribedServer$State": "

The condition of the file transfer protocol-enabled server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

", + "ListedServer$State": "

Describes the condition of the file transfer protocol-enabled server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" } }, "StatusCode": { @@ -492,7 +521,7 @@ "SubnetIds": { "base": null, "refs": { - "EndpointDetails$SubnetIds": "

A list of subnet IDs that are required to host your SFTP server endpoint in your VPC.

" + "EndpointDetails$SubnetIds": "

A list of subnet IDs that are required to host your file transfer protocol-enabled server endpoint in your VPC.

" } }, "Tag": { @@ -522,16 +551,16 @@ "TagValue": { "base": null, "refs": { - "Tag$Value": "

This property contains one or more values that you assigned to the key name you create.

" + "Tag$Value": "

Contains one or more values that you assigned to the key name you create.

" } }, "Tags": { "base": null, "refs": { - "CreateServerRequest$Tags": "

Key-value pairs that can be used to group and search for servers.

", + "CreateServerRequest$Tags": "

Key-value pairs that can be used to group and search for file transfer protocol-enabled servers.

", "CreateUserRequest$Tags": "

Key-value pairs that can be used to group and search for users. Tags are metadata attached to users for any purpose.

", - "DescribedServer$Tags": "

This property contains the key-value pairs that you can use to search for and group servers that were assigned to the server that was described.

", - "DescribedUser$Tags": "

This property contains the key-value pairs for the user requested. Tag can be used to search for and group users for a variety of purposes.

", + "DescribedServer$Tags": "

Contains the key-value pairs that you can use to search for and group file transfer protocol-enabled servers that were assigned to the server that was described.

", + "DescribedUser$Tags": "

Contains the key-value pairs for the user requested. Tag can be used to search for and group users for a variety of purposes.

", "ListTagsForResourceResponse$Tags": "

Key-value pairs that are assigned to a resource, usually for the purpose of grouping and searching for items. Tags are metadata that you define.

", "TagResourceRequest$Tags": "

Key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to user accounts for any purpose.

" } @@ -572,39 +601,39 @@ } }, "UpdateUserResponse": { - "base": "

UpdateUserResponse returns the user name and server identifier for the request to update a user's properties.

", + "base": "

UpdateUserResponse returns the user name and file transfer protocol-enabled server identifier for the request to update a user's properties.

", "refs": { } }, "Url": { "base": null, "refs": { - "IdentityProviderDetails$Url": "

The Url parameter provides contains the location of the service endpoint used to authenticate users.

", + "IdentityProviderDetails$Url": "

Contains the location of the service endpoint used to authenticate users.

", "TestIdentityProviderResponse$Url": "

The endpoint of the service used to authenticate a user.

" } }, "UserCount": { "base": null, "refs": { - "DescribedServer$UserCount": "

The number of users that are assigned to the SFTP server you specified with the ServerId.

", - "ListedServer$UserCount": "

This property is a numeric value that indicates the number of users that are assigned to the SFTP server you specified with the ServerId.

" + "DescribedServer$UserCount": "

The number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

", + "ListedServer$UserCount": "

A numeric value that indicates the number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" } }, "UserName": { "base": null, "refs": { - "CreateUserRequest$UserName": "

A unique string that identifies a user and is associated with a server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

", - "CreateUserResponse$UserName": "

A unique string that identifies a user account associated with an SFTP server.

", + "CreateUserRequest$UserName": "

A unique string that identifies a user and is associated with a file transfer protocol-enabled server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

", + "CreateUserResponse$UserName": "

A unique string that identifies a user account associated with a file transfer protocol-enabled server.

", "DeleteSshPublicKeyRequest$UserName": "

A unique string that identifies a user whose public key is being deleted.

", - "DeleteUserRequest$UserName": "

A unique string that identifies a user that is being deleted from the server.

", - "DescribeUserRequest$UserName": "

The name of the user assigned to one or more servers. User names are part of the sign-in credentials to use the AWS Transfer for SFTP service and perform file transfer tasks.

", - "DescribedUser$UserName": "

This property is the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your SFTP server.

", - "ImportSshPublicKeyRequest$UserName": "

The name of the user account that is assigned to one or more servers.

", + "DeleteUserRequest$UserName": "

A unique string that identifies a user that is being deleted from a file transfer protocol-enabled server.

", + "DescribeUserRequest$UserName": "

The name of the user assigned to one or more file transfer protocol-enabled servers. User names are part of the sign-in credentials to use the AWS Transfer Family service and perform file transfer tasks.

", + "DescribedUser$UserName": "

The name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your file transfer protocol-enabled server.

", + "ImportSshPublicKeyRequest$UserName": "

The name of the user account that is assigned to one or more file transfer protocol-enabled servers.

", "ImportSshPublicKeyResponse$UserName": "

A user name assigned to the ServerID value that you specified.

", "ListedUser$UserName": "

The name of the user whose ARN was specified. User names are used for authentication purposes.

", - "TestIdentityProviderRequest$UserName": "

This request parameter is the name of the user account to be tested.

", - "UpdateUserRequest$UserName": "

A unique string that identifies a user and is associated with a server as specified by the ServerId. This is the string that will be used by your user when they log in to your SFTP server. This user name is a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

", - "UpdateUserResponse$UserName": "

The unique identifier for a user that is assigned to the SFTP server instance that was specified in the request.

" + "TestIdentityProviderRequest$UserName": "

The name of the user account to be tested.

", + "UpdateUserRequest$UserName": "

A unique string that identifies a user and is associated with a file transfer protocol-enabled server as specified by the ServerId. This is the string that will be used by your user when they log in to your server. This user name is a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

", + "UpdateUserResponse$UserName": "

The unique identifier for a user that is assigned to a file transfer protocol-enabled server instance that was specified in the request.

" } }, "UserPassword": { @@ -622,7 +651,7 @@ "VpcId": { "base": null, "refs": { - "EndpointDetails$VpcId": "

The VPC ID of the virtual private cloud in which the SFTP server's endpoint will be hosted.

" + "EndpointDetails$VpcId": "

The VPC ID of the VPC in which a file transfer protocol-enabled server's endpoint will be hosted.

" } } } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index e00dc7f0dc0..6c5047e3560 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -8,8 +8,11 @@ "dnsSuffix" : "amazonaws.com", "partition" : "aws", "partitionName" : "AWS Standard", - "regionRegex" : "^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$", + "regionRegex" : "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, @@ -73,6 +76,7 @@ }, "access-analyzer" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -95,6 +99,7 @@ }, "acm" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -202,6 +207,12 @@ }, "api.ecr" : { "endpoints" : { + "af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "api.ecr.af-south-1.amazonaws.com" + }, "ap-east-1" : { "credentialScope" : { "region" : "ap-east-1" @@ -336,6 +347,28 @@ } } }, + "api.elastic-inference" : { + "endpoints" : { + "ap-northeast-1" : { + "hostname" : "api.elastic-inference.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "hostname" : "api.elastic-inference.ap-northeast-2.amazonaws.com" + }, + "eu-west-1" : { + "hostname" : "api.elastic-inference.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "hostname" : "api.elastic-inference.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "hostname" : "api.elastic-inference.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "hostname" : "api.elastic-inference.us-west-2.amazonaws.com" + } + } + }, "api.mediatailor" : { "endpoints" : { "ap-northeast-1" : { }, @@ -406,6 +439,7 @@ }, "apigateway" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -431,6 +465,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + 
"af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -539,6 +574,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -720,6 +756,7 @@ }, "cloudformation" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -834,6 +871,7 @@ }, "cloudtrail" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -954,6 +992,7 @@ }, "codedeploy" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1395,6 +1434,7 @@ }, "directconnect" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1448,6 +1488,7 @@ }, "dms" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1613,6 +1654,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1675,6 +1717,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1727,6 +1770,7 @@ }, "ecs" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1771,30 +1815,9 @@ "us-west-2" : { } } }, - "elastic-inference" : { - "endpoints" : { - "ap-northeast-1" : { - "hostname" : "api.elastic-inference.ap-northeast-1.amazonaws.com" - }, - "ap-northeast-2" : { - "hostname" : "api.elastic-inference.ap-northeast-2.amazonaws.com" - }, - "eu-west-1" : { - "hostname" : "api.elastic-inference.eu-west-1.amazonaws.com" - }, - "us-east-1" : { - "hostname" : "api.elastic-inference.us-east-1.amazonaws.com" - }, - "us-east-2" : { - "hostname" : "api.elastic-inference.us-east-2.amazonaws.com" - }, - "us-west-2" 
: { - "hostname" : "api.elastic-inference.us-west-2.amazonaws.com" - } - } - }, "elasticache" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1823,6 +1846,7 @@ }, "elasticbeanstalk" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2002,6 +2026,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2052,6 +2077,7 @@ "sslCommonName" : "{region}.{service}.{dnsSuffix}" }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2140,6 +2166,7 @@ }, "es" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2168,6 +2195,7 @@ }, "events" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2403,6 +2431,7 @@ "endpoints" : { "ap-east-1" : { }, "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, @@ -2438,6 +2467,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2557,6 +2587,7 @@ "endpoints" : { "ap-southeast-2" : { }, "eu-north-1" : { }, + "eu-west-1" : { }, "me-south-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -2865,6 +2896,7 @@ }, "kinesis" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2951,6 +2983,7 @@ }, "kms" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2990,6 +3023,7 @@ }, "lambda" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3099,6 +3133,7 @@ }, "logs" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, 
"ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3234,6 +3269,7 @@ } }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3273,8 +3309,11 @@ } }, "endpoints" : { + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -3284,6 +3323,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3794,6 +3834,7 @@ }, "rds" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3818,6 +3859,7 @@ }, "redshift" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3886,6 +3928,7 @@ }, "resource-groups" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3990,8 +4033,11 @@ } }, "endpoints" : { + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -4048,6 +4094,7 @@ "signatureVersions" : [ "s3v4" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { "hostname" : "s3.ap-northeast-1.amazonaws.com", @@ -4528,6 +4575,7 @@ }, "sms" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4686,6 +4734,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4736,6 +4785,7 @@ "sslCommonName" : "{region}.queue.{dnsSuffix}" }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4784,6 +4834,7 @@ }, "ssm" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4854,6 +4905,7 
@@ }, "states" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4986,6 +5038,7 @@ }, "sts" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -5050,6 +5103,7 @@ }, "swf" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -5096,6 +5150,7 @@ }, "tagging" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -5522,6 +5577,7 @@ }, "xray" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -5668,6 +5724,12 @@ "cn-northwest-1" : { } } }, + "codecommit" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "codedeploy" : { "endpoints" : { "cn-north-1" : { }, @@ -7030,6 +7092,12 @@ } } }, + "securityhub" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "serverlessrepo" : { "defaults" : { "protocols" : [ "https" ] @@ -7335,6 +7403,14 @@ "us-iso-east-1" : { } } }, + "comprehend" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, "config" : { "endpoints" : { "us-iso-east-1" : { } diff --git a/service/accessanalyzer/api_enums.go b/service/accessanalyzer/api_enums.go index 2a695279c85..073982a926c 100644 --- a/service/accessanalyzer/api_enums.go +++ b/service/accessanalyzer/api_enums.go @@ -21,6 +21,24 @@ func (enum AnalyzerStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type FindingSourceType string + +// Enum values for FindingSourceType +const ( + FindingSourceTypeBucketAcl FindingSourceType = "BUCKET_ACL" + FindingSourceTypePolicy FindingSourceType = "POLICY" + FindingSourceTypeS3AccessPoint FindingSourceType = "S3_ACCESS_POINT" +) + +func (enum FindingSourceType) MarshalValue() (string, error) { + return string(enum), nil +} + 
+func (enum FindingSourceType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type FindingStatus string // Enum values for FindingStatus diff --git a/service/accessanalyzer/api_types.go b/service/accessanalyzer/api_types.go index 9931c4163ae..c949d440d9b 100644 --- a/service/accessanalyzer/api_types.go +++ b/service/accessanalyzer/api_types.go @@ -56,7 +56,8 @@ type AnalyzedResource struct { // ResourceType is a required field ResourceType ResourceType `locationName:"resourceType" type:"string" required:"true" enum:"true"` - // Indicates how the access that generated the finding is granted. + // Indicates how the access that generated the finding is granted. This is populated + // for Amazon S3 bucket findings. SharedVia []string `locationName:"sharedVia" type:"list"` // The current status of the finding generated from the analyzed resource. @@ -537,6 +538,10 @@ type Finding struct { // ResourceType is a required field ResourceType ResourceType `locationName:"resourceType" type:"string" required:"true" enum:"true"` + // The sources of the finding. This indicates how the access that generated + // the finding is granted. It is populated for Amazon S3 bucket findings. + Sources []FindingSource `locationName:"sources" type:"list"` + // The current status of the finding. // // Status is a required field @@ -641,6 +646,18 @@ func (s Finding) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "resourceType", protocol.QuotedValue{ValueMarshaler: v}, metadata) } + if s.Sources != nil { + v := s.Sources + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "sources", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } if len(s.Status) > 0 { v := s.Status @@ -657,6 +674,68 @@ func (s Finding) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The source of the finding. 
This indicates how the access that generated the +// finding is granted. It is populated for Amazon S3 bucket findings. +type FindingSource struct { + _ struct{} `type:"structure"` + + // Includes details about how the access that generated the finding is granted. + // This is populated for Amazon S3 bucket findings. + Detail *FindingSourceDetail `locationName:"detail" type:"structure"` + + // Indicates the type of access that generated the finding. + // + // Type is a required field + Type FindingSourceType `locationName:"type" type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s FindingSource) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s FindingSource) MarshalFields(e protocol.FieldEncoder) error { + if s.Detail != nil { + v := s.Detail + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "detail", v, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Includes details about how the access that generated the finding is granted. +// This is populated for Amazon S3 bucket findings. +type FindingSourceDetail struct { + _ struct{} `type:"structure"` + + // The ARN of the access point that generated the finding. + AccessPointArn *string `locationName:"accessPointArn" type:"string"` +} + +// String returns the string representation +func (s FindingSourceDetail) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s FindingSourceDetail) MarshalFields(e protocol.FieldEncoder) error { + if s.AccessPointArn != nil { + v := *s.AccessPointArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "accessPointArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // Contains information about a finding. type FindingSummary struct { _ struct{} `type:"structure"` @@ -709,6 +788,10 @@ type FindingSummary struct { // ResourceType is a required field ResourceType ResourceType `locationName:"resourceType" type:"string" required:"true" enum:"true"` + // The sources of the finding. This indicates how the access that generated + // the finding is granted. It is populated for Amazon S3 bucket findings. + Sources []FindingSource `locationName:"sources" type:"list"` + // The status of the finding. // // Status is a required field @@ -813,6 +896,18 @@ func (s FindingSummary) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "resourceType", protocol.QuotedValue{ValueMarshaler: v}, metadata) } + if s.Sources != nil { + v := s.Sources + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "sources", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } if len(s.Status) > 0 { v := s.Status diff --git a/service/applicationautoscaling/api_doc.go b/service/applicationautoscaling/api_doc.go index 55fe3bdb8f1..a272b822fb8 100644 --- a/service/applicationautoscaling/api_doc.go +++ b/service/applicationautoscaling/api_doc.go @@ -26,6 +26,8 @@ // // * AWS Lambda function provisioned concurrency // +// * Amazon Keyspaces for Apache Cassandra tables +// // API Summary // // The Application Auto Scaling service API includes three key sets of actions: @@ -41,10 +43,11 @@ // activity history. 
// // * Suspend and resume scaling - Temporarily suspend and later resume automatic -// scaling by calling the RegisterScalableTarget action for any Application -// Auto Scaling scalable target. You can suspend and resume, individually -// or in combination, scale-out activities triggered by a scaling policy, -// scale-in activities triggered by a scaling policy, and scheduled scaling. +// scaling by calling the RegisterScalableTarget (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) +// API action for any Application Auto Scaling scalable target. You can suspend +// and resume (individually or in combination) scale-out activities that +// are triggered by a scaling policy, scale-in activities that are triggered +// by a scaling policy, and scheduled scaling. // // To learn more about Application Auto Scaling, including information about // granting IAM users required permissions for Application Auto Scaling actions, diff --git a/service/applicationautoscaling/api_enums.go b/service/applicationautoscaling/api_enums.go index c50174e5d94..3c261d7023c 100644 --- a/service/applicationautoscaling/api_enums.go +++ b/service/applicationautoscaling/api_enums.go @@ -76,6 +76,8 @@ const ( MetricTypeAppStreamAverageCapacityUtilization MetricType = "AppStreamAverageCapacityUtilization" MetricTypeComprehendInferenceUtilization MetricType = "ComprehendInferenceUtilization" MetricTypeLambdaProvisionedConcurrencyUtilization MetricType = "LambdaProvisionedConcurrencyUtilization" + MetricTypeCassandraReadCapacityUtilization MetricType = "CassandraReadCapacityUtilization" + MetricTypeCassandraWriteCapacityUtilization MetricType = "CassandraWriteCapacityUtilization" ) func (enum MetricType) MarshalValue() (string, error) { @@ -121,6 +123,8 @@ const ( ScalableDimensionCustomResourceResourceTypeProperty ScalableDimension = "custom-resource:ResourceType:Property" ScalableDimensionComprehendDocumentClassifierEndpointDesiredInferenceUnits 
ScalableDimension = "comprehend:document-classifier-endpoint:DesiredInferenceUnits" ScalableDimensionLambdaFunctionProvisionedConcurrency ScalableDimension = "lambda:function:ProvisionedConcurrency" + ScalableDimensionCassandraTableReadCapacityUnits ScalableDimension = "cassandra:table:ReadCapacityUnits" + ScalableDimensionCassandraTableWriteCapacityUnits ScalableDimension = "cassandra:table:WriteCapacityUnits" ) func (enum ScalableDimension) MarshalValue() (string, error) { @@ -167,6 +171,7 @@ const ( ServiceNamespaceCustomResource ServiceNamespace = "custom-resource" ServiceNamespaceComprehend ServiceNamespace = "comprehend" ServiceNamespaceLambda ServiceNamespace = "lambda" + ServiceNamespaceCassandra ServiceNamespace = "cassandra" ) func (enum ServiceNamespace) MarshalValue() (string, error) { diff --git a/service/applicationautoscaling/api_examples_test.go b/service/applicationautoscaling/api_examples_test.go index 0b84297b888..8422cda6338 100644 --- a/service/applicationautoscaling/api_examples_test.go +++ b/service/applicationautoscaling/api_examples_test.go @@ -105,7 +105,7 @@ func ExampleClient_DeregisterScalableTargetRequest_shared00() { // To describe scalable targets // -// This example describes the scalable targets for the ecs service namespace. +// This example describes the scalable targets for the ECS service namespace. func ExampleClient_DescribeScalableTargetsRequest_shared00() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -190,7 +190,7 @@ func ExampleClient_DescribeScalingActivitiesRequest_shared00() { // To describe scaling policies // -// This example describes the scaling policies for the ecs service namespace. +// This example describes the scaling policies for the ECS service namespace. 
func ExampleClient_DescribeScalingPoliciesRequest_shared00() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -291,259 +291,6 @@ func ExampleClient_PutScalingPolicyRequest_shared00() { fmt.Println(result) } -// To apply a target tracking scaling policy with a customized metric specification -// -// The following example applies a target tracking scaling policy with a customized -// metric specification to an Amazon ECS service called web-app in the default cluster. -// The policy keeps the average utilization of the service at 75 percent, with scale-out -// and scale-in cooldown periods of 60 seconds. -func ExampleClient_PutScalingPolicyRequest_shared01() { - cfg, err := external.LoadDefaultAWSConfig() - if err != nil { - panic("failed to load config, " + err.Error()) - } - - svc := applicationautoscaling.New(cfg) - input := &applicationautoscaling.PutScalingPolicyInput{ - PolicyName: aws.String("cms75-target-tracking-scaling-policy"), - PolicyType: applicationautoscaling.PolicyTypeTargetTrackingScaling, - ResourceId: aws.String("service/default/web-app"), - ScalableDimension: applicationautoscaling.ScalableDimensionEcsServiceDesiredCount, - ServiceNamespace: applicationautoscaling.ServiceNamespaceEcs, - TargetTrackingScalingPolicyConfiguration: &applicationautoscaling.TargetTrackingScalingPolicyConfiguration{ - CustomizedMetricSpecification: &applicationautoscaling.CustomizedMetricSpecification{ - Dimensions: []applicationautoscaling.MetricDimension{ - { - Name: aws.String("MyOptionalMetricDimensionName"), - Value: aws.String("MyOptionalMetricDimensionValue"), - }, - }, - MetricName: aws.String("MyUtilizationMetric"), - Namespace: aws.String("MyNamespace"), - Statistic: applicationautoscaling.MetricStatisticAverage, - Unit: aws.String("Percent"), - }, - ScaleInCooldown: aws.Int64(60), - ScaleOutCooldown: aws.Int64(60), - TargetValue: aws.Float64(75.000000), - }, - } - - req := svc.PutScalingPolicyRequest(input) - result, err := 
req.Send(context.Background()) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case applicationautoscaling.ErrCodeValidationException: - fmt.Println(applicationautoscaling.ErrCodeValidationException, aerr.Error()) - case applicationautoscaling.ErrCodeLimitExceededException: - fmt.Println(applicationautoscaling.ErrCodeLimitExceededException, aerr.Error()) - case applicationautoscaling.ErrCodeObjectNotFoundException: - fmt.Println(applicationautoscaling.ErrCodeObjectNotFoundException, aerr.Error()) - case applicationautoscaling.ErrCodeConcurrentUpdateException: - fmt.Println(applicationautoscaling.ErrCodeConcurrentUpdateException, aerr.Error()) - case applicationautoscaling.ErrCodeFailedResourceAccessException: - fmt.Println(applicationautoscaling.ErrCodeFailedResourceAccessException, aerr.Error()) - case applicationautoscaling.ErrCodeInternalServiceException: - fmt.Println(applicationautoscaling.ErrCodeInternalServiceException, aerr.Error()) - default: - fmt.Println(aerr.Error()) - } - } else { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - } - return - } - - fmt.Println(result) -} - -// To apply a target tracking scaling policy for scale out only -// -// The following example applies a target tracking scaling policy to an Amazon ECS service -// called web-app in the default cluster. The policy is used to scale out the ECS service -// when the RequestCountPerTarget metric from the Application Load Balancer exceeds -// the threshold. 
-func ExampleClient_PutScalingPolicyRequest_shared02() { - cfg, err := external.LoadDefaultAWSConfig() - if err != nil { - panic("failed to load config, " + err.Error()) - } - - svc := applicationautoscaling.New(cfg) - input := &applicationautoscaling.PutScalingPolicyInput{ - PolicyName: aws.String("alb-scale-out-target-tracking-scaling-policy"), - PolicyType: applicationautoscaling.PolicyTypeTargetTrackingScaling, - ResourceId: aws.String("service/default/web-app"), - ScalableDimension: applicationautoscaling.ScalableDimensionEcsServiceDesiredCount, - ServiceNamespace: applicationautoscaling.ServiceNamespaceEcs, - TargetTrackingScalingPolicyConfiguration: &applicationautoscaling.TargetTrackingScalingPolicyConfiguration{ - DisableScaleIn: aws.Bool(true), - PredefinedMetricSpecification: &applicationautoscaling.PredefinedMetricSpecification{ - PredefinedMetricType: applicationautoscaling.MetricTypeAlbrequestCountPerTarget, - ResourceLabel: aws.String("app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d"), - }, - ScaleInCooldown: aws.Int64(60), - ScaleOutCooldown: aws.Int64(60), - TargetValue: aws.Float64(1000.000000), - }, - } - - req := svc.PutScalingPolicyRequest(input) - result, err := req.Send(context.Background()) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case applicationautoscaling.ErrCodeValidationException: - fmt.Println(applicationautoscaling.ErrCodeValidationException, aerr.Error()) - case applicationautoscaling.ErrCodeLimitExceededException: - fmt.Println(applicationautoscaling.ErrCodeLimitExceededException, aerr.Error()) - case applicationautoscaling.ErrCodeObjectNotFoundException: - fmt.Println(applicationautoscaling.ErrCodeObjectNotFoundException, aerr.Error()) - case applicationautoscaling.ErrCodeConcurrentUpdateException: - fmt.Println(applicationautoscaling.ErrCodeConcurrentUpdateException, aerr.Error()) - case 
applicationautoscaling.ErrCodeFailedResourceAccessException: - fmt.Println(applicationautoscaling.ErrCodeFailedResourceAccessException, aerr.Error()) - case applicationautoscaling.ErrCodeInternalServiceException: - fmt.Println(applicationautoscaling.ErrCodeInternalServiceException, aerr.Error()) - default: - fmt.Println(aerr.Error()) - } - } else { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - } - return - } - - fmt.Println(result) -} - -// To apply a step scaling policy to an Amazon ECS service -// -// This example applies a step scaling policy to an Amazon ECS service called web-app -// in the default cluster. The policy increases the desired count of the service by -// 200%, with a cool down period of 60 seconds. -func ExampleClient_PutScalingPolicyRequest_shared03() { - cfg, err := external.LoadDefaultAWSConfig() - if err != nil { - panic("failed to load config, " + err.Error()) - } - - svc := applicationautoscaling.New(cfg) - input := &applicationautoscaling.PutScalingPolicyInput{ - PolicyName: aws.String("web-app-cpu-gt-75"), - PolicyType: applicationautoscaling.PolicyTypeStepScaling, - ResourceId: aws.String("service/default/web-app"), - ScalableDimension: applicationautoscaling.ScalableDimensionEcsServiceDesiredCount, - ServiceNamespace: applicationautoscaling.ServiceNamespaceEcs, - StepScalingPolicyConfiguration: &applicationautoscaling.StepScalingPolicyConfiguration{ - AdjustmentType: applicationautoscaling.AdjustmentTypePercentChangeInCapacity, - Cooldown: aws.Int64(60), - StepAdjustments: []applicationautoscaling.StepAdjustment{ - { - MetricIntervalLowerBound: aws.Float64(0.000000), - ScalingAdjustment: aws.Int64(200), - }, - }, - }, - } - - req := svc.PutScalingPolicyRequest(input) - result, err := req.Send(context.Background()) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case applicationautoscaling.ErrCodeValidationException: - 
fmt.Println(applicationautoscaling.ErrCodeValidationException, aerr.Error()) - case applicationautoscaling.ErrCodeLimitExceededException: - fmt.Println(applicationautoscaling.ErrCodeLimitExceededException, aerr.Error()) - case applicationautoscaling.ErrCodeObjectNotFoundException: - fmt.Println(applicationautoscaling.ErrCodeObjectNotFoundException, aerr.Error()) - case applicationautoscaling.ErrCodeConcurrentUpdateException: - fmt.Println(applicationautoscaling.ErrCodeConcurrentUpdateException, aerr.Error()) - case applicationautoscaling.ErrCodeFailedResourceAccessException: - fmt.Println(applicationautoscaling.ErrCodeFailedResourceAccessException, aerr.Error()) - case applicationautoscaling.ErrCodeInternalServiceException: - fmt.Println(applicationautoscaling.ErrCodeInternalServiceException, aerr.Error()) - default: - fmt.Println(aerr.Error()) - } - } else { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - } - return - } - - fmt.Println(result) -} - -// To apply a step scaling policy to an Amazon EC2 Spot fleet -// -// This example applies a step scaling policy to an Amazon EC2 Spot fleet. 
The policy -// increases the target capacity of the spot fleet by 200%, with a cool down period -// of 180 seconds.", -func ExampleClient_PutScalingPolicyRequest_shared04() { - cfg, err := external.LoadDefaultAWSConfig() - if err != nil { - panic("failed to load config, " + err.Error()) - } - - svc := applicationautoscaling.New(cfg) - input := &applicationautoscaling.PutScalingPolicyInput{ - PolicyName: aws.String("fleet-cpu-gt-75"), - PolicyType: applicationautoscaling.PolicyTypeStepScaling, - ResourceId: aws.String("spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3"), - ScalableDimension: applicationautoscaling.ScalableDimensionEc2SpotFleetRequestTargetCapacity, - ServiceNamespace: applicationautoscaling.ServiceNamespaceEc2, - StepScalingPolicyConfiguration: &applicationautoscaling.StepScalingPolicyConfiguration{ - AdjustmentType: applicationautoscaling.AdjustmentTypePercentChangeInCapacity, - Cooldown: aws.Int64(180), - StepAdjustments: []applicationautoscaling.StepAdjustment{ - { - MetricIntervalLowerBound: aws.Float64(0.000000), - ScalingAdjustment: aws.Int64(200), - }, - }, - }, - } - - req := svc.PutScalingPolicyRequest(input) - result, err := req.Send(context.Background()) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case applicationautoscaling.ErrCodeValidationException: - fmt.Println(applicationautoscaling.ErrCodeValidationException, aerr.Error()) - case applicationautoscaling.ErrCodeLimitExceededException: - fmt.Println(applicationautoscaling.ErrCodeLimitExceededException, aerr.Error()) - case applicationautoscaling.ErrCodeObjectNotFoundException: - fmt.Println(applicationautoscaling.ErrCodeObjectNotFoundException, aerr.Error()) - case applicationautoscaling.ErrCodeConcurrentUpdateException: - fmt.Println(applicationautoscaling.ErrCodeConcurrentUpdateException, aerr.Error()) - case applicationautoscaling.ErrCodeFailedResourceAccessException: - 
fmt.Println(applicationautoscaling.ErrCodeFailedResourceAccessException, aerr.Error()) - case applicationautoscaling.ErrCodeInternalServiceException: - fmt.Println(applicationautoscaling.ErrCodeInternalServiceException, aerr.Error()) - default: - fmt.Println(aerr.Error()) - } - } else { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - } - return - } - - fmt.Println(result) -} - // To register an ECS service as a scalable target // // This example registers a scalable target from an Amazon ECS service called web-app @@ -590,49 +337,3 @@ func ExampleClient_RegisterScalableTargetRequest_shared00() { fmt.Println(result) } - -// To register an EC2 Spot fleet as a scalable target -// -// This example registers a scalable target from an Amazon EC2 Spot fleet with a minimum -// target capacity of 1 and a maximum of 10. -func ExampleClient_RegisterScalableTargetRequest_shared01() { - cfg, err := external.LoadDefaultAWSConfig() - if err != nil { - panic("failed to load config, " + err.Error()) - } - - svc := applicationautoscaling.New(cfg) - input := &applicationautoscaling.RegisterScalableTargetInput{ - MaxCapacity: aws.Int64(10), - MinCapacity: aws.Int64(1), - ResourceId: aws.String("spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3"), - ScalableDimension: applicationautoscaling.ScalableDimensionEc2SpotFleetRequestTargetCapacity, - ServiceNamespace: applicationautoscaling.ServiceNamespaceEc2, - } - - req := svc.RegisterScalableTargetRequest(input) - result, err := req.Send(context.Background()) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case applicationautoscaling.ErrCodeValidationException: - fmt.Println(applicationautoscaling.ErrCodeValidationException, aerr.Error()) - case applicationautoscaling.ErrCodeLimitExceededException: - fmt.Println(applicationautoscaling.ErrCodeLimitExceededException, aerr.Error()) - case 
applicationautoscaling.ErrCodeConcurrentUpdateException: - fmt.Println(applicationautoscaling.ErrCodeConcurrentUpdateException, aerr.Error()) - case applicationautoscaling.ErrCodeInternalServiceException: - fmt.Println(applicationautoscaling.ErrCodeInternalServiceException, aerr.Error()) - default: - fmt.Println(aerr.Error()) - } - } else { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - } - return - } - - fmt.Println(result) -} diff --git a/service/applicationautoscaling/api_op_DeleteScalingPolicy.go b/service/applicationautoscaling/api_op_DeleteScalingPolicy.go index ae80edcc20f..5ec947c82eb 100644 --- a/service/applicationautoscaling/api_op_DeleteScalingPolicy.go +++ b/service/applicationautoscaling/api_op_DeleteScalingPolicy.go @@ -57,6 +57,9 @@ type DeleteScalingPolicyInput struct { // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -103,13 +106,17 @@ type DeleteScalingPolicyInput struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. 
For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -175,8 +182,6 @@ const opDeleteScalingPolicy = "DeleteScalingPolicy" // and Delete a Target Tracking Scaling Policy (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html#delete-target-tracking-policy) // in the Application Auto Scaling User Guide. // -// To create a scaling policy or update an existing one, see PutScalingPolicy. -// // // Example sending a request using DeleteScalingPolicyRequest. // req := client.DeleteScalingPolicyRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/applicationautoscaling/api_op_DeleteScheduledAction.go b/service/applicationautoscaling/api_op_DeleteScheduledAction.go index d21053a1172..b1081a18078 100644 --- a/service/applicationautoscaling/api_op_DeleteScheduledAction.go +++ b/service/applicationautoscaling/api_op_DeleteScheduledAction.go @@ -52,6 +52,9 @@ type DeleteScheduledActionInput struct { // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -98,6 +101,12 @@ type DeleteScheduledActionInput struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. 
// + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` @@ -106,10 +115,8 @@ type DeleteScheduledActionInput struct { // ScheduledActionName is a required field ScheduledActionName *string `min:"1" type:"string" required:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` diff --git a/service/applicationautoscaling/api_op_DeregisterScalableTarget.go b/service/applicationautoscaling/api_op_DeregisterScalableTarget.go index 6f2ca61554b..df94b98dfd1 100644 --- a/service/applicationautoscaling/api_op_DeregisterScalableTarget.go +++ b/service/applicationautoscaling/api_op_DeregisterScalableTarget.go @@ -52,6 +52,9 @@ type DeregisterScalableTargetInput struct { // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. 
+ // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -98,13 +101,17 @@ type DeregisterScalableTargetInput struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -152,12 +159,12 @@ const opDeregisterScalableTarget = "DeregisterScalableTarget" // DeregisterScalableTargetRequest returns a request value for making API operation for // Application Auto Scaling. // -// Deregisters an Application Auto Scaling scalable target. -// -// Deregistering a scalable target deletes the scaling policies that are associated -// with it. +// Deregisters an Application Auto Scaling scalable target when you have finished +// using it. To see which resources have been registered, use DescribeScalableTargets +// (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html). // -// To create a scalable target or update an existing one, see RegisterScalableTarget. 
+// Deregistering a scalable target deletes the scaling policies and the scheduled +// actions that are associated with it. // // // Example sending a request using DeregisterScalableTargetRequest. // req := client.DeregisterScalableTargetRequest(params) diff --git a/service/applicationautoscaling/api_op_DescribeScalableTargets.go b/service/applicationautoscaling/api_op_DescribeScalableTargets.go index f191e33941c..b9fb1a852c2 100644 --- a/service/applicationautoscaling/api_op_DescribeScalableTargets.go +++ b/service/applicationautoscaling/api_op_DescribeScalableTargets.go @@ -64,6 +64,9 @@ type DescribeScalableTargetsInput struct { // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. + // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. ResourceIds []string `type:"list"` // The scalable dimension associated with the scalable target. This string consists @@ -109,12 +112,16 @@ type DescribeScalableTargetsInput struct { // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. + // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. ScalableDimension ScalableDimension `type:"string" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. 
For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -163,10 +170,6 @@ const opDescribeScalableTargets = "DescribeScalableTargets" // // You can filter the results using ResourceIds and ScalableDimension. // -// To create a scalable target or update an existing one, see RegisterScalableTarget. -// If you are no longer using a scalable target, you can deregister it using -// DeregisterScalableTarget. -// // // Example sending a request using DescribeScalableTargetsRequest. // req := client.DescribeScalableTargetsRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/applicationautoscaling/api_op_DescribeScalingActivities.go b/service/applicationautoscaling/api_op_DescribeScalingActivities.go index 4c035ccd611..e7af36108f5 100644 --- a/service/applicationautoscaling/api_op_DescribeScalingActivities.go +++ b/service/applicationautoscaling/api_op_DescribeScalingActivities.go @@ -64,6 +64,9 @@ type DescribeScalingActivitiesInput struct { // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. + // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. ResourceId *string `min:"1" type:"string"` // The scalable dimension. This string consists of the service namespace, resource @@ -109,12 +112,16 @@ type DescribeScalingActivitiesInput struct { // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. + // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. 
ScalableDimension ScalableDimension `type:"string" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -167,11 +174,6 @@ const opDescribeScalingActivities = "DescribeScalingActivities" // // You can filter the results using ResourceId and ScalableDimension. // -// Scaling activities are triggered by CloudWatch alarms that are associated -// with scaling policies. To view the scaling policies for a service namespace, -// see DescribeScalingPolicies. To create a scaling policy or update an existing -// one, see PutScalingPolicy. -// // // Example sending a request using DescribeScalingActivitiesRequest. // req := client.DescribeScalingActivitiesRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/applicationautoscaling/api_op_DescribeScalingPolicies.go b/service/applicationautoscaling/api_op_DescribeScalingPolicies.go index dc2925b1d77..25ed031856a 100644 --- a/service/applicationautoscaling/api_op_DescribeScalingPolicies.go +++ b/service/applicationautoscaling/api_op_DescribeScalingPolicies.go @@ -67,6 +67,9 @@ type DescribeScalingPoliciesInput struct { // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. + // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. 
Example: keyspace/mykeyspace/table/mytable. ResourceId *string `min:"1" type:"string"` // The scalable dimension. This string consists of the service namespace, resource @@ -112,12 +115,16 @@ type DescribeScalingPoliciesInput struct { // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. + // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. ScalableDimension ScalableDimension `type:"string" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -170,8 +177,9 @@ const opDescribeScalingPolicies = "DescribeScalingPolicies" // // You can filter the results using ResourceId, ScalableDimension, and PolicyNames. // -// To create a scaling policy or update an existing one, see PutScalingPolicy. -// If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy. +// For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) +// and Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) +// in the Application Auto Scaling User Guide. 
// // // Example sending a request using DescribeScalingPoliciesRequest. // req := client.DescribeScalingPoliciesRequest(params) diff --git a/service/applicationautoscaling/api_op_DescribeScheduledActions.go b/service/applicationautoscaling/api_op_DescribeScheduledActions.go index 7ed8665cc1e..d166091fc5e 100644 --- a/service/applicationautoscaling/api_op_DescribeScheduledActions.go +++ b/service/applicationautoscaling/api_op_DescribeScheduledActions.go @@ -64,6 +64,9 @@ type DescribeScheduledActionsInput struct { // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. + // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. ResourceId *string `min:"1" type:"string"` // The scalable dimension. This string consists of the service namespace, resource @@ -109,15 +112,19 @@ type DescribeScheduledActionsInput struct { // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. + // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. ScalableDimension ScalableDimension `type:"string" enum:"true"` // The names of the scheduled actions to describe. ScheduledActionNames []string `type:"list"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. 
For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -171,8 +178,8 @@ const opDescribeScheduledActions = "DescribeScheduledActions" // You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames // parameters. // -// To create a scheduled action or update an existing one, see PutScheduledAction. -// If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction. +// For more information, see Scheduled Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html) +// in the Application Auto Scaling User Guide. // // // Example sending a request using DescribeScheduledActionsRequest. // req := client.DescribeScheduledActionsRequest(params) diff --git a/service/applicationautoscaling/api_op_PutScalingPolicy.go b/service/applicationautoscaling/api_op_PutScalingPolicy.go index fa2c640b5b6..5ccae46aa12 100644 --- a/service/applicationautoscaling/api_op_PutScalingPolicy.go +++ b/service/applicationautoscaling/api_op_PutScalingPolicy.go @@ -24,7 +24,8 @@ type PutScalingPolicyInput struct { // // TargetTrackingScaling—Not supported for Amazon EMR // - // StepScaling—Not supported for DynamoDB, Amazon Comprehend, or AWS Lambda + // StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, or Amazon + // Keyspaces for Apache Cassandra. // // For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) // and Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) @@ -71,6 +72,9 @@ type PutScalingPolicyInput struct { // name suffix that is not $LATEST. 
Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -117,13 +121,17 @@ type PutScalingPolicyInput struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -209,19 +217,13 @@ const opPutScalingPolicy = "PutScalingPolicy" // PutScalingPolicyRequest returns a request value for making API operation for // Application Auto Scaling. // -// Creates or updates a policy for an Application Auto Scaling scalable target. +// Creates or updates a scaling policy for an Application Auto Scaling scalable +// target. // // Each scalable target is identified by a service namespace, resource ID, and // scalable dimension. 
A scaling policy applies to the scalable target identified // by those three attributes. You cannot create a scaling policy until you have -// registered the resource as a scalable target using RegisterScalableTarget. -// -// To update a policy, specify its policy name and the parameters that you want -// to change. Any parameters that you don't specify are not changed by this -// update request. -// -// You can view the scaling policies for a service namespace using DescribeScalingPolicies. -// If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy. +// registered the resource as a scalable target. // // Multiple scaling policies can be in force at the same time for the same scalable // target. You can have one or more target tracking scaling policies, one or @@ -234,8 +236,13 @@ const opPutScalingPolicy = "PutScalingPolicy" // uses the policy with the highest calculated capacity (200% of 10 = 20) and // scales out to 30. // -// Learn more about how to work with scaling policies in the Application Auto -// Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). +// For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) +// and Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) +// in the Application Auto Scaling User Guide. +// +// If a scalable target is deregistered, the scalable target is no longer available +// to execute scaling policies. Any scaling policies that were specified for +// the scalable target are deleted. // // // Example sending a request using PutScalingPolicyRequest. 
// req := client.PutScalingPolicyRequest(params) diff --git a/service/applicationautoscaling/api_op_PutScheduledAction.go b/service/applicationautoscaling/api_op_PutScheduledAction.go index 2b397e31534..129038287e3 100644 --- a/service/applicationautoscaling/api_op_PutScheduledAction.go +++ b/service/applicationautoscaling/api_op_PutScheduledAction.go @@ -13,7 +13,7 @@ import ( type PutScheduledActionInput struct { _ struct{} `type:"structure"` - // The date and time for the scheduled action to end. + // The date and time for the recurring schedule to end. EndTime *time.Time `type:"timestamp"` // The identifier of the resource associated with the scheduled action. This @@ -56,6 +56,9 @@ type PutScheduledActionInput struct { // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -102,11 +105,17 @@ type PutScheduledActionInput struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` // The new minimum and maximum capacity. You can set both values or just one. - // During the scheduled time, if the current capacity is below the minimum capacity, + // At the scheduled time, if the current capacity is below the minimum capacity, // Application Auto Scaling scales out to the minimum capacity. 
If the current // capacity is above the maximum capacity, Application Auto Scaling scales in // to the maximum capacity. @@ -120,29 +129,31 @@ type PutScheduledActionInput struct { // // * Cron expressions - "cron(fields)" // - // At expressions are useful for one-time schedules. Specify the time, in UTC. + // At expressions are useful for one-time schedules. Specify the time in UTC. // // For rate expressions, value is a positive integer and unit is minute | minutes // | hour | hours | day | days. // // For more information about cron expressions, see Cron Expressions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) // in the Amazon CloudWatch Events User Guide. + // + // For examples of using these expressions, see Scheduled Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html) + // in the Application Auto Scaling User Guide. Schedule *string `min:"1" type:"string"` - // The name of the scheduled action. + // The name of the scheduled action. This name must be unique among all other + // scheduled actions on the specified scalable target. // // ScheduledActionName is a required field ScheduledActionName *string `min:"1" type:"string" required:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` - // The date and time for the scheduled action to start. 
+ // The date and time for this scheduled action to start. StartTime *time.Time `type:"timestamp"` } @@ -204,18 +215,21 @@ const opPutScheduledAction = "PutScheduledAction" // Each scalable target is identified by a service namespace, resource ID, and // scalable dimension. A scheduled action applies to the scalable target identified // by those three attributes. You cannot create a scheduled action until you -// have registered the resource as a scalable target using RegisterScalableTarget. +// have registered the resource as a scalable target. +// +// When start and end times are specified with a recurring schedule using a +// cron expression or rates, they form the boundaries of when the recurring +// action starts and stops. // -// To update an action, specify its name and the parameters that you want to -// change. If you don't specify start and end times, the old values are deleted. -// Any other parameters that you don't specify are not changed by this update -// request. +// To update a scheduled action, specify the parameters that you want to change. +// If you don't specify start and end times, the old values are deleted. // -// You can view the scheduled actions using DescribeScheduledActions. If you -// are no longer using a scheduled action, you can delete it using DeleteScheduledAction. +// For more information, see Scheduled Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html) +// in the Application Auto Scaling User Guide. // -// Learn more about how to work with scheduled actions in the Application Auto -// Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). +// If a scalable target is deregistered, the scalable target is no longer available +// to run scheduled actions. Any scheduled actions that were specified for the +// scalable target are deleted. 
// // // Example sending a request using PutScheduledActionRequest. // req := client.PutScheduledActionRequest(params) diff --git a/service/applicationautoscaling/api_op_RegisterScalableTarget.go b/service/applicationautoscaling/api_op_RegisterScalableTarget.go index 89efba2084d..a08a3d86ad1 100644 --- a/service/applicationautoscaling/api_op_RegisterScalableTarget.go +++ b/service/applicationautoscaling/api_op_RegisterScalableTarget.go @@ -12,12 +12,20 @@ import ( type RegisterScalableTargetInput struct { _ struct{} `type:"structure"` - // The maximum value to scale to in response to a scale-out event. MaxCapacity - // is required to register a scalable target. + // The maximum value that you plan to scale out to. When a scaling policy is + // in effect, Application Auto Scaling can scale out (expand) as needed to the + // maximum capacity limit in response to changing demand. + // + // This parameter is required if you are registering a scalable target. MaxCapacity *int64 `type:"integer"` - // The minimum value to scale to in response to a scale-in event. MinCapacity - // is required to register a scalable target. + // The minimum value that you plan to scale in to. When a scaling policy is + // in effect, Application Auto Scaling can scale in (contract) as needed to + // the minimum capacity limit in response to changing demand. + // + // This parameter is required if you are registering a scalable target. For + // Lambda provisioned concurrency, the minimum value allowed is 0. For all other + // resources, the minimum value allowed is 1. MinCapacity *int64 `type:"integer"` // The identifier of the resource that is associated with the scalable target. @@ -60,16 +68,19 @@ type RegisterScalableTargetInput struct { // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. 
Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` - // Application Auto Scaling creates a service-linked role that grants it permissions - // to modify the scalable target on your behalf. For more information, see Service-Linked - // Roles for Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-service-linked-roles.html). + // This parameter is required for services that do not support service-linked + // roles (such as Amazon EMR), and it must specify the ARN of an IAM role that + // allows Application Auto Scaling to modify the scalable target on your behalf. // - // For Amazon EMR, this parameter is required, and it must specify the ARN of - // an IAM role that allows Application Auto Scaling to modify the scalable target - // on your behalf. + // If the service supports service-linked roles, Application Auto Scaling uses + // a service-linked role, which it creates if it does not yet exist. For more + // information, see Application Auto Scaling IAM Roles (https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles). RoleARN *string `min:"1" type:"string"` // The scalable dimension associated with the scalable target. This string consists @@ -115,13 +126,17 @@ type RegisterScalableTargetInput struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. 
+ // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -192,26 +207,28 @@ const opRegisterScalableTarget = "RegisterScalableTarget" // RegisterScalableTargetRequest returns a request value for making API operation for // Application Auto Scaling. // -// Registers or updates a scalable target. A scalable target is a resource that -// Application Auto Scaling can scale out and scale in. Scalable targets are -// uniquely identified by the combination of resource ID, scalable dimension, -// and namespace. +// Registers or updates a scalable target. // -// When you register a new scalable target, you must specify values for minimum -// and maximum capacity. Application Auto Scaling will not scale capacity to -// values that are outside of this range. +// A scalable target is a resource that Application Auto Scaling can scale out +// and scale in. Scalable targets are uniquely identified by the combination +// of resource ID, scalable dimension, and namespace. // -// To update a scalable target, specify the parameter that you want to change -// as well as the following parameters that identify the scalable target: resource -// ID, scalable dimension, and namespace. Any parameters that you don't specify -// are not changed by this update request. 
+// When you register a new scalable target, you must specify values for minimum +// and maximum capacity. Application Auto Scaling scaling policies will not +// scale capacity to values that are outside of this range. // // After you register a scalable target, you do not need to register it again // to use other Application Auto Scaling operations. To see which resources -// have been registered, use DescribeScalableTargets. You can also view the -// scaling policies for a service namespace by using DescribeScalableTargets. +// have been registered, use DescribeScalableTargets (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html). +// You can also view the scaling policies for a service namespace by using DescribeScalableTargets +// (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html). +// If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget +// (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DeregisterScalableTarget.html). // -// If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget. +// To update a scalable target, specify the parameters that you want to change. +// Include the parameters that identify the scalable target: resource ID, scalable +// dimension, and namespace. Any parameters that you don't specify are not changed +// by this update request. // // // Example sending a request using RegisterScalableTargetRequest. 
// req := client.RegisterScalableTargetRequest(params) diff --git a/service/applicationautoscaling/api_types.go b/service/applicationautoscaling/api_types.go index 55b97cd8645..24dd3b4a935 100644 --- a/service/applicationautoscaling/api_types.go +++ b/service/applicationautoscaling/api_types.go @@ -36,6 +36,10 @@ func (s Alarm) String() string { // Represents a CloudWatch metric of your choosing for a target tracking scaling // policy to use with Application Auto Scaling. // +// For information about the available metrics for a service, see AWS Services +// That Publish CloudWatch Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html) +// in the Amazon CloudWatch User Guide. +// // To create your customized metric specification: // // * Add values for each required parameter from CloudWatch. You can use @@ -47,7 +51,7 @@ func (s Alarm) String() string { // * Choose a metric that changes proportionally with capacity. The value // of the metric should increase or decrease in inverse proportion to the // number of capacity units. That is, the value of the metric should decrease -// when capacity increases. +// when capacity increases, and increase when capacity decreases. // // For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html). type CustomizedMetricSpecification struct { @@ -151,6 +155,12 @@ func (s *MetricDimension) Validate() error { // Represents a predefined metric for a target tracking scaling policy to use // with Application Auto Scaling. +// +// Only the AWS services that you're using send metrics to Amazon CloudWatch. 
+// To determine whether a desired metric already exists by looking up its namespace +// and dimension using the CloudWatch metrics dashboard in the console, follow +// the procedure in Building Dashboards with CloudWatch (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html) +// in the Application Auto Scaling User Guide. type PredefinedMetricSpecification struct { _ struct{} `type:"structure"` @@ -255,6 +265,9 @@ type ScalableTarget struct { // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -307,13 +320,16 @@ type ScalableTarget struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource, or a custom-resource. 
// // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -336,6 +352,9 @@ type ScalableTargetAction struct { MaxCapacity *int64 `type:"integer"` // The minimum capacity. + // + // For Lambda provisioned concurrency, the minimum value allowed is 0. For all + // other resources, the minimum value allowed is 1. MinCapacity *int64 `type:"integer"` } @@ -409,6 +428,9 @@ type ScalingActivity struct { // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -455,13 +477,16 @@ type ScalingActivity struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource, or a custom-resource. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -552,6 +577,9 @@ type ScalingPolicy struct { // name suffix that is not $LATEST. 
Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -598,13 +626,16 @@ type ScalingPolicy struct { // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // // ScalableDimension is a required field ScalableDimension ScalableDimension `type:"string" required:"true" enum:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource, or a custom-resource. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -673,6 +704,9 @@ type ScheduledAction struct { // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -718,10 +752,16 @@ type ScheduledAction struct { // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. 
+ // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. ScalableDimension ScalableDimension `type:"string" enum:"true"` // The new minimum and maximum capacity. You can set both values or just one. - // During the scheduled time, if the current capacity is below the minimum capacity, + // At the scheduled time, if the current capacity is below the minimum capacity, // Application Auto Scaling scales out to the minimum capacity. If the current // capacity is above the maximum capacity, Application Auto Scaling scales in // to the maximum capacity. @@ -735,7 +775,7 @@ type ScheduledAction struct { // // * Cron expressions - "cron(fields)" // - // At expressions are useful for one-time schedules. Specify the time, in UTC. + // At expressions are useful for one-time schedules. Specify the time in UTC. // // For rate expressions, value is a positive integer and unit is minute | minutes // | hour | hours | day | days. @@ -743,6 +783,9 @@ type ScheduledAction struct { // For more information about cron expressions, see Cron Expressions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) // in the Amazon CloudWatch Events User Guide. // + // For examples of using these expressions, see Scheduled Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html) + // in the Application Auto Scaling User Guide. 
+ // // Schedule is a required field Schedule *string `min:"1" type:"string" required:"true"` @@ -756,10 +799,7 @@ type ScheduledAction struct { // ScheduledActionName is a required field ScheduledActionName *string `min:"1" type:"string" required:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource, or a custom-resource. // // ServiceNamespace is a required field ServiceNamespace ServiceNamespace `type:"string" required:"true" enum:"true"` @@ -773,9 +813,10 @@ func (s ScheduledAction) String() string { return awsutil.Prettify(s) } -// Represents a step adjustment for a StepScalingPolicyConfiguration. Describes -// an adjustment based on the difference between the value of the aggregated -// CloudWatch metric and the breach threshold that you've defined for the alarm. +// Represents a step adjustment for a StepScalingPolicyConfiguration (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_StepScalingPolicyConfiguration.html). +// Describes an adjustment based on the difference between the value of the +// aggregated CloudWatch metric and the breach threshold that you've defined +// for the alarm. // // For the following examples, suppose that you have an alarm with a breach // threshold of 50: @@ -822,8 +863,8 @@ type StepAdjustment struct { MetricIntervalUpperBound *float64 `type:"double"` // The amount by which to scale, based on the specified adjustment type. A positive - // value adds to the current scalable dimension while a negative number removes - // from the current scalable dimension. 
+ // value adds to the current capacity while a negative number removes from the + // current capacity. // // ScalingAdjustment is a required field ScalingAdjustment *int64 `type:"integer" required:"true"` @@ -853,8 +894,10 @@ func (s *StepAdjustment) Validate() error { type StepScalingPolicyConfiguration struct { _ struct{} `type:"structure"` - // Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute - // number or a percentage of the current capacity. + // Specifies whether the ScalingAdjustment value in a StepAdjustment (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_StepAdjustment.html) + // is an absolute number or a percentage of the current capacity. + // + // AdjustmentType is required if you are adding a new step scaling policy configuration. AdjustmentType AdjustmentType `type:"string" enum:"true"` // The amount of time, in seconds, after a scaling activity completes where @@ -862,7 +905,7 @@ type StepScalingPolicyConfiguration struct { // events. // // For scale-out policies, while the cooldown period is in effect, the capacity - // that has been added by the previous scale-out event that initiated the cooldown + // that has been added by the previous scale-out action that initiated the cooldown // is calculated as part of the desired capacity for the next scale out. The // intention is to continuously (but not excessively) scale out. For example, // an alarm triggers a step scaling policy to scale out an Amazon ECS service @@ -870,7 +913,7 @@ type StepScalingPolicyConfiguration struct { // of 5 minutes starts. During the cooldown period, if the alarm triggers the // same policy again but at a more aggressive step adjustment to scale out the // service by 3 tasks, the 2 tasks that were added in the previous scale-out - // event are considered part of that capacity and only 1 additional task is + // action are considered part of that capacity and only 1 additional task is // added to the desired count. 
// // For scale-in policies, the cooldown period is used to block subsequent scale-in @@ -878,6 +921,35 @@ type StepScalingPolicyConfiguration struct { // to protect your application's availability. However, if another alarm triggers // a scale-out policy during the cooldown period after a scale-in, Application // Auto Scaling scales out your scalable target immediately. + // + // Application Auto Scaling provides a default value of 300 for the following + // scalable targets: + // + // * ECS services + // + // * Spot Fleet requests + // + // * EMR clusters + // + // * AppStream 2.0 fleets + // + // * Aurora DB clusters + // + // * Amazon SageMaker endpoint variants + // + // * Custom resources + // + // For all other scalable targets, the default value is 0: + // + // * DynamoDB tables + // + // * DynamoDB global secondary indexes + // + // * Amazon Comprehend document classification endpoints + // + // * Lambda provisioned concurrency + // + // * Amazon Keyspaces tables Cooldown *int64 `type:"integer"` // The aggregation type for the CloudWatch metrics. Valid values are Minimum, @@ -885,19 +957,21 @@ type StepScalingPolicyConfiguration struct { // as Average. MetricAggregationType MetricAggregationType `type:"string" enum:"true"` - // The minimum number to adjust your scalable dimension as a result of a scaling - // activity. If the adjustment type is PercentChangeInCapacity, the scaling - // policy changes the scalable dimension of the scalable target by this amount. + // The minimum value to scale by when scaling by percentages. For example, suppose + // that you create a step scaling policy to scale out an Amazon ECS service + // by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the service + // has 4 tasks and the scaling policy is performed, 25 percent of 4 is 1. However, + // because you specified a MinAdjustmentMagnitude of 2, Application Auto Scaling + // scales out the service by 2 tasks. 
// - // For example, suppose that you create a step scaling policy to scale out an - // Amazon ECS service by 25 percent and you specify a MinAdjustmentMagnitude - // of 2. If the service has 4 tasks and the scaling policy is performed, 25 - // percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude - // of 2, Application Auto Scaling scales out the service by 2 tasks. + // Valid only if the adjustment type is PercentChangeInCapacity. MinAdjustmentMagnitude *int64 `type:"integer"` // A set of adjustments that enable you to scale based on the size of the alarm // breach. + // + // At least one step adjustment is required if you are adding a new step scaling + // policy configuration. StepAdjustments []StepAdjustment `type:"list"` } @@ -960,9 +1034,9 @@ type TargetTrackingScalingPolicyConfiguration struct { // Indicates whether scale in by the target tracking scaling policy is disabled. // If the value is true, scale in is disabled and the target tracking scaling - // policy won't remove capacity from the scalable resource. Otherwise, scale - // in is enabled and the target tracking scaling policy can remove capacity - // from the scalable resource. The default value is false. + // policy won't remove capacity from the scalable target. Otherwise, scale in + // is enabled and the target tracking scaling policy can remove capacity from + // the scalable target. The default value is false. DisableScaleIn *bool `type:"boolean"` // A predefined metric. You can specify either a predefined metric or a customized @@ -977,15 +1051,73 @@ type TargetTrackingScalingPolicyConfiguration struct { // application's availability. However, if another alarm triggers a scale-out // policy during the cooldown period after a scale-in, Application Auto Scaling // scales out your scalable target immediately. 
+ // + // Application Auto Scaling provides a default value of 300 for the following + // scalable targets: + // + // * ECS services + // + // * Spot Fleet requests + // + // * EMR clusters + // + // * AppStream 2.0 fleets + // + // * Aurora DB clusters + // + // * Amazon SageMaker endpoint variants + // + // * Custom resources + // + // For all other scalable targets, the default value is 0: + // + // * DynamoDB tables + // + // * DynamoDB global secondary indexes + // + // * Amazon Comprehend document classification endpoints + // + // * Lambda provisioned concurrency + // + // * Amazon Keyspaces tables ScaleInCooldown *int64 `type:"integer"` // The amount of time, in seconds, after a scale-out activity completes before // another scale-out activity can start. // // While the cooldown period is in effect, the capacity that has been added - // by the previous scale-out event that initiated the cooldown is calculated + // by the previous scale-out action that initiated the cooldown is calculated // as part of the desired capacity for the next scale out. The intention is // to continuously (but not excessively) scale out. + // + // Application Auto Scaling provides a default value of 300 for the following + // scalable targets: + // + // * ECS services + // + // * Spot Fleet requests + // + // * EMR clusters + // + // * AppStream 2.0 fleets + // + // * Aurora DB clusters + // + // * Amazon SageMaker endpoint variants + // + // * Custom resources + // + // For all other scalable targets, the default value is 0: + // + // * DynamoDB tables + // + // * DynamoDB global secondary indexes + // + // * Amazon Comprehend document classification endpoints + // + // * Lambda provisioned concurrency + // + // * Amazon Keyspaces tables ScaleOutCooldown *int64 `type:"integer"` // The target value for the metric. 
The range is 8.515920e-109 to 1.174271e+108 diff --git a/service/codegurureviewer/api_enums.go b/service/codegurureviewer/api_enums.go index 078fbb094e9..98cb3c20d8b 100644 --- a/service/codegurureviewer/api_enums.go +++ b/service/codegurureviewer/api_enums.go @@ -2,6 +2,25 @@ package codegurureviewer +type JobState string + +// Enum values for JobState +const ( + JobStateCompleted JobState = "Completed" + JobStatePending JobState = "Pending" + JobStateFailed JobState = "Failed" + JobStateDeleting JobState = "Deleting" +) + +func (enum JobState) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum JobState) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ProviderType string // Enum values for ProviderType @@ -19,6 +38,23 @@ func (enum ProviderType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type Reaction string + +// Enum values for Reaction +const ( + ReactionThumbsUp Reaction = "ThumbsUp" + ReactionThumbsDown Reaction = "ThumbsDown" +) + +func (enum Reaction) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum Reaction) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type RepositoryAssociationState string // Enum values for RepositoryAssociationState @@ -37,3 +73,19 @@ func (enum RepositoryAssociationState) MarshalValueBuf(b []byte) ([]byte, error) b = b[0:0] return append(b, enum...), nil } + +type Type string + +// Enum values for Type +const ( + TypePullRequest Type = "PullRequest" +) + +func (enum Type) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum Type) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/codegurureviewer/api_errors.go b/service/codegurureviewer/api_errors.go index e84072c62f0..52d04d8a016 100644 --- a/service/codegurureviewer/api_errors.go +++ 
b/service/codegurureviewer/api_errors.go @@ -30,6 +30,12 @@ const ( // The resource specified in the request was not found. ErrCodeNotFoundException = "NotFoundException" + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource specified in the request was not found. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + // ErrCodeThrottlingException for service response error code // "ThrottlingException". // diff --git a/service/codegurureviewer/api_op_AssociateRepository.go b/service/codegurureviewer/api_op_AssociateRepository.go index f8d2df2e61c..605bf23ce6c 100644 --- a/service/codegurureviewer/api_op_AssociateRepository.go +++ b/service/codegurureviewer/api_op_AssociateRepository.go @@ -16,21 +16,20 @@ type AssociateRepositoryInput struct { // Unique, case-sensitive identifier that you provide to ensure the idempotency // of the request. // - // If you want to add a new repository association, this parameter specifies - // a unique identifier for the new repository association that helps ensure - // idempotency. + // To add a new repository association, this parameter specifies a unique identifier + // for the new repository association that helps ensure idempotency. // - // If you use the AWS CLI or one of the AWS SDK to call this operation, then - // you can leave this parameter empty. The CLI or SDK generates a random UUID - // for you and includes that in the request. If you don't use the SDK and instead - // generate a raw HTTP request to the Secrets Manager service endpoint, then - // you must generate a ClientRequestToken yourself for new versions and include + // If you use the AWS CLI or one of the AWS SDKs to call this operation, you + // can leave this parameter empty. The CLI or SDK generates a random UUID for + // you and includes that in the request. 
If you don't use the SDK and instead + // generate a raw HTTP request to the Secrets Manager service endpoint, you + // must generate a ClientRequestToken yourself for new versions and include // that value in the request. // - // You typically only need to interact with this value if you implement your - // own retry logic and want to ensure that a given repository association is - // not created twice. We recommend that you generate a UUID-type value to ensure - // uniqueness within the specified repository association. + // You typically interact with this value if you implement your own retry logic + // and want to ensure that a given repository association is not created twice. + // We recommend that you generate a UUID-type value to ensure uniqueness within + // the specified repository association. // // Amazon CodeGuru Reviewer uses this value to prevent the accidental creation // of duplicate repository associations if there are failures and retries. @@ -124,8 +123,9 @@ const opAssociateRepository = "AssociateRepository" // // Associates an AWS CodeCommit repository with Amazon CodeGuru Reviewer. When // you associate an AWS CodeCommit repository with Amazon CodeGuru Reviewer, -// Amazon CodeGuru Reviewer will provide recommendations for each pull request. -// You can view recommendations in the AWS CodeCommit repository. +// Amazon CodeGuru Reviewer will provide recommendations for each pull request +// raised within the repository. You can view recommendations in the AWS CodeCommit +// repository. // // You can associate a GitHub repository using the Amazon CodeGuru Reviewer // console. diff --git a/service/codegurureviewer/api_op_DescribeCodeReview.go b/service/codegurureviewer/api_op_DescribeCodeReview.go new file mode 100644 index 00000000000..76723ca0151 --- /dev/null +++ b/service/codegurureviewer/api_op_DescribeCodeReview.go @@ -0,0 +1,146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package codegurureviewer + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeCodeReviewInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the code review to describe. + // + // CodeReviewArn is a required field + CodeReviewArn *string `location:"uri" locationName:"CodeReviewArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeCodeReviewInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCodeReviewInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeCodeReviewInput"} + + if s.CodeReviewArn == nil { + invalidParams.Add(aws.NewErrParamRequired("CodeReviewArn")) + } + if s.CodeReviewArn != nil && len(*s.CodeReviewArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CodeReviewArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeCodeReviewInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.CodeReviewArn != nil { + v := *s.CodeReviewArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "CodeReviewArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeCodeReviewOutput struct { + _ struct{} `type:"structure"` + + // Information about the code review. 
+ CodeReview *CodeReview `type:"structure"` +} + +// String returns the string representation +func (s DescribeCodeReviewOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeCodeReviewOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.CodeReview != nil { + v := s.CodeReview + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CodeReview", v, metadata) + } + return nil +} + +const opDescribeCodeReview = "DescribeCodeReview" + +// DescribeCodeReviewRequest returns a request value for making API operation for +// Amazon CodeGuru Reviewer. +// +// Returns the metadaata associated with the code review along with its status. +// +// // Example sending a request using DescribeCodeReviewRequest. +// req := client.DescribeCodeReviewRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codeguru-reviewer-2019-09-19/DescribeCodeReview +func (c *Client) DescribeCodeReviewRequest(input *DescribeCodeReviewInput) DescribeCodeReviewRequest { + op := &aws.Operation{ + Name: opDescribeCodeReview, + HTTPMethod: "GET", + HTTPPath: "/codereviews/{CodeReviewArn}", + } + + if input == nil { + input = &DescribeCodeReviewInput{} + } + + req := c.newRequest(op, input, &DescribeCodeReviewOutput{}) + return DescribeCodeReviewRequest{Request: req, Input: input, Copy: c.DescribeCodeReviewRequest} +} + +// DescribeCodeReviewRequest is the request type for the +// DescribeCodeReview API operation. +type DescribeCodeReviewRequest struct { + *aws.Request + Input *DescribeCodeReviewInput + Copy func(*DescribeCodeReviewInput) DescribeCodeReviewRequest +} + +// Send marshals and sends the DescribeCodeReview API request. 
+func (r DescribeCodeReviewRequest) Send(ctx context.Context) (*DescribeCodeReviewResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeCodeReviewResponse{ + DescribeCodeReviewOutput: r.Request.Data.(*DescribeCodeReviewOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeCodeReviewResponse is the response type for the +// DescribeCodeReview API operation. +type DescribeCodeReviewResponse struct { + *DescribeCodeReviewOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeCodeReview request. +func (r *DescribeCodeReviewResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codegurureviewer/api_op_DescribeRecommendationFeedback.go b/service/codegurureviewer/api_op_DescribeRecommendationFeedback.go new file mode 100644 index 00000000000..0ee6e58f4dd --- /dev/null +++ b/service/codegurureviewer/api_op_DescribeRecommendationFeedback.go @@ -0,0 +1,178 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codegurureviewer + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeRecommendationFeedbackInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the code review. + // + // CodeReviewArn is a required field + CodeReviewArn *string `location:"uri" locationName:"CodeReviewArn" min:"1" type:"string" required:"true"` + + // The recommendation ID that can be used to track the provided recommendations + // and then to collect the feedback. 
+ // + // RecommendationId is a required field + RecommendationId *string `location:"querystring" locationName:"RecommendationId" min:"1" type:"string" required:"true"` + + // Optional parameter to describe the feedback for a given user. If this is + // not supplied, it defaults to the user making the request. + UserId *string `location:"querystring" locationName:"UserId" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeRecommendationFeedbackInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRecommendationFeedbackInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeRecommendationFeedbackInput"} + + if s.CodeReviewArn == nil { + invalidParams.Add(aws.NewErrParamRequired("CodeReviewArn")) + } + if s.CodeReviewArn != nil && len(*s.CodeReviewArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CodeReviewArn", 1)) + } + + if s.RecommendationId == nil { + invalidParams.Add(aws.NewErrParamRequired("RecommendationId")) + } + if s.RecommendationId != nil && len(*s.RecommendationId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RecommendationId", 1)) + } + if s.UserId != nil && len(*s.UserId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DescribeRecommendationFeedbackInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.CodeReviewArn != nil { + v := *s.CodeReviewArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "CodeReviewArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RecommendationId != nil { + v := *s.RecommendationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "RecommendationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UserId != nil { + v := *s.UserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "UserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeRecommendationFeedbackOutput struct { + _ struct{} `type:"structure"` + + // The recommendation feedback given by the user. + RecommendationFeedback *RecommendationFeedback `type:"structure"` +} + +// String returns the string representation +func (s DescribeRecommendationFeedbackOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeRecommendationFeedbackOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RecommendationFeedback != nil { + v := s.RecommendationFeedback + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RecommendationFeedback", v, metadata) + } + return nil +} + +const opDescribeRecommendationFeedback = "DescribeRecommendationFeedback" + +// DescribeRecommendationFeedbackRequest returns a request value for making API operation for +// Amazon CodeGuru Reviewer. +// +// Describes the customer feedback for a CodeGuru Reviewer recommendation. +// +// // Example sending a request using DescribeRecommendationFeedbackRequest. 
+// req := client.DescribeRecommendationFeedbackRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codeguru-reviewer-2019-09-19/DescribeRecommendationFeedback +func (c *Client) DescribeRecommendationFeedbackRequest(input *DescribeRecommendationFeedbackInput) DescribeRecommendationFeedbackRequest { + op := &aws.Operation{ + Name: opDescribeRecommendationFeedback, + HTTPMethod: "GET", + HTTPPath: "/feedback/{CodeReviewArn}", + } + + if input == nil { + input = &DescribeRecommendationFeedbackInput{} + } + + req := c.newRequest(op, input, &DescribeRecommendationFeedbackOutput{}) + return DescribeRecommendationFeedbackRequest{Request: req, Input: input, Copy: c.DescribeRecommendationFeedbackRequest} +} + +// DescribeRecommendationFeedbackRequest is the request type for the +// DescribeRecommendationFeedback API operation. +type DescribeRecommendationFeedbackRequest struct { + *aws.Request + Input *DescribeRecommendationFeedbackInput + Copy func(*DescribeRecommendationFeedbackInput) DescribeRecommendationFeedbackRequest +} + +// Send marshals and sends the DescribeRecommendationFeedback API request. +func (r DescribeRecommendationFeedbackRequest) Send(ctx context.Context) (*DescribeRecommendationFeedbackResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeRecommendationFeedbackResponse{ + DescribeRecommendationFeedbackOutput: r.Request.Data.(*DescribeRecommendationFeedbackOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeRecommendationFeedbackResponse is the response type for the +// DescribeRecommendationFeedback API operation. 
+type DescribeRecommendationFeedbackResponse struct { + *DescribeRecommendationFeedbackOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeRecommendationFeedback request. +func (r *DescribeRecommendationFeedbackResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codegurureviewer/api_op_DescribeRepositoryAssociation.go b/service/codegurureviewer/api_op_DescribeRepositoryAssociation.go index d5a29b36e4e..121fd86d5b7 100644 --- a/service/codegurureviewer/api_op_DescribeRepositoryAssociation.go +++ b/service/codegurureviewer/api_op_DescribeRepositoryAssociation.go @@ -13,7 +13,8 @@ import ( type DescribeRepositoryAssociationInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) identifying the association. + // The Amazon Resource Name (ARN) identifying the association. You can retrieve + // this ARN by calling ListRepositories. // // AssociationArn is a required field AssociationArn *string `location:"uri" locationName:"AssociationArn" min:"1" type:"string" required:"true"` diff --git a/service/codegurureviewer/api_op_ListCodeReviews.go b/service/codegurureviewer/api_op_ListCodeReviews.go new file mode 100644 index 00000000000..6669c8c0d0c --- /dev/null +++ b/service/codegurureviewer/api_op_ListCodeReviews.go @@ -0,0 +1,297 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codegurureviewer + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListCodeReviewsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results that are returned per call. The default is + // 100. + MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` + + // If nextToken is returned, there are more results available. 
The value of + // nextToken is a unique pagination token for each page. Make the call again + // using the returned token to retrieve the next page. Keep all other arguments + // unchanged. + NextToken *string `location:"querystring" locationName:"NextToken" min:"1" type:"string"` + + // List of provider types for filtering that needs to be applied before displaying + // the result. For example, "providerTypes=[GitHub]" will list code reviews + // from GitHub. + ProviderTypes []ProviderType `location:"querystring" locationName:"ProviderTypes" min:"1" type:"list"` + + // List of repository names for filtering that needs to be applied before displaying + // the result. + RepositoryNames []string `location:"querystring" locationName:"RepositoryNames" min:"1" type:"list"` + + // List of states for filtering that needs to be applied before displaying the + // result. For example, "states=[Pending]" will list code reviews in the Pending + // state. + States []JobState `location:"querystring" locationName:"States" min:"1" type:"list"` + + // The type of code reviews to list in the response. + // + // Type is a required field + Type Type `location:"querystring" locationName:"Type" type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s ListCodeReviewsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListCodeReviewsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListCodeReviewsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + if s.ProviderTypes != nil && len(s.ProviderTypes) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ProviderTypes", 1)) + } + if s.RepositoryNames != nil && len(s.RepositoryNames) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RepositoryNames", 1)) + } + if s.States != nil && len(s.States) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("States", 1)) + } + if len(s.Type) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListCodeReviewsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "MaxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ProviderTypes != nil { + v := s.ProviderTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "ProviderTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.RepositoryNames != nil { + v := s.RepositoryNames + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "RepositoryNames", metadata) + ls0.Start() + 
for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.States != nil { + v := s.States + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "States", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +type ListCodeReviewsOutput struct { + _ struct{} `type:"structure"` + + // A list of code reviews that meet the criteria of the request. + CodeReviewSummaries []CodeReviewSummary `type:"list"` + + // Pagination token. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListCodeReviewsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListCodeReviewsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.CodeReviewSummaries != nil { + v := s.CodeReviewSummaries + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "CodeReviewSummaries", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListCodeReviews = "ListCodeReviews" + +// ListCodeReviewsRequest returns a request value for making API operation for +// Amazon CodeGuru Reviewer. +// +// Lists all the code reviews that the customer has created in the past 90 days. +// +// // Example sending a request using ListCodeReviewsRequest. 
+// req := client.ListCodeReviewsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codeguru-reviewer-2019-09-19/ListCodeReviews +func (c *Client) ListCodeReviewsRequest(input *ListCodeReviewsInput) ListCodeReviewsRequest { + op := &aws.Operation{ + Name: opListCodeReviews, + HTTPMethod: "GET", + HTTPPath: "/codereviews", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCodeReviewsInput{} + } + + req := c.newRequest(op, input, &ListCodeReviewsOutput{}) + return ListCodeReviewsRequest{Request: req, Input: input, Copy: c.ListCodeReviewsRequest} +} + +// ListCodeReviewsRequest is the request type for the +// ListCodeReviews API operation. +type ListCodeReviewsRequest struct { + *aws.Request + Input *ListCodeReviewsInput + Copy func(*ListCodeReviewsInput) ListCodeReviewsRequest +} + +// Send marshals and sends the ListCodeReviews API request. +func (r ListCodeReviewsRequest) Send(ctx context.Context) (*ListCodeReviewsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListCodeReviewsResponse{ + ListCodeReviewsOutput: r.Request.Data.(*ListCodeReviewsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListCodeReviewsRequestPaginator returns a paginator for ListCodeReviews. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListCodeReviewsRequest(input) +// p := codegurureviewer.NewListCodeReviewsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListCodeReviewsPaginator(req ListCodeReviewsRequest) ListCodeReviewsPaginator { + return ListCodeReviewsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListCodeReviewsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListCodeReviewsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListCodeReviewsPaginator struct { + aws.Pager +} + +func (p *ListCodeReviewsPaginator) CurrentPage() *ListCodeReviewsOutput { + return p.Pager.CurrentPage().(*ListCodeReviewsOutput) +} + +// ListCodeReviewsResponse is the response type for the +// ListCodeReviews API operation. +type ListCodeReviewsResponse struct { + *ListCodeReviewsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListCodeReviews request. +func (r *ListCodeReviewsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codegurureviewer/api_op_ListRecommendationFeedback.go b/service/codegurureviewer/api_op_ListRecommendationFeedback.go new file mode 100644 index 00000000000..018faad8367 --- /dev/null +++ b/service/codegurureviewer/api_op_ListRecommendationFeedback.go @@ -0,0 +1,287 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package codegurureviewer + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListRecommendationFeedbackInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the code review. + // + // CodeReviewArn is a required field + CodeReviewArn *string `location:"uri" locationName:"CodeReviewArn" min:"1" type:"string" required:"true"` + + // The maximum number of results that are returned per call. The default is + // 100. + MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` + + // If nextToken is returned, there are more results available. The value of + // nextToken is a unique pagination token for each page. Make the call again + // using the returned token to retrieve the next page. Keep all other arguments + // unchanged. + NextToken *string `location:"querystring" locationName:"NextToken" min:"1" type:"string"` + + // Filter on recommendationIds that need to be applied before displaying the + // result. This can be used to query all the recommendation feedback for a given + // recommendation. + RecommendationIds []string `location:"querystring" locationName:"RecommendationIds" min:"1" type:"list"` + + // Filter on userIds that need to be applied before displaying the result. This + // can be used to query all the recommendation feedback for a code review from + // a given user. + UserIds []string `location:"querystring" locationName:"UserIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s ListRecommendationFeedbackInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListRecommendationFeedbackInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRecommendationFeedbackInput"} + + if s.CodeReviewArn == nil { + invalidParams.Add(aws.NewErrParamRequired("CodeReviewArn")) + } + if s.CodeReviewArn != nil && len(*s.CodeReviewArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CodeReviewArn", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + if s.RecommendationIds != nil && len(s.RecommendationIds) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RecommendationIds", 1)) + } + if s.UserIds != nil && len(s.UserIds) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListRecommendationFeedbackInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.CodeReviewArn != nil { + v := *s.CodeReviewArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "CodeReviewArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "MaxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RecommendationIds != nil { + v := s.RecommendationIds + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "RecommendationIds", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.UserIds != nil { + v := s.UserIds + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "UserIds", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +type ListRecommendationFeedbackOutput struct { + _ struct{} `type:"structure"` + + // If nextToken is returned, there are more results available. The value of + // nextToken is a unique pagination token for each page. Make the call again + // using the returned token to retrieve the next page. Keep all other arguments + // unchanged. + NextToken *string `min:"1" type:"string"` + + // Recommendation feedback summaries corresponding to the code review ARN. 
+ RecommendationFeedbackSummaries []RecommendationFeedbackSummary `type:"list"` +} + +// String returns the string representation +func (s ListRecommendationFeedbackOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRecommendationFeedbackOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RecommendationFeedbackSummaries != nil { + v := s.RecommendationFeedbackSummaries + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "RecommendationFeedbackSummaries", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListRecommendationFeedback = "ListRecommendationFeedback" + +// ListRecommendationFeedbackRequest returns a request value for making API operation for +// Amazon CodeGuru Reviewer. +// +// Lists the customer feedback for a CodeGuru Reviewer recommendation for all +// users. This API will be used from the console to extract the previously given +// feedback by the user to pre-populate the feedback emojis for all recommendations. +// +// // Example sending a request using ListRecommendationFeedbackRequest. 
+// req := client.ListRecommendationFeedbackRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codeguru-reviewer-2019-09-19/ListRecommendationFeedback +func (c *Client) ListRecommendationFeedbackRequest(input *ListRecommendationFeedbackInput) ListRecommendationFeedbackRequest { + op := &aws.Operation{ + Name: opListRecommendationFeedback, + HTTPMethod: "GET", + HTTPPath: "/feedback/{CodeReviewArn}/RecommendationFeedback", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRecommendationFeedbackInput{} + } + + req := c.newRequest(op, input, &ListRecommendationFeedbackOutput{}) + return ListRecommendationFeedbackRequest{Request: req, Input: input, Copy: c.ListRecommendationFeedbackRequest} +} + +// ListRecommendationFeedbackRequest is the request type for the +// ListRecommendationFeedback API operation. +type ListRecommendationFeedbackRequest struct { + *aws.Request + Input *ListRecommendationFeedbackInput + Copy func(*ListRecommendationFeedbackInput) ListRecommendationFeedbackRequest +} + +// Send marshals and sends the ListRecommendationFeedback API request. +func (r ListRecommendationFeedbackRequest) Send(ctx context.Context) (*ListRecommendationFeedbackResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRecommendationFeedbackResponse{ + ListRecommendationFeedbackOutput: r.Request.Data.(*ListRecommendationFeedbackOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRecommendationFeedbackRequestPaginator returns a paginator for ListRecommendationFeedback. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. 
Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListRecommendationFeedbackRequest(input) +// p := codegurureviewer.NewListRecommendationFeedbackRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRecommendationFeedbackPaginator(req ListRecommendationFeedbackRequest) ListRecommendationFeedbackPaginator { + return ListRecommendationFeedbackPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRecommendationFeedbackInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRecommendationFeedbackPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListRecommendationFeedbackPaginator struct { + aws.Pager +} + +func (p *ListRecommendationFeedbackPaginator) CurrentPage() *ListRecommendationFeedbackOutput { + return p.Pager.CurrentPage().(*ListRecommendationFeedbackOutput) +} + +// ListRecommendationFeedbackResponse is the response type for the +// ListRecommendationFeedback API operation. +type ListRecommendationFeedbackResponse struct { + *ListRecommendationFeedbackOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRecommendationFeedback request. 
+func (r *ListRecommendationFeedbackResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codegurureviewer/api_op_ListRecommendations.go b/service/codegurureviewer/api_op_ListRecommendations.go new file mode 100644 index 00000000000..8f6bfac95fc --- /dev/null +++ b/service/codegurureviewer/api_op_ListRecommendations.go @@ -0,0 +1,239 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codegurureviewer + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListRecommendationsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the code review to describe. + // + // CodeReviewArn is a required field + CodeReviewArn *string `location:"uri" locationName:"CodeReviewArn" min:"1" type:"string" required:"true"` + + // The maximum number of results that are returned per call. The default is + // 100. + MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` + + // Pagination token. + NextToken *string `location:"querystring" locationName:"NextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRecommendationsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListRecommendationsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRecommendationsInput"} + + if s.CodeReviewArn == nil { + invalidParams.Add(aws.NewErrParamRequired("CodeReviewArn")) + } + if s.CodeReviewArn != nil && len(*s.CodeReviewArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CodeReviewArn", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRecommendationsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.CodeReviewArn != nil { + v := *s.CodeReviewArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "CodeReviewArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "MaxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListRecommendationsOutput struct { + _ struct{} `type:"structure"` + + // Pagination token. + NextToken *string `min:"1" type:"string"` + + // List of recommendations for the requested code review. 
+ RecommendationSummaries []RecommendationSummary `type:"list"` +} + +// String returns the string representation +func (s ListRecommendationsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRecommendationsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RecommendationSummaries != nil { + v := s.RecommendationSummaries + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "RecommendationSummaries", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListRecommendations = "ListRecommendations" + +// ListRecommendationsRequest returns a request value for making API operation for +// Amazon CodeGuru Reviewer. +// +// Returns the list of all recommendations for a completed code review. +// +// // Example sending a request using ListRecommendationsRequest. 
+// req := client.ListRecommendationsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codeguru-reviewer-2019-09-19/ListRecommendations +func (c *Client) ListRecommendationsRequest(input *ListRecommendationsInput) ListRecommendationsRequest { + op := &aws.Operation{ + Name: opListRecommendations, + HTTPMethod: "GET", + HTTPPath: "/codereviews/{CodeReviewArn}/Recommendations", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRecommendationsInput{} + } + + req := c.newRequest(op, input, &ListRecommendationsOutput{}) + return ListRecommendationsRequest{Request: req, Input: input, Copy: c.ListRecommendationsRequest} +} + +// ListRecommendationsRequest is the request type for the +// ListRecommendations API operation. +type ListRecommendationsRequest struct { + *aws.Request + Input *ListRecommendationsInput + Copy func(*ListRecommendationsInput) ListRecommendationsRequest +} + +// Send marshals and sends the ListRecommendations API request. +func (r ListRecommendationsRequest) Send(ctx context.Context) (*ListRecommendationsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRecommendationsResponse{ + ListRecommendationsOutput: r.Request.Data.(*ListRecommendationsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRecommendationsRequestPaginator returns a paginator for ListRecommendations. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over pages. +// req := client.ListRecommendationsRequest(input) +// p := codegurureviewer.NewListRecommendationsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRecommendationsPaginator(req ListRecommendationsRequest) ListRecommendationsPaginator { + return ListRecommendationsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRecommendationsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRecommendationsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListRecommendationsPaginator struct { + aws.Pager +} + +func (p *ListRecommendationsPaginator) CurrentPage() *ListRecommendationsOutput { + return p.Pager.CurrentPage().(*ListRecommendationsOutput) +} + +// ListRecommendationsResponse is the response type for the +// ListRecommendations API operation. +type ListRecommendationsResponse struct { + *ListRecommendationsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRecommendations request. +func (r *ListRecommendationsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codegurureviewer/api_op_ListRepositoryAssociations.go b/service/codegurureviewer/api_op_ListRepositoryAssociations.go index dbab3ea8e9f..2ecec7e37ab 100644 --- a/service/codegurureviewer/api_op_ListRepositoryAssociations.go +++ b/service/codegurureviewer/api_op_ListRepositoryAssociations.go @@ -15,15 +15,14 @@ type ListRepositoryAssociationsInput struct { // The maximum number of repository association results returned by ListRepositoryAssociations // in paginated output. 
When this parameter is used, ListRepositoryAssociations - // only returns maxResults results in a single page along with a nextToken response + // only returns maxResults results in a single page with a nextToken response // element. The remaining results of the initial request can be seen by sending // another ListRepositoryAssociations request with the returned nextToken value. - // This value can be between 1 and 100. If this parameter is not used, then - // ListRepositoryAssociations returns up to 100 results and a nextToken value - // if applicable. + // This value can be between 1 and 25. If this parameter is not used, ListRepositoryAssociations + // returns up to 25 results and a nextToken value if applicable. MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` - // List of names to use as a filter. + // List of repository names to use as a filter. Names []string `location:"querystring" locationName:"Name" min:"1" type:"list"` // The nextToken value returned from a previous paginated ListRepositoryAssociations @@ -31,12 +30,13 @@ type ListRepositoryAssociationsInput struct { // parameter. Pagination continues from the end of the previous results that // returned the nextToken value. // - // This token should be treated as an opaque identifier that is only used to - // retrieve the next items in a list and not for other programmatic purposes. + // Treat this token as an opaque identifier that is only used to retrieve the + // next items in a list and not for other programmatic purposes. NextToken *string `location:"querystring" locationName:"NextToken" min:"1" type:"string"` - // List of owners to use as a filter. For AWS CodeCommit, the owner is the AWS - // account id. For GitHub, it is the GitHub account name. + // List of owners to use as a filter. For GitHub, this is name of the GitHub + // account that was used to associate the repository. 
For AWS CodeCommit, it + // is the name of the CodeCommit account that was used to associate the repository. Owners []string `location:"querystring" locationName:"Owner" min:"1" type:"list"` // List of provider types to use as a filter. diff --git a/service/codegurureviewer/api_op_PutRecommendationFeedback.go b/service/codegurureviewer/api_op_PutRecommendationFeedback.go new file mode 100644 index 00000000000..5c538befb5e --- /dev/null +++ b/service/codegurureviewer/api_op_PutRecommendationFeedback.go @@ -0,0 +1,179 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codegurureviewer + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type PutRecommendationFeedbackInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the code review. + // + // CodeReviewArn is a required field + CodeReviewArn *string `min:"1" type:"string" required:"true"` + + // List for storing reactions. Reactions are utf-8 text code for emojis. If + // you send an empty list it clears all your feedback. + // + // Reactions is a required field + Reactions []Reaction `type:"list" required:"true"` + + // The recommendation ID that can be used to track the provided recommendations + // and then to collect the feedback. + // + // RecommendationId is a required field + RecommendationId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecommendationFeedbackInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutRecommendationFeedbackInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutRecommendationFeedbackInput"} + + if s.CodeReviewArn == nil { + invalidParams.Add(aws.NewErrParamRequired("CodeReviewArn")) + } + if s.CodeReviewArn != nil && len(*s.CodeReviewArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CodeReviewArn", 1)) + } + + if s.Reactions == nil { + invalidParams.Add(aws.NewErrParamRequired("Reactions")) + } + + if s.RecommendationId == nil { + invalidParams.Add(aws.NewErrParamRequired("RecommendationId")) + } + if s.RecommendationId != nil && len(*s.RecommendationId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RecommendationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutRecommendationFeedbackInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.CodeReviewArn != nil { + v := *s.CodeReviewArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CodeReviewArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Reactions != nil { + v := s.Reactions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Reactions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.RecommendationId != nil { + v := *s.RecommendationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RecommendationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type PutRecommendationFeedbackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRecommendationFeedbackOutput) String() string { 
+ return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutRecommendationFeedbackOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutRecommendationFeedback = "PutRecommendationFeedback" + +// PutRecommendationFeedbackRequest returns a request value for making API operation for +// Amazon CodeGuru Reviewer. +// +// Stores customer feedback for a CodeGuru-Reviewer recommendation. When this +// API is called again with different reactions the previous feedback is overwritten. +// +// // Example sending a request using PutRecommendationFeedbackRequest. +// req := client.PutRecommendationFeedbackRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codeguru-reviewer-2019-09-19/PutRecommendationFeedback +func (c *Client) PutRecommendationFeedbackRequest(input *PutRecommendationFeedbackInput) PutRecommendationFeedbackRequest { + op := &aws.Operation{ + Name: opPutRecommendationFeedback, + HTTPMethod: "PUT", + HTTPPath: "/feedback", + } + + if input == nil { + input = &PutRecommendationFeedbackInput{} + } + + req := c.newRequest(op, input, &PutRecommendationFeedbackOutput{}) + return PutRecommendationFeedbackRequest{Request: req, Input: input, Copy: c.PutRecommendationFeedbackRequest} +} + +// PutRecommendationFeedbackRequest is the request type for the +// PutRecommendationFeedback API operation. +type PutRecommendationFeedbackRequest struct { + *aws.Request + Input *PutRecommendationFeedbackInput + Copy func(*PutRecommendationFeedbackInput) PutRecommendationFeedbackRequest +} + +// Send marshals and sends the PutRecommendationFeedback API request. 
+func (r PutRecommendationFeedbackRequest) Send(ctx context.Context) (*PutRecommendationFeedbackResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutRecommendationFeedbackResponse{ + PutRecommendationFeedbackOutput: r.Request.Data.(*PutRecommendationFeedbackOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutRecommendationFeedbackResponse is the response type for the +// PutRecommendationFeedback API operation. +type PutRecommendationFeedbackResponse struct { + *PutRecommendationFeedbackOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutRecommendationFeedback request. +func (r *PutRecommendationFeedbackResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codegurureviewer/api_types.go b/service/codegurureviewer/api_types.go index 0046a34cfde..aac9353b80a 100644 --- a/service/codegurureviewer/api_types.go +++ b/service/codegurureviewer/api_types.go @@ -56,6 +56,550 @@ func (s CodeCommitRepository) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Information about a code review. +type CodeReview struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the code review to describe. + CodeReviewArn *string `min:"1" type:"string"` + + // The time, in milliseconds since the epoch, when the code review was created. + CreatedTimeStamp *time.Time `type:"timestamp"` + + // The time, in milliseconds since the epoch, when the code review was last + // updated. + LastUpdatedTimeStamp *time.Time `type:"timestamp"` + + // The statistics from the code review. + Metrics *Metrics `type:"structure"` + + // The name of the code review. + Name *string `min:"1" type:"string"` + + // The owner of the repository. + Owner *string `min:"1" type:"string"` + + // The provider type of the repository association. 
+ ProviderType ProviderType `type:"string" enum:"true"` + + // The pull request ID for the code review. + PullRequestId *string `min:"1" type:"string"` + + // The name of the repository. + RepositoryName *string `min:"1" type:"string"` + + // The type of the source code for the code review. + SourceCodeType *SourceCodeType `type:"structure"` + + // The state of the code review. + State JobState `type:"string" enum:"true"` + + // The reason for the state of the code review. + StateReason *string `type:"string"` + + // The type of code review. + Type Type `type:"string" enum:"true"` +} + +// String returns the string representation +func (s CodeReview) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CodeReview) MarshalFields(e protocol.FieldEncoder) error { + if s.CodeReviewArn != nil { + v := *s.CodeReviewArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CodeReviewArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTimeStamp != nil { + v := *s.CreatedTimeStamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTimeStamp", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LastUpdatedTimeStamp != nil { + v := *s.LastUpdatedTimeStamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTimeStamp", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Metrics != nil { + v := s.Metrics + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Metrics", v, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Owner != nil { + v := *s.Owner + + metadata := 
protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Owner", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.ProviderType) > 0 { + v := s.ProviderType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ProviderType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.PullRequestId != nil { + v := *s.PullRequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PullRequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RepositoryName != nil { + v := *s.RepositoryName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RepositoryName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceCodeType != nil { + v := s.SourceCodeType + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SourceCodeType", v, metadata) + } + if len(s.State) > 0 { + v := s.State + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.StateReason != nil { + v := *s.StateReason + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "StateReason", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Information about the summary of the code review. +type CodeReviewSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the code review to describe. + CodeReviewArn *string `min:"1" type:"string"` + + // The time, in milliseconds since the epoch, when the code review was created. + CreatedTimeStamp *time.Time `type:"timestamp"` + + // The time, in milliseconds since the epoch, when the code review was last + // updated. 
+ LastUpdatedTimeStamp *time.Time `type:"timestamp"` + + // The statistics from the code review. + MetricsSummary *MetricsSummary `type:"structure"` + + // The name of the code review. + Name *string `min:"1" type:"string"` + + // The owner of the repository. + Owner *string `min:"1" type:"string"` + + // The provider type of the repository association. + ProviderType ProviderType `type:"string" enum:"true"` + + // The pull request ID for the code review. + PullRequestId *string `min:"1" type:"string"` + + // The name of the repository. + RepositoryName *string `min:"1" type:"string"` + + // The state of the code review. + State JobState `type:"string" enum:"true"` + + // The type of the code review. + Type Type `type:"string" enum:"true"` +} + +// String returns the string representation +func (s CodeReviewSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CodeReviewSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.CodeReviewArn != nil { + v := *s.CodeReviewArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CodeReviewArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTimeStamp != nil { + v := *s.CreatedTimeStamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTimeStamp", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LastUpdatedTimeStamp != nil { + v := *s.LastUpdatedTimeStamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTimeStamp", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.MetricsSummary != nil { + v := s.MetricsSummary + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "MetricsSummary", v, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := 
protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Owner != nil { + v := *s.Owner + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Owner", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.ProviderType) > 0 { + v := s.ProviderType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ProviderType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.PullRequestId != nil { + v := *s.PullRequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PullRequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RepositoryName != nil { + v := *s.RepositoryName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RepositoryName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.State) > 0 { + v := s.State + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// The commit diff for the pull request. +type CommitDiffSourceCodeType struct { + _ struct{} `type:"structure"` + + // Destination Commit SHA + DestinationCommit *string `min:"6" type:"string"` + + // Source Commit SHA. + SourceCommit *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s CommitDiffSourceCodeType) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CommitDiffSourceCodeType) MarshalFields(e protocol.FieldEncoder) error { + if s.DestinationCommit != nil { + v := *s.DestinationCommit + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DestinationCommit", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceCommit != nil { + v := *s.SourceCommit + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceCommit", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Information about the statistics from the code review. +type Metrics struct { + _ struct{} `type:"structure"` + + // Total number of recommendations found in the code review. + FindingsCount *int64 `type:"long"` + + // Lines of code metered in the code review. + MeteredLinesOfCodeCount *int64 `type:"long"` +} + +// String returns the string representation +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Metrics) MarshalFields(e protocol.FieldEncoder) error { + if s.FindingsCount != nil { + v := *s.FindingsCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FindingsCount", protocol.Int64Value(v), metadata) + } + if s.MeteredLinesOfCodeCount != nil { + v := *s.MeteredLinesOfCodeCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MeteredLinesOfCodeCount", protocol.Int64Value(v), metadata) + } + return nil +} + +// Information about metrics summaries. +type MetricsSummary struct { + _ struct{} `type:"structure"` + + // Total number of recommendations found in the code review. + FindingsCount *int64 `type:"long"` + + // Lines of code metered in the code review. 
+ MeteredLinesOfCodeCount *int64 `type:"long"` +} + +// String returns the string representation +func (s MetricsSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MetricsSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.FindingsCount != nil { + v := *s.FindingsCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FindingsCount", protocol.Int64Value(v), metadata) + } + if s.MeteredLinesOfCodeCount != nil { + v := *s.MeteredLinesOfCodeCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MeteredLinesOfCodeCount", protocol.Int64Value(v), metadata) + } + return nil +} + +// Information about the recommendation feedback. +type RecommendationFeedback struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the code review. + CodeReviewArn *string `min:"1" type:"string"` + + // The time at which the feedback was created. + CreatedTimeStamp *time.Time `type:"timestamp"` + + // The time at which the feedback was last updated. + LastUpdatedTimeStamp *time.Time `type:"timestamp"` + + // List for storing reactions. Reactions are utf-8 text code for emojis. You + // can send an empty list to clear off all your feedback. + Reactions []Reaction `type:"list"` + + // The recommendation ID that can be used to track the provided recommendations. + // Later on it can be used to collect the feedback. + RecommendationId *string `min:"1" type:"string"` + + // The user principal that made the API call. + UserId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RecommendationFeedback) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s RecommendationFeedback) MarshalFields(e protocol.FieldEncoder) error { + if s.CodeReviewArn != nil { + v := *s.CodeReviewArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CodeReviewArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTimeStamp != nil { + v := *s.CreatedTimeStamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTimeStamp", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LastUpdatedTimeStamp != nil { + v := *s.LastUpdatedTimeStamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTimeStamp", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Reactions != nil { + v := s.Reactions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Reactions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.RecommendationId != nil { + v := *s.RecommendationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RecommendationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UserId != nil { + v := *s.UserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Information about recommendation feedback summaries. +type RecommendationFeedbackSummary struct { + _ struct{} `type:"structure"` + + // List for storing reactions. Reactions are utf-8 text code for emojis. + Reactions []Reaction `type:"list"` + + // The recommendation ID that can be used to track the provided recommendations. + // Later on it can be used to collect the feedback. 
+ RecommendationId *string `min:"1" type:"string"` + + // The identifier for the user that gave the feedback. + UserId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RecommendationFeedbackSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RecommendationFeedbackSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.Reactions != nil { + v := s.Reactions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Reactions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.RecommendationId != nil { + v := *s.RecommendationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RecommendationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UserId != nil { + v := *s.UserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Information about recommendations. +type RecommendationSummary struct { + _ struct{} `type:"structure"` + + // A description of the recommendation generated by CodeGuru Reviewer for the + // lines of code between the start line and the end line. + Description *string `min:"1" type:"string"` + + // Last line where the recommendation is applicable in the source commit or + // source branch. For a single line comment the start line and end line values + // will be the same. + EndLine *int64 `type:"integer"` + + // Name of the file on which a recommendation is provided. + FilePath *string `min:"1" type:"string"` + + // The recommendation ID that can be used to track the provided recommendations. + // Later on it can be used to collect the feedback. 
+ RecommendationId *string `min:"1" type:"string"` + + // Start line from where the recommendation is applicable in the source commit + // or source branch. + StartLine *int64 `type:"integer"` +} + +// String returns the string representation +func (s RecommendationSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RecommendationSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.EndLine != nil { + v := *s.EndLine + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "EndLine", protocol.Int64Value(v), metadata) + } + if s.FilePath != nil { + v := *s.FilePath + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FilePath", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RecommendationId != nil { + v := *s.RecommendationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RecommendationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.StartLine != nil { + v := *s.StartLine + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "StartLine", protocol.Int64Value(v), metadata) + } + return nil +} + // Information about a repository. type Repository struct { _ struct{} `type:"structure"` @@ -102,7 +646,7 @@ type RepositoryAssociation struct { // The Amazon Resource Name (ARN) identifying the repository association. AssociationArn *string `min:"1" type:"string"` - // The id of the repository association. + // The ID of the repository association. 
AssociationId *string `min:"1" type:"string"` // The time, in milliseconds since the epoch, when the repository association @@ -230,8 +774,7 @@ type RepositoryAssociationSummary struct { // // Failed // - // The association failed. For more information about troubleshooting (or why - // it failed), see [troubleshooting topic]. + // The association failed. // // Disassociating // @@ -291,3 +834,27 @@ func (s RepositoryAssociationSummary) MarshalFields(e protocol.FieldEncoder) err } return nil } + +// Information about the source code type. +type SourceCodeType struct { + _ struct{} `type:"structure"` + + // The commit diff for the pull request. + CommitDiff *CommitDiffSourceCodeType `type:"structure"` +} + +// String returns the string representation +func (s SourceCodeType) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SourceCodeType) MarshalFields(e protocol.FieldEncoder) error { + if s.CommitDiff != nil { + v := s.CommitDiff + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CommitDiff", v, metadata) + } + return nil +} diff --git a/service/codegurureviewer/codegururevieweriface/interface.go b/service/codegurureviewer/codegururevieweriface/interface.go index 801146a3b59..9ae89765964 100644 --- a/service/codegurureviewer/codegururevieweriface/interface.go +++ b/service/codegurureviewer/codegururevieweriface/interface.go @@ -63,11 +63,23 @@ import ( type ClientAPI interface { AssociateRepositoryRequest(*codegurureviewer.AssociateRepositoryInput) codegurureviewer.AssociateRepositoryRequest + DescribeCodeReviewRequest(*codegurureviewer.DescribeCodeReviewInput) codegurureviewer.DescribeCodeReviewRequest + + DescribeRecommendationFeedbackRequest(*codegurureviewer.DescribeRecommendationFeedbackInput) codegurureviewer.DescribeRecommendationFeedbackRequest + DescribeRepositoryAssociationRequest(*codegurureviewer.DescribeRepositoryAssociationInput) 
codegurureviewer.DescribeRepositoryAssociationRequest DisassociateRepositoryRequest(*codegurureviewer.DisassociateRepositoryInput) codegurureviewer.DisassociateRepositoryRequest + ListCodeReviewsRequest(*codegurureviewer.ListCodeReviewsInput) codegurureviewer.ListCodeReviewsRequest + + ListRecommendationFeedbackRequest(*codegurureviewer.ListRecommendationFeedbackInput) codegurureviewer.ListRecommendationFeedbackRequest + + ListRecommendationsRequest(*codegurureviewer.ListRecommendationsInput) codegurureviewer.ListRecommendationsRequest + ListRepositoryAssociationsRequest(*codegurureviewer.ListRepositoryAssociationsInput) codegurureviewer.ListRepositoryAssociationsRequest + + PutRecommendationFeedbackRequest(*codegurureviewer.PutRecommendationFeedbackInput) codegurureviewer.PutRecommendationFeedbackRequest } var _ ClientAPI = (*codegurureviewer.Client)(nil) diff --git a/service/databasemigrationservice/api_op_CreateEndpoint.go b/service/databasemigrationservice/api_op_CreateEndpoint.go index 69b9ac0a99e..3d7399a57f1 100644 --- a/service/databasemigrationservice/api_op_CreateEndpoint.go +++ b/service/databasemigrationservice/api_op_CreateEndpoint.go @@ -80,15 +80,15 @@ type CreateEndpointInput struct { // in the AWS Database Migration Service User Guide. ExtraConnectionAttributes *string `type:"string"` - // Settings in JSON format for the target Apache Kafka endpoint. For information - // about other available settings, see Using Object Mapping to Migrate Data - // to Apache Kafka (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html#CHAP_Target.Kafka.ObjectMapping) + // Settings in JSON format for the target Apache Kafka endpoint. For more information + // about the available settings, see Using Apache Kafka as a Target for AWS + // Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) // in the AWS Database Migration User Guide. 
KafkaSettings *KafkaSettings `type:"structure"` // Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. - // For information about other available settings, see Using Object Mapping - // to Migrate Data to a Kinesis Data Stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping) + // For more information about the available settings, see Using Amazon Kinesis + // Data Streams as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html) // in the AWS Database Migration User Guide. KinesisSettings *KinesisSettings `type:"structure"` @@ -103,11 +103,17 @@ type CreateEndpointInput struct { KmsKeyId *string `type:"string"` // Settings in JSON format for the source MongoDB endpoint. For more information - // about the available settings, see the configuration properties section in - // Using MongoDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) + // about the available settings, see Using MongoDB as a Target for AWS Database + // Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html#CHAP_Source.MongoDB.Configuration) // in the AWS Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` + // Settings in JSON format for the target Amazon Neptune endpoint. For more + // information about the available settings, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) + // in the AWS Database Migration Service User Guide. + NeptuneSettings *NeptuneSettings `type:"structure"` + // The password to be used to log in to the endpoint database. 
Password *string `type:"string" sensitive:"true"` @@ -170,6 +176,11 @@ func (s *CreateEndpointInput) Validate() error { invalidParams.AddNested("ElasticsearchSettings", err.(aws.ErrInvalidParams)) } } + if s.NeptuneSettings != nil { + if err := s.NeptuneSettings.Validate(); err != nil { + invalidParams.AddNested("NeptuneSettings", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams diff --git a/service/databasemigrationservice/api_op_CreateReplicationTask.go b/service/databasemigrationservice/api_op_CreateReplicationTask.go index b483f93867f..9a2c5ac4cf6 100644 --- a/service/databasemigrationservice/api_op_CreateReplicationTask.go +++ b/service/databasemigrationservice/api_op_CreateReplicationTask.go @@ -72,7 +72,7 @@ type CreateReplicationTaskInput struct { ReplicationTaskIdentifier *string `type:"string" required:"true"` // Overall settings for the task, in JSON format. For more information, see - // Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) + // Specifying Task Settings for AWS Database Migration Service Tasks (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) // in the AWS Database Migration User Guide. ReplicationTaskSettings *string `type:"string"` @@ -82,7 +82,7 @@ type CreateReplicationTaskInput struct { SourceEndpointArn *string `type:"string" required:"true"` // The table mappings for the task, in JSON format. For more information, see - // Table Mapping (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.html) + // Using Table Mapping to Specify Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.html) // in the AWS Database Migration User Guide. 
// // TableMappings is a required field @@ -95,6 +95,12 @@ type CreateReplicationTaskInput struct { // // TargetEndpointArn is a required field TargetEndpointArn *string `type:"string" required:"true"` + + // Supplemental information that the task requires to migrate the data for certain + // source and target endpoints. For more information, see Specifying Supplemental + // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) + // in the AWS Database Migration User Guide. + TaskData *string `type:"string"` } // String returns the string representation diff --git a/service/databasemigrationservice/api_op_DescribeReplicationSubnetGroups.go b/service/databasemigrationservice/api_op_DescribeReplicationSubnetGroups.go index 749ba40aeb0..642601a8596 100644 --- a/service/databasemigrationservice/api_op_DescribeReplicationSubnetGroups.go +++ b/service/databasemigrationservice/api_op_DescribeReplicationSubnetGroups.go @@ -14,6 +14,8 @@ type DescribeReplicationSubnetGroupsInput struct { _ struct{} `type:"structure"` // Filters applied to the describe action. + // + // Valid filter names: replication-subnet-group-id Filters []Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter diff --git a/service/databasemigrationservice/api_op_ModifyEndpoint.go b/service/databasemigrationservice/api_op_ModifyEndpoint.go index 68255271dfc..0bddb01c6a8 100644 --- a/service/databasemigrationservice/api_op_ModifyEndpoint.go +++ b/service/databasemigrationservice/api_op_ModifyEndpoint.go @@ -76,15 +76,15 @@ type ModifyEndpointInput struct { // pass the empty string ("") as an argument. ExtraConnectionAttributes *string `type:"string"` - // Settings in JSON format for the target Apache Kafka endpoint. 
For information - // about other available settings, see Using Object Mapping to Migrate Data - // to Apache Kafka (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html#CHAP_Target.Kafka.ObjectMapping) + // Settings in JSON format for the target Apache Kafka endpoint. For more information + // about the available settings, see Using Apache Kafka as a Target for AWS + // Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) // in the AWS Database Migration User Guide. KafkaSettings *KafkaSettings `type:"structure"` // Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. - // For information about other available settings, see Using Object Mapping - // to Migrate Data to a Kinesis Data Stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping) + // For more information about the available settings, see Using Amazon Kinesis + // Data Streams as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html) // in the AWS Database Migration User Guide. KinesisSettings *KinesisSettings `type:"structure"` @@ -94,6 +94,12 @@ type ModifyEndpointInput struct { // in the AWS Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` + // Settings in JSON format for the target Amazon Neptune endpoint. For more + // information about the available settings, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) + // in the AWS Database Migration Service User Guide. + NeptuneSettings *NeptuneSettings `type:"structure"` + // The password to be used to login to the endpoint database. 
Password *string `type:"string" sensitive:"true"` @@ -145,6 +151,11 @@ func (s *ModifyEndpointInput) Validate() error { invalidParams.AddNested("ElasticsearchSettings", err.(aws.ErrInvalidParams)) } } + if s.NeptuneSettings != nil { + if err := s.NeptuneSettings.Validate(); err != nil { + invalidParams.AddNested("NeptuneSettings", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams diff --git a/service/databasemigrationservice/api_op_ModifyReplicationTask.go b/service/databasemigrationservice/api_op_ModifyReplicationTask.go index ea6223320bd..f17d4334765 100644 --- a/service/databasemigrationservice/api_op_ModifyReplicationTask.go +++ b/service/databasemigrationservice/api_op_ModifyReplicationTask.go @@ -67,7 +67,7 @@ type ModifyReplicationTaskInput struct { // * Cannot end with a hyphen or contain two consecutive hyphens. ReplicationTaskIdentifier *string `type:"string"` - // JSON file that contains settings for the task, such as target metadata settings. + // JSON file that contains settings for the task, such as task metadata settings. ReplicationTaskSettings *string `type:"string"` // When using the AWS CLI or boto3, provide the path of the JSON file that contains @@ -75,6 +75,12 @@ type ModifyReplicationTaskInput struct { // DMS API, provide the JSON as the parameter value, for example: --table-mappings // file://mappingfile.json TableMappings *string `type:"string"` + + // Supplemental information that the task requires to migrate the data for certain + // source and target endpoints. For more information, see Specifying Supplemental + // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) + // in the AWS Database Migration User Guide. 
+ TaskData *string `type:"string"` +} // String returns the string representation diff --git a/service/databasemigrationservice/api_types.go b/service/databasemigrationservice/api_types.go index cbd7c9904bd..827411ec849 100644 --- a/service/databasemigrationservice/api_types.go +++ b/service/databasemigrationservice/api_types.go @@ -314,6 +314,10 @@ type Endpoint struct { // MongoDbSettings structure. MongoDbSettings *MongoDbSettings `type:"structure"` + // The settings for the Amazon Neptune target endpoint. For more information, + // see the NeptuneSettings structure. + NeptuneSettings *NeptuneSettings `type:"structure"` + // The port value used to access the endpoint. Port *int64 `type:"integer"` @@ -627,6 +631,74 @@ func (s MongoDbSettings) String() string { return awsutil.Prettify(s) } +// Provides information that defines an Amazon Neptune endpoint. +type NeptuneSettings struct { + _ struct{} `type:"structure"` + + // The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated + // graph data to the Neptune target database before raising an error. The default + // is 250. + ErrorRetryDuration *int64 `type:"integer"` + + // If you want IAM authorization enabled for this endpoint, set this parameter + // to true and attach the appropriate role policy document to your service role + // specified by ServiceAccessRoleArn. The default is false. + IamAuthEnabled *bool `type:"boolean"` + + // The maximum size in KB of migrated graph data stored in a CSV file before + // AWS DMS bulk-loads the data to the Neptune target database. The default is + // 1048576 KB. If successful, AWS DMS clears the bucket, ready to store the + // next batch of migrated graph data. + MaxFileSize *int64 `type:"integer"` + + // The number of times for AWS DMS to retry a bulk-load of migrated graph data + // to the Neptune target database before raising an error. The default is 5. 
+ MaxRetryCount *int64 `type:"integer"` + + // A folder path where you want AWS DMS to store migrated graph data + // in the S3 bucket specified by S3BucketName. + // + // S3BucketFolder is a required field + S3BucketFolder *string `type:"string" required:"true"` + + // The name of the S3 bucket for AWS DMS to temporarily store migrated graph + // data in CSV files before bulk-loading it to the Neptune target database. + // AWS DMS maps the SQL source data to graph data before storing it in these + // CSV files. + // + // S3BucketName is a required field + S3BucketName *string `type:"string" required:"true"` + + // The ARN of the service role you have created for the Neptune target endpoint. + // For more information, see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.ServiceRole + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.ServiceRole) + // in the AWS Database Migration Service User Guide. + ServiceAccessRoleArn *string `type:"string"` +} + +// String returns the string representation +func (s NeptuneSettings) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NeptuneSettings) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "NeptuneSettings"} + + if s.S3BucketFolder == nil { + invalidParams.Add(aws.NewErrParamRequired("S3BucketFolder")) + } + + if s.S3BucketName == nil { + invalidParams.Add(aws.NewErrParamRequired("S3BucketName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// In response to the DescribeOrderableReplicationInstances operation, this +// object describes an available replication instance. This description includes +// the replication instance's type, engine version, and allocated storage. 
@@ -1140,6 +1212,12 @@ type ReplicationTask struct { // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. TargetEndpointArn *string `type:"string"` + + // Supplemental information that the task requires to migrate the data for certain + // source and target endpoints. For more information, see Specifying Supplemental + // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) + // in the AWS Database Migration User Guide. + TaskData *string `type:"string"` } // String returns the string representation @@ -1528,6 +1606,11 @@ type SupportedEndpointType struct { // "kafka", "elasticsearch", "documentdb", and "sqlserver". EngineName *string `type:"string"` + // The earliest AWS DMS engine version that supports this endpoint engine. Note + // that endpoint engines released with AWS DMS versions earlier than 3.1.1 do + // not return a value for this parameter. + ReplicationInstanceEngineMinimumVersion *string `type:"string"` + // Indicates if Change Data Capture (CDC) is supported. SupportsCDC *bool `type:"boolean"` } diff --git a/service/dataexchange/api_enums.go b/service/dataexchange/api_enums.go index 9291488aee4..7309279168e 100644 --- a/service/dataexchange/api_enums.go +++ b/service/dataexchange/api_enums.go @@ -145,6 +145,24 @@ func (enum ResourceType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +// The types of encryption supported in export jobs to Amazon S3. 
+type ServerSideEncryptionTypes string + +// Enum values for ServerSideEncryptionTypes +const ( + ServerSideEncryptionTypesAwsKms ServerSideEncryptionTypes = "aws:kms" + ServerSideEncryptionTypesAes256 ServerSideEncryptionTypes = "AES256" +) + +func (enum ServerSideEncryptionTypes) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ServerSideEncryptionTypes) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type State string // Enum values for State diff --git a/service/dataexchange/api_types.go b/service/dataexchange/api_types.go index 3518ac4b795..b5785ee6733 100644 --- a/service/dataexchange/api_types.go +++ b/service/dataexchange/api_types.go @@ -605,6 +605,9 @@ type ExportAssetsToS3RequestDetails struct { // DataSetId is a required field DataSetId *string `type:"string" required:"true"` + // Encryption configuration for the export job. + Encryption *ExportServerSideEncryption `type:"structure"` + // The unique identifier for the revision associated with this export request. 
// // RevisionId is a required field @@ -638,6 +641,11 @@ func (s *ExportAssetsToS3RequestDetails) Validate() error { } } } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -665,6 +673,12 @@ func (s ExportAssetsToS3RequestDetails) MarshalFields(e protocol.FieldEncoder) e metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Encryption != nil { + v := s.Encryption + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Encryption", v, metadata) + } if s.RevisionId != nil { v := *s.RevisionId @@ -688,6 +702,9 @@ type ExportAssetsToS3ResponseDetails struct { // DataSetId is a required field DataSetId *string `type:"string" required:"true"` + // Encryption configuration of the export job. + Encryption *ExportServerSideEncryption `type:"structure"` + // The unique identifier for the revision associated with this export response. // // RevisionId is a required field @@ -719,6 +736,12 @@ func (s ExportAssetsToS3ResponseDetails) MarshalFields(e protocol.FieldEncoder) metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Encryption != nil { + v := s.Encryption + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Encryption", v, metadata) + } if s.RevisionId != nil { v := *s.RevisionId @@ -728,6 +751,65 @@ func (s ExportAssetsToS3ResponseDetails) MarshalFields(e protocol.FieldEncoder) return nil } +// Encryption configuration of the export job. Includes the encryption type +// as well as the AWS KMS key. The KMS key is only necessary if you chose the +// KMS encryption type. 
+type ExportServerSideEncryption struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS KMS key you want to use to + // encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms + // as an encryption type. + // + // KmsKeyArn is a required field + KmsKeyArn *string `type:"string" required:"true"` + + // The type of server side encryption used for encrypting the objects in Amazon + // S3. + // + // Type is a required field + Type ServerSideEncryptionTypes `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s ExportServerSideEncryption) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportServerSideEncryption) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ExportServerSideEncryption"} + + if s.KmsKeyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("KmsKeyArn")) + } + if len(s.Type) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ExportServerSideEncryption) MarshalFields(e protocol.FieldEncoder) error { + if s.KmsKeyArn != nil { + v := *s.KmsKeyArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "KmsKeyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +type ImportAssetFromSignedUrlJobErrorDetails struct { _ struct{} `type:"structure"` @@ -1195,7 +1277,7 @@ type JobError struct { // Message is a required field Message *string `type:"string" required:"true"` - // The unqiue identifier for the resource related to the error. 
+ // The unique identifier for the resource related to the error. ResourceId *string `type:"string"` // The type of resource related to the error. diff --git a/service/dlm/api_types.go b/service/dlm/api_types.go index 68b495c2ce4..f928a4ce68e 100644 --- a/service/dlm/api_types.go +++ b/service/dlm/api_types.go @@ -18,8 +18,8 @@ var _ = awsutil.Prettify type CreateRule struct { _ struct{} `type:"structure"` - // The interval between snapshots. The supported values are 2, 3, 4, 6, 8, 12, - // and 24. + // The interval between snapshots. The supported values are 1, 2, 3, 4, 6, 8, + // 12, and 24. // // Interval is a required field Interval *int64 `min:"1" type:"integer" required:"true"` diff --git a/service/elasticinference/api_client.go b/service/elasticinference/api_client.go index dc865ac63ed..83e6dbf788f 100644 --- a/service/elasticinference/api_client.go +++ b/service/elasticinference/api_client.go @@ -28,7 +28,7 @@ var initRequest func(*Client, *aws.Request) const ( ServiceName = "Amazon Elastic Inference" // Service's name ServiceID = "ElasticInference" // Service's identifier - EndpointsID = "elastic-inference" // Service's Endpoint identifier + EndpointsID = "api.elastic-inference" // Service's Endpoint identifier ) // New creates a new instance of the client from the provided Config. diff --git a/service/elasticinference/api_enums.go b/service/elasticinference/api_enums.go index 2f7868f92b7..879ece0390a 100644 --- a/service/elasticinference/api_enums.go +++ b/service/elasticinference/api_enums.go @@ -1,3 +1,21 @@ // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
package elasticinference + +type LocationType string + +// Enum values for LocationType +const ( + LocationTypeRegion LocationType = "region" + LocationTypeAvailabilityZone LocationType = "availability-zone" + LocationTypeAvailabilityZoneId LocationType = "availability-zone-id" +) + +func (enum LocationType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum LocationType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/elasticinference/api_op_DescribeAcceleratorOfferings.go b/service/elasticinference/api_op_DescribeAcceleratorOfferings.go new file mode 100644 index 00000000000..700ca01367e --- /dev/null +++ b/service/elasticinference/api_op_DescribeAcceleratorOfferings.go @@ -0,0 +1,169 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elasticinference + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeAcceleratorOfferingsInput struct { + _ struct{} `type:"structure"` + + // The list of accelerator types to describe. + AcceleratorTypes []string `locationName:"acceleratorTypes" type:"list"` + + // The location type that you want to describe accelerator type offerings for. + // It can assume the following values: region: will return the accelerator type + // offering at the regional level. availability-zone: will return the accelerator + // type offering at the availability zone level. availability-zone-id: will + // return the accelerator type offering at the availability zone level returning + // the availability zone id. 
+ // + // LocationType is a required field + LocationType LocationType `locationName:"locationType" min:"1" type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s DescribeAcceleratorOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAcceleratorOfferingsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeAcceleratorOfferingsInput"} + if len(s.LocationType) == 0 { + invalidParams.Add(aws.NewErrParamRequired("LocationType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeAcceleratorOfferingsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AcceleratorTypes != nil { + v := s.AcceleratorTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "acceleratorTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if len(s.LocationType) > 0 { + v := s.LocationType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "locationType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +type DescribeAcceleratorOfferingsOutput struct { + _ struct{} `type:"structure"` + + // The list of accelerator type offerings for a specific location. + AcceleratorTypeOfferings []AcceleratorTypeOffering `locationName:"acceleratorTypeOfferings" type:"list"` +} + +// String returns the string representation +func (s DescribeAcceleratorOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DescribeAcceleratorOfferingsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.AcceleratorTypeOfferings != nil { + v := s.AcceleratorTypeOfferings + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "acceleratorTypeOfferings", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opDescribeAcceleratorOfferings = "DescribeAcceleratorOfferings" + +// DescribeAcceleratorOfferingsRequest returns a request value for making API operation for +// Amazon Elastic Inference. +// +// Describes the locations in which a given accelerator type or set of types +// is present in a given region. +// +// // Example sending a request using DescribeAcceleratorOfferingsRequest. +// req := client.DescribeAcceleratorOfferingsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elastic-inference-2017-07-25/DescribeAcceleratorOfferings +func (c *Client) DescribeAcceleratorOfferingsRequest(input *DescribeAcceleratorOfferingsInput) DescribeAcceleratorOfferingsRequest { + op := &aws.Operation{ + Name: opDescribeAcceleratorOfferings, + HTTPMethod: "POST", + HTTPPath: "/describe-accelerator-offerings", + } + + if input == nil { + input = &DescribeAcceleratorOfferingsInput{} + } + + req := c.newRequest(op, input, &DescribeAcceleratorOfferingsOutput{}) + return DescribeAcceleratorOfferingsRequest{Request: req, Input: input, Copy: c.DescribeAcceleratorOfferingsRequest} +} + +// DescribeAcceleratorOfferingsRequest is the request type for the +// DescribeAcceleratorOfferings API operation. +type DescribeAcceleratorOfferingsRequest struct { + *aws.Request + Input *DescribeAcceleratorOfferingsInput + Copy func(*DescribeAcceleratorOfferingsInput) DescribeAcceleratorOfferingsRequest +} + +// Send marshals and sends the DescribeAcceleratorOfferings API request. 
+func (r DescribeAcceleratorOfferingsRequest) Send(ctx context.Context) (*DescribeAcceleratorOfferingsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeAcceleratorOfferingsResponse{ + DescribeAcceleratorOfferingsOutput: r.Request.Data.(*DescribeAcceleratorOfferingsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeAcceleratorOfferingsResponse is the response type for the +// DescribeAcceleratorOfferings API operation. +type DescribeAcceleratorOfferingsResponse struct { + *DescribeAcceleratorOfferingsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeAcceleratorOfferings request. +func (r *DescribeAcceleratorOfferingsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/elasticinference/api_op_DescribeAcceleratorTypes.go b/service/elasticinference/api_op_DescribeAcceleratorTypes.go new file mode 100644 index 00000000000..7a611ef0528 --- /dev/null +++ b/service/elasticinference/api_op_DescribeAcceleratorTypes.go @@ -0,0 +1,125 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elasticinference + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeAcceleratorTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAcceleratorTypesInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DescribeAcceleratorTypesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + return nil +} + +type DescribeAcceleratorTypesOutput struct { + _ struct{} `type:"structure"` + + // The available accelerator types. + AcceleratorTypes []AcceleratorType `locationName:"acceleratorTypes" type:"list"` +} + +// String returns the string representation +func (s DescribeAcceleratorTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeAcceleratorTypesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.AcceleratorTypes != nil { + v := s.AcceleratorTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "acceleratorTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opDescribeAcceleratorTypes = "DescribeAcceleratorTypes" + +// DescribeAcceleratorTypesRequest returns a request value for making API operation for +// Amazon Elastic Inference. +// +// Describes the accelerator types available in a given region, as well as their +// characteristics, such as memory and throughput. +// +// // Example sending a request using DescribeAcceleratorTypesRequest. 
+// req := client.DescribeAcceleratorTypesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elastic-inference-2017-07-25/DescribeAcceleratorTypes +func (c *Client) DescribeAcceleratorTypesRequest(input *DescribeAcceleratorTypesInput) DescribeAcceleratorTypesRequest { + op := &aws.Operation{ + Name: opDescribeAcceleratorTypes, + HTTPMethod: "GET", + HTTPPath: "/describe-accelerator-types", + } + + if input == nil { + input = &DescribeAcceleratorTypesInput{} + } + + req := c.newRequest(op, input, &DescribeAcceleratorTypesOutput{}) + return DescribeAcceleratorTypesRequest{Request: req, Input: input, Copy: c.DescribeAcceleratorTypesRequest} +} + +// DescribeAcceleratorTypesRequest is the request type for the +// DescribeAcceleratorTypes API operation. +type DescribeAcceleratorTypesRequest struct { + *aws.Request + Input *DescribeAcceleratorTypesInput + Copy func(*DescribeAcceleratorTypesInput) DescribeAcceleratorTypesRequest +} + +// Send marshals and sends the DescribeAcceleratorTypes API request. +func (r DescribeAcceleratorTypesRequest) Send(ctx context.Context) (*DescribeAcceleratorTypesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeAcceleratorTypesResponse{ + DescribeAcceleratorTypesOutput: r.Request.Data.(*DescribeAcceleratorTypesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeAcceleratorTypesResponse is the response type for the +// DescribeAcceleratorTypes API operation. +type DescribeAcceleratorTypesResponse struct { + *DescribeAcceleratorTypesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeAcceleratorTypes request. 
+func (r *DescribeAcceleratorTypesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/elasticinference/api_op_DescribeAccelerators.go b/service/elasticinference/api_op_DescribeAccelerators.go new file mode 100644 index 00000000000..16f4ee889a0 --- /dev/null +++ b/service/elasticinference/api_op_DescribeAccelerators.go @@ -0,0 +1,265 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elasticinference + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeAcceleratorsInput struct { + _ struct{} `type:"structure"` + + // The IDs of the accelerators to describe. + AcceleratorIds []string `locationName:"acceleratorIds" type:"list"` + + // One or more filters. Filter names and values are case-sensitive. Valid filter + // names are: accelerator-types: can provide a list of accelerator type names + // to filter for. instance-id: can provide a list of EC2 instance ids to filter + // for. + Filters []Filter `locationName:"filters" type:"list"` + + // The total number of items to return in the command's output. If the total + // number of items available is more than the value specified, a NextToken is + // provided in the command's output. To resume pagination, provide the NextToken + // value in the starting-token argument of a subsequent command. Do not use + // the NextToken response element directly outside of the AWS CLI. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // A token to specify where to start paginating. This is the NextToken from + // a previously truncated response. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeAcceleratorsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAcceleratorsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeAcceleratorsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeAcceleratorsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AcceleratorIds != nil { + v := s.AcceleratorIds + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "acceleratorIds", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.Filters != nil { + v := s.Filters + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "filters", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "maxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil 
+} + +type DescribeAcceleratorsOutput struct { + _ struct{} `type:"structure"` + + // The details of the Elastic Inference Accelerators. + AcceleratorSet []ElasticInferenceAccelerator `locationName:"acceleratorSet" type:"list"` + + // A token to specify where to start paginating. This is the NextToken from + // a previously truncated response. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeAcceleratorsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeAcceleratorsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.AcceleratorSet != nil { + v := s.AcceleratorSet + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "acceleratorSet", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opDescribeAccelerators = "DescribeAccelerators" + +// DescribeAcceleratorsRequest returns a request value for making API operation for +// Amazon Elastic Inference. +// +// Describes information over a provided set of accelerators belonging to an +// account. +// +// // Example sending a request using DescribeAcceleratorsRequest. 
+// req := client.DescribeAcceleratorsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elastic-inference-2017-07-25/DescribeAccelerators +func (c *Client) DescribeAcceleratorsRequest(input *DescribeAcceleratorsInput) DescribeAcceleratorsRequest { + op := &aws.Operation{ + Name: opDescribeAccelerators, + HTTPMethod: "POST", + HTTPPath: "/describe-accelerators", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAcceleratorsInput{} + } + + req := c.newRequest(op, input, &DescribeAcceleratorsOutput{}) + return DescribeAcceleratorsRequest{Request: req, Input: input, Copy: c.DescribeAcceleratorsRequest} +} + +// DescribeAcceleratorsRequest is the request type for the +// DescribeAccelerators API operation. +type DescribeAcceleratorsRequest struct { + *aws.Request + Input *DescribeAcceleratorsInput + Copy func(*DescribeAcceleratorsInput) DescribeAcceleratorsRequest +} + +// Send marshals and sends the DescribeAccelerators API request. +func (r DescribeAcceleratorsRequest) Send(ctx context.Context) (*DescribeAcceleratorsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeAcceleratorsResponse{ + DescribeAcceleratorsOutput: r.Request.Data.(*DescribeAcceleratorsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewDescribeAcceleratorsRequestPaginator returns a paginator for DescribeAccelerators. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over pages. +// req := client.DescribeAcceleratorsRequest(input) +// p := elasticinference.NewDescribeAcceleratorsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewDescribeAcceleratorsPaginator(req DescribeAcceleratorsRequest) DescribeAcceleratorsPaginator { + return DescribeAcceleratorsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *DescribeAcceleratorsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// DescribeAcceleratorsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type DescribeAcceleratorsPaginator struct { + aws.Pager +} + +func (p *DescribeAcceleratorsPaginator) CurrentPage() *DescribeAcceleratorsOutput { + return p.Pager.CurrentPage().(*DescribeAcceleratorsOutput) +} + +// DescribeAcceleratorsResponse is the response type for the +// DescribeAccelerators API operation. +type DescribeAcceleratorsResponse struct { + *DescribeAcceleratorsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeAccelerators request. +func (r *DescribeAcceleratorsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/elasticinference/api_op_TagResource.go b/service/elasticinference/api_op_TagResource.go index f1341e57845..1aaa17aec4c 100644 --- a/service/elasticinference/api_op_TagResource.go +++ b/service/elasticinference/api_op_TagResource.go @@ -97,7 +97,7 @@ const opTagResource = "TagResource" // TagResourceRequest returns a request value for making API operation for // Amazon Elastic Inference. // -// Adds the specified tag(s) to an Elastic Inference Accelerator. 
+// Adds the specified tags to an Elastic Inference Accelerator. // // // Example sending a request using TagResourceRequest. // req := client.TagResourceRequest(params) diff --git a/service/elasticinference/api_op_UntagResource.go b/service/elasticinference/api_op_UntagResource.go index cdc473ace7c..dfe4a91bbc0 100644 --- a/service/elasticinference/api_op_UntagResource.go +++ b/service/elasticinference/api_op_UntagResource.go @@ -97,7 +97,7 @@ const opUntagResource = "UntagResource" // UntagResourceRequest returns a request value for making API operation for // Amazon Elastic Inference. // -// Removes the specified tag(s) from an Elastic Inference Accelerator. +// Removes the specified tags from an Elastic Inference Accelerator. // // // Example sending a request using UntagResourceRequest. // req := client.UntagResourceRequest(params) diff --git a/service/elasticinference/api_types.go b/service/elasticinference/api_types.go index 9b317a07724..ce59aabc8c6 100644 --- a/service/elasticinference/api_types.go +++ b/service/elasticinference/api_types.go @@ -5,7 +5,302 @@ package elasticinference import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" ) var _ aws.Config var _ = awsutil.Prettify + +// The details of an Elastic Inference Accelerator type. +type AcceleratorType struct { + _ struct{} `type:"structure"` + + // The name of the Elastic Inference Accelerator type. + AcceleratorTypeName *string `locationName:"acceleratorTypeName" min:"1" type:"string"` + + // The memory information of the Elastic Inference Accelerator type. + MemoryInfo *MemoryInfo `locationName:"memoryInfo" type:"structure"` + + // The throughput information of the Elastic Inference Accelerator type. 
+ ThroughputInfo []KeyValuePair `locationName:"throughputInfo" type:"list"` +} + +// String returns the string representation +func (s AcceleratorType) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AcceleratorType) MarshalFields(e protocol.FieldEncoder) error { + if s.AcceleratorTypeName != nil { + v := *s.AcceleratorTypeName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "acceleratorTypeName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemoryInfo != nil { + v := s.MemoryInfo + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "memoryInfo", v, metadata) + } + if s.ThroughputInfo != nil { + v := s.ThroughputInfo + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "throughputInfo", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// The offering for an Elastic Inference Accelerator type. +type AcceleratorTypeOffering struct { + _ struct{} `type:"structure"` + + // The name of the Elastic Inference Accelerator type. + AcceleratorType *string `locationName:"acceleratorType" min:"1" type:"string"` + + // The location for the offering. It will return either the region, availability + // zone or availability zone id for the offering depending on the locationType + // value. + Location *string `locationName:"location" min:"1" type:"string"` + + // The location type for the offering. It can assume the following values: region: + // defines that the offering is at the regional level. availability-zone: defines + // that the offering is at the availability zone level. availability-zone-id: + // defines that the offering is at the availability zone level, defined by the + // availability zone id. 
+ LocationType LocationType `locationName:"locationType" min:"1" type:"string" enum:"true"` +} + +// String returns the string representation +func (s AcceleratorTypeOffering) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AcceleratorTypeOffering) MarshalFields(e protocol.FieldEncoder) error { + if s.AcceleratorType != nil { + v := *s.AcceleratorType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "acceleratorType", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Location != nil { + v := *s.Location + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "location", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.LocationType) > 0 { + v := s.LocationType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "locationType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// The details of an Elastic Inference Accelerator. +type ElasticInferenceAccelerator struct { + _ struct{} `type:"structure"` + + // The health of the Elastic Inference Accelerator. + AcceleratorHealth *ElasticInferenceAcceleratorHealth `locationName:"acceleratorHealth" type:"structure"` + + // The ID of the Elastic Inference Accelerator. + AcceleratorId *string `locationName:"acceleratorId" min:"1" type:"string"` + + // The type of the Elastic Inference Accelerator. + AcceleratorType *string `locationName:"acceleratorType" min:"1" type:"string"` + + // The ARN of the resource that the Elastic Inference Accelerator is attached + // to. + AttachedResource *string `locationName:"attachedResource" min:"1" type:"string"` + + // The availability zone where the Elastic Inference Accelerator is present. 
+ AvailabilityZone *string `locationName:"availabilityZone" min:"1" type:"string"` +} + +// String returns the string representation +func (s ElasticInferenceAccelerator) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ElasticInferenceAccelerator) MarshalFields(e protocol.FieldEncoder) error { + if s.AcceleratorHealth != nil { + v := s.AcceleratorHealth + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "acceleratorHealth", v, metadata) + } + if s.AcceleratorId != nil { + v := *s.AcceleratorId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "acceleratorId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AcceleratorType != nil { + v := *s.AcceleratorType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "acceleratorType", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AttachedResource != nil { + v := *s.AttachedResource + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "attachedResource", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AvailabilityZone != nil { + v := *s.AvailabilityZone + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "availabilityZone", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The health details of an Elastic Inference Accelerator. +type ElasticInferenceAcceleratorHealth struct { + _ struct{} `type:"structure"` + + // The health status of the Elastic Inference Accelerator. + Status *string `locationName:"status" min:"1" type:"string"` +} + +// String returns the string representation +func (s ElasticInferenceAcceleratorHealth) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ElasticInferenceAcceleratorHealth) MarshalFields(e protocol.FieldEncoder) error { + if s.Status != nil { + v := *s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A filter expression for the Elastic Inference Accelerator list. +type Filter struct { + _ struct{} `type:"structure"` + + // The filter name for the Elastic Inference Accelerator list. It can assume + // the following values: accelerator-type: the type of Elastic Inference Accelerator + // to filter for. instance-id: an EC2 instance id to filter for. + Name *string `locationName:"name" min:"1" type:"string"` + + // The values for the filter of the Elastic Inference Accelerator list. + Values []string `locationName:"values" type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Filter) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Filter"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Filter) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Values != nil { + v := s.Values + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "values", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +// A throughput entry for an Elastic Inference Accelerator type. +type KeyValuePair struct { + _ struct{} `type:"structure"` + + // The throughput value of the Elastic Inference Accelerator type. It can assume + // the following values: TFLOPS16bit: the throughput expressed in 16bit TeraFLOPS. + // TFLOPS32bit: the throughput expressed in 32bit TeraFLOPS. + Key *string `locationName:"key" min:"1" type:"string"` + + // The throughput value of the Elastic Inference Accelerator type. + Value *int64 `locationName:"value" type:"integer"` +} + +// String returns the string representation +func (s KeyValuePair) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s KeyValuePair) MarshalFields(e protocol.FieldEncoder) error { + if s.Key != nil { + v := *s.Key + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Value != nil { + v := *s.Value + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "value", protocol.Int64Value(v), metadata) + } + return nil +} + +// The memory information of an Elastic Inference Accelerator type. +type MemoryInfo struct { + _ struct{} `type:"structure"` + + // The size in mebibytes of the Elastic Inference Accelerator type. 
+ SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"` +} + +// String returns the string representation +func (s MemoryInfo) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MemoryInfo) MarshalFields(e protocol.FieldEncoder) error { + if s.SizeInMiB != nil { + v := *s.SizeInMiB + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "sizeInMiB", protocol.Int64Value(v), metadata) + } + return nil +} diff --git a/service/elasticinference/elasticinferenceiface/interface.go b/service/elasticinference/elasticinferenceiface/interface.go index adf3e4f7e71..8b4d5bafc61 100644 --- a/service/elasticinference/elasticinferenceiface/interface.go +++ b/service/elasticinference/elasticinferenceiface/interface.go @@ -23,7 +23,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // Amazon Elastic Inference. // func myFunc(svc elasticinferenceiface.ClientAPI) bool { -// // Make svc.ListTagsForResource request +// // Make svc.DescribeAcceleratorOfferings request // } // // func main() { @@ -43,7 +43,7 @@ import ( // type mockClientClient struct { // elasticinferenceiface.ClientPI // } -// func (m *mockClientClient) ListTagsForResource(input *elasticinference.ListTagsForResourceInput) (*elasticinference.ListTagsForResourceOutput, error) { +// func (m *mockClientClient) DescribeAcceleratorOfferings(input *elasticinference.DescribeAcceleratorOfferingsInput) (*elasticinference.DescribeAcceleratorOfferingsOutput, error) { // // mock response/functionality // } // @@ -61,6 +61,12 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. 
type ClientAPI interface { + DescribeAcceleratorOfferingsRequest(*elasticinference.DescribeAcceleratorOfferingsInput) elasticinference.DescribeAcceleratorOfferingsRequest + + DescribeAcceleratorTypesRequest(*elasticinference.DescribeAcceleratorTypesInput) elasticinference.DescribeAcceleratorTypesRequest + + DescribeAcceleratorsRequest(*elasticinference.DescribeAcceleratorsInput) elasticinference.DescribeAcceleratorsRequest + ListTagsForResourceRequest(*elasticinference.ListTagsForResourceInput) elasticinference.ListTagsForResourceRequest TagResourceRequest(*elasticinference.TagResourceInput) elasticinference.TagResourceRequest diff --git a/service/elasticsearchservice/api_types.go b/service/elasticsearchservice/api_types.go index e47015e4f00..f1a1ae913ea 100644 --- a/service/elasticsearchservice/api_types.go +++ b/service/elasticsearchservice/api_types.go @@ -2267,6 +2267,10 @@ type ServiceSoftwareOptions struct { // The new service software version if one is available. NewVersion *string `type:"string"` + // True if a service software is never automatically updated. False if a service + // software is automatically updated after AutomatedUpdateDate. + OptionalDeployment *bool `type:"boolean"` + // True if you are able to update you service software version. False if you // are not able to update your service software version. 
UpdateAvailable *bool `type:"boolean"` @@ -2314,6 +2318,12 @@ func (s ServiceSoftwareOptions) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "NewVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.OptionalDeployment != nil { + v := *s.OptionalDeployment + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "OptionalDeployment", protocol.BoolValue(v), metadata) + } if s.UpdateAvailable != nil { v := *s.UpdateAvailable diff --git a/service/firehose/api_enums.go b/service/firehose/api_enums.go index 3547b53631f..6eb423da0ae 100644 --- a/service/firehose/api_enums.go +++ b/service/firehose/api_enums.go @@ -10,6 +10,7 @@ const ( CompressionFormatGzip CompressionFormat = "GZIP" CompressionFormatZip CompressionFormat = "ZIP" CompressionFormatSnappy CompressionFormat = "Snappy" + CompressionFormatHadoopSnappy CompressionFormat = "HADOOP_SNAPPY" ) func (enum CompressionFormat) MarshalValue() (string, error) { @@ -46,14 +47,21 @@ type DeliveryStreamFailureType string // Enum values for DeliveryStreamFailureType const ( - DeliveryStreamFailureTypeRetireKmsGrantFailed DeliveryStreamFailureType = "RETIRE_KMS_GRANT_FAILED" - DeliveryStreamFailureTypeCreateKmsGrantFailed DeliveryStreamFailureType = "CREATE_KMS_GRANT_FAILED" - DeliveryStreamFailureTypeKmsAccessDenied DeliveryStreamFailureType = "KMS_ACCESS_DENIED" - DeliveryStreamFailureTypeDisabledKmsKey DeliveryStreamFailureType = "DISABLED_KMS_KEY" - DeliveryStreamFailureTypeInvalidKmsKey DeliveryStreamFailureType = "INVALID_KMS_KEY" - DeliveryStreamFailureTypeKmsKeyNotFound DeliveryStreamFailureType = "KMS_KEY_NOT_FOUND" - DeliveryStreamFailureTypeKmsOptInRequired DeliveryStreamFailureType = "KMS_OPT_IN_REQUIRED" - DeliveryStreamFailureTypeUnknownError DeliveryStreamFailureType = "UNKNOWN_ERROR" + DeliveryStreamFailureTypeRetireKmsGrantFailed DeliveryStreamFailureType = "RETIRE_KMS_GRANT_FAILED" + 
DeliveryStreamFailureTypeCreateKmsGrantFailed DeliveryStreamFailureType = "CREATE_KMS_GRANT_FAILED" + DeliveryStreamFailureTypeKmsAccessDenied DeliveryStreamFailureType = "KMS_ACCESS_DENIED" + DeliveryStreamFailureTypeDisabledKmsKey DeliveryStreamFailureType = "DISABLED_KMS_KEY" + DeliveryStreamFailureTypeInvalidKmsKey DeliveryStreamFailureType = "INVALID_KMS_KEY" + DeliveryStreamFailureTypeKmsKeyNotFound DeliveryStreamFailureType = "KMS_KEY_NOT_FOUND" + DeliveryStreamFailureTypeKmsOptInRequired DeliveryStreamFailureType = "KMS_OPT_IN_REQUIRED" + DeliveryStreamFailureTypeCreateEniFailed DeliveryStreamFailureType = "CREATE_ENI_FAILED" + DeliveryStreamFailureTypeDeleteEniFailed DeliveryStreamFailureType = "DELETE_ENI_FAILED" + DeliveryStreamFailureTypeSubnetNotFound DeliveryStreamFailureType = "SUBNET_NOT_FOUND" + DeliveryStreamFailureTypeSecurityGroupNotFound DeliveryStreamFailureType = "SECURITY_GROUP_NOT_FOUND" + DeliveryStreamFailureTypeEniAccessDenied DeliveryStreamFailureType = "ENI_ACCESS_DENIED" + DeliveryStreamFailureTypeSubnetAccessDenied DeliveryStreamFailureType = "SUBNET_ACCESS_DENIED" + DeliveryStreamFailureTypeSecurityGroupAccessDenied DeliveryStreamFailureType = "SECURITY_GROUP_ACCESS_DENIED" + DeliveryStreamFailureTypeUnknownError DeliveryStreamFailureType = "UNKNOWN_ERROR" ) func (enum DeliveryStreamFailureType) MarshalValue() (string, error) { diff --git a/service/firehose/api_op_StartDeliveryStreamEncryption.go b/service/firehose/api_op_StartDeliveryStreamEncryption.go index 5ca238273b4..eb5ccfc821e 100644 --- a/service/firehose/api_op_StartDeliveryStreamEncryption.go +++ b/service/firehose/api_op_StartDeliveryStreamEncryption.go @@ -82,9 +82,11 @@ const opStartDeliveryStreamEncryption = "StartDeliveryStreamEncryption" // // Even if encryption is currently enabled for a delivery stream, you can still // invoke this operation on it to change the ARN of the CMK or both its type -// and ARN. 
In this case, Kinesis Data Firehose schedules the grant it had on -// the old CMK for retirement and creates a grant that enables it to use the -// new CMK to encrypt and decrypt data and to manage the grant. +// and ARN. If you invoke this method to change the CMK, and the old CMK is +// of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it +// had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, +// Kinesis Data Firehose creates a grant that enables it to use the new CMK +// to encrypt and decrypt data and to manage the grant. // // If a delivery stream already has encryption enabled and then you invoke this // operation to change the ARN of the CMK or both its type and ARN and you get @@ -92,10 +94,12 @@ const opStartDeliveryStreamEncryption = "StartDeliveryStreamEncryption" // In this case, encryption remains enabled with the old CMK. // // If the encryption status of your delivery stream is ENABLING_FAILED, you -// can invoke this operation again. +// can invoke this operation again with a valid CMK. The CMK must be enabled +// and the key policy mustn't explicitly deny the permission for Kinesis Data +// Firehose to invoke KMS encrypt and decrypt operations. // -// You can only enable SSE for a delivery stream that uses DirectPut as its -// source. +// You can enable SSE for a delivery stream only if it's a delivery stream that +// uses DirectPut as its source. // // The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations // have a combined limit of 25 calls per delivery stream per 24 hours. 
For example, diff --git a/service/firehose/api_types.go b/service/firehose/api_types.go index 60180cd95c3..5ac303dc265 100644 --- a/service/firehose/api_types.go +++ b/service/firehose/api_types.go @@ -152,14 +152,17 @@ type DataFormatConversionConfiguration struct { Enabled *bool `type:"boolean"` // Specifies the deserializer that you want Kinesis Data Firehose to use to - // convert the format of your data from JSON. + // convert the format of your data from JSON. This parameter is required if + // Enabled is set to true. InputFormatConfiguration *InputFormatConfiguration `type:"structure"` // Specifies the serializer that you want Kinesis Data Firehose to use to convert - // the format of your data to the Parquet or ORC format. + // the format of your data to the Parquet or ORC format. This parameter is required + // if Enabled is set to true. OutputFormatConfiguration *OutputFormatConfiguration `type:"structure"` // Specifies the AWS Glue Data Catalog table that contains the column information. + // This parameter is required if Enabled is set to true. SchemaConfiguration *SchemaConfiguration `type:"structure"` } @@ -176,6 +179,11 @@ func (s *DataFormatConversionConfiguration) Validate() error { invalidParams.AddNested("OutputFormatConfiguration", err.(aws.ErrInvalidParams)) } } + if s.SchemaConfiguration != nil { + if err := s.SchemaConfiguration.Validate(); err != nil { + invalidParams.AddNested("SchemaConfiguration", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -292,8 +300,8 @@ func (s DeliveryStreamEncryptionConfiguration) String() string { return awsutil.Prettify(s) } -// Used to specify the type and Amazon Resource Name (ARN) of the CMK needed -// for Server-Side Encryption (SSE). +// Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side +// Encryption (SSE). 
type DeliveryStreamEncryptionConfigurationInput struct { _ struct{} `type:"structure"` @@ -313,8 +321,17 @@ type DeliveryStreamEncryptionConfigurationInput struct { // manages that grant. // // When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery - // stream that is already encrypted with a customer managed CMK, Kinesis Data - // Firehose schedules the grant it had on the old CMK for retirement. + // stream that is encrypted with a customer managed CMK, Kinesis Data Firehose + // schedules the grant it had on the old CMK for retirement. + // + // You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery + // streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation + // exceeds this limit, Kinesis Data Firehose throws a LimitExceededException. + // + // To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose + // doesn't support asymmetric CMKs. For information about symmetric and asymmetric + // CMKs, see About Symmetric and Asymmetric CMKs (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html) + // in the AWS Key Management Service developer guide. // // KeyType is a required field KeyType KeyType `type:"string" required:"true" enum:"true"` @@ -510,6 +527,9 @@ type ElasticsearchDestinationConfiguration struct { // // For Elasticsearch 7.x, don't specify a TypeName. TypeName *string `type:"string"` + + // The details of the VPC of the Amazon ES destination. 
+ VpcConfiguration *VpcConfiguration `type:"structure"` } // String returns the string representation @@ -559,6 +579,11 @@ func (s *ElasticsearchDestinationConfiguration) Validate() error { invalidParams.AddNested("S3Configuration", err.(aws.ErrInvalidParams)) } } + if s.VpcConfiguration != nil { + if err := s.VpcConfiguration.Validate(); err != nil { + invalidParams.AddNested("VpcConfiguration", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -613,6 +638,9 @@ type ElasticsearchDestinationDescription struct { // The Elasticsearch type name. This applies to Elasticsearch 6.x and lower // versions. For Elasticsearch 7.x, there's no value for TypeName. TypeName *string `type:"string"` + + // The details of the VPC of the Amazon ES destination. + VpcConfigurationDescription *VpcConfigurationDescription `type:"structure"` } // String returns the string representation @@ -1050,7 +1078,7 @@ type FailureDescription struct { // A message providing details about the error that caused the failure. // // Details is a required field - Details *string `type:"string" required:"true"` + Details *string `min:"1" type:"string" required:"true"` // The type of error that caused the failure. // @@ -1087,7 +1115,7 @@ func (s HiveJsonSerDe) String() string { } // Specifies the deserializer you want to use to convert the format of the input -// data. +// data. This parameter is required if Enabled is set to true. type InputFormatConfiguration struct { _ struct{} `type:"structure"` @@ -1327,7 +1355,8 @@ func (s *OrcSerDe) Validate() error { } // Specifies the serializer that you want Kinesis Data Firehose to use to convert -// the format of your data before it writes it to Amazon S3. +// the format of your data before it writes it to Amazon S3. This parameter +// is required if Enabled is set to true. 
type OutputFormatConfiguration struct { _ struct{} `type:"structure"` @@ -2076,35 +2105,36 @@ func (s *S3DestinationUpdate) Validate() error { } // Specifies the schema to which you want Kinesis Data Firehose to configure -// your data before it writes it to Amazon S3. +// your data before it writes it to Amazon S3. This parameter is required if +// Enabled is set to true. type SchemaConfiguration struct { _ struct{} `type:"structure"` // The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account // ID is used by default. - CatalogId *string `type:"string"` + CatalogId *string `min:"1" type:"string"` // Specifies the name of the AWS Glue database that contains the schema for // the output data. - DatabaseName *string `type:"string"` + DatabaseName *string `min:"1" type:"string"` // If you don't specify an AWS Region, the default is the current Region. - Region *string `type:"string"` + Region *string `min:"1" type:"string"` // The role that Kinesis Data Firehose can use to access AWS Glue. This role // must be in the same account you use for Kinesis Data Firehose. Cross-account // roles aren't allowed. - RoleARN *string `type:"string"` + RoleARN *string `min:"1" type:"string"` // Specifies the AWS Glue table that contains the column information that constitutes // your data schema. - TableName *string `type:"string"` + TableName *string `min:"1" type:"string"` // Specifies the table version for the output data schema. If you don't specify // this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the // most recent version. This means that any updates to the table are automatically // picked up. - VersionId *string `type:"string"` + VersionId *string `min:"1" type:"string"` } // String returns the string representation @@ -2112,6 +2142,34 @@ func (s SchemaConfiguration) String() string { return awsutil.Prettify(s) } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SchemaConfiguration) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SchemaConfiguration"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Region != nil && len(*s.Region) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Region", 1)) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleARN", 1)) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TableName", 1)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The serializer that you want Kinesis Data Firehose to use to convert data // to the target format before writing it to Amazon S3. Kinesis Data Firehose // supports two types of serializers: the ORC SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html) @@ -2431,3 +2489,121 @@ func (s *Tag) Validate() error { } return nil } + +// The details of the VPC of the Amazon ES destination. +type VpcConfiguration struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role that you want the delivery stream to use to create + // endpoints in the destination VPC. + // + // RoleARN is a required field + RoleARN *string `min:"1" type:"string" required:"true"` + + // The IDs of the security groups that you want Kinesis Data Firehose to use + // when it creates ENIs in the VPC of the Amazon ES destination. + // + // SecurityGroupIds is a required field + SecurityGroupIds []string `min:"1" type:"list" required:"true"` + + // The IDs of the subnets that you want Kinesis Data Firehose to use to create + // ENIs in the VPC of the Amazon ES destination. 
Make sure that the routing + // tables and inbound and outbound rules allow traffic to flow from the subnets + // whose IDs are specified here to the subnets that have the destination Amazon + // ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the + // subnets that are specified here. Do not delete or modify these ENIs. + // + // The number of ENIs that Kinesis Data Firehose creates in the subnets specified + // here scales up and down automatically based on throughput. To enable Kinesis + // Data Firehose to scale up the number of ENIs to match throughput, ensure + // that you have sufficient quota. To help you calculate the quota you need, + // assume that Kinesis Data Firehose can create up to three ENIs for this delivery + // stream for each of the subnets specified here. For more information about + // ENI quota, see Network Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) + // in the Amazon VPC Quotas topic. + // + // SubnetIds is a required field + SubnetIds []string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s VpcConfiguration) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *VpcConfiguration) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "VpcConfiguration"} + + if s.RoleARN == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleARN")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleARN", 1)) + } + + if s.SecurityGroupIds == nil { + invalidParams.Add(aws.NewErrParamRequired("SecurityGroupIds")) + } + if s.SecurityGroupIds != nil && len(s.SecurityGroupIds) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SecurityGroupIds", 1)) + } + + if s.SubnetIds == nil { + invalidParams.Add(aws.NewErrParamRequired("SubnetIds")) + } + if s.SubnetIds != nil && len(s.SubnetIds) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SubnetIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The details of the VPC of the Amazon ES destination. +type VpcConfigurationDescription struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role that you want the delivery stream to use to create + // endpoints in the destination VPC. + // + // RoleARN is a required field + RoleARN *string `min:"1" type:"string" required:"true"` + + // The IDs of the security groups that Kinesis Data Firehose uses when it creates + // ENIs in the VPC of the Amazon ES destination. + // + // SecurityGroupIds is a required field + SecurityGroupIds []string `min:"1" type:"list" required:"true"` + + // The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in + // the VPC of the Amazon ES destination. Make sure that the routing tables and + // inbound and outbound rules allow traffic to flow from the subnets whose IDs + // are specified here to the subnets that have the destination Amazon ES endpoints. + // Kinesis Data Firehose creates at least one ENI in each of the subnets that + // are specified here. Do not delete or modify these ENIs.
+ // + // The number of ENIs that Kinesis Data Firehose creates in the subnets specified + // here scales up and down automatically based on throughput. To enable Kinesis + // Data Firehose to scale up the number of ENIs to match throughput, ensure + // that you have sufficient quota. To help you calculate the quota you need, + // assume that Kinesis Data Firehose can create up to three ENIs for this delivery + // stream for each of the subnets specified here. For more information about + // ENI quota, see Network Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) + // in the Amazon VPC Quotas topic. + // + // SubnetIds is a required field + SubnetIds []string `min:"1" type:"list" required:"true"` + + // The ID of the Amazon ES destination's VPC. + // + // VpcId is a required field + VpcId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s VpcConfigurationDescription) String() string { + return awsutil.Prettify(s) +} diff --git a/service/fms/api_enums.go b/service/fms/api_enums.go index 9b4cdf3e482..c16f4fe70b6 100644 --- a/service/fms/api_enums.go +++ b/service/fms/api_enums.go @@ -27,6 +27,7 @@ type CustomerPolicyScopeIdType string // Enum values for CustomerPolicyScopeIdType const ( CustomerPolicyScopeIdTypeAccount CustomerPolicyScopeIdType = "ACCOUNT" + CustomerPolicyScopeIdTypeOrgUnit CustomerPolicyScopeIdType = "ORG_UNIT" ) func (enum CustomerPolicyScopeIdType) MarshalValue() (string, error) { diff --git a/service/fms/api_op_PutPolicy.go b/service/fms/api_op_PutPolicy.go index cf03c655f0c..bbec84ab94f 100644 --- a/service/fms/api_op_PutPolicy.go +++ b/service/fms/api_op_PutPolicy.go @@ -80,15 +80,18 @@ const opPutPolicy = "PutPolicy" // * A Shield Advanced policy, which applies Shield Advanced protection to // specified accounts and resources // -// * An AWS WAF policy, which contains a rule group and defines which resources -// are to be protected by 
that rule group +// * An AWS WAF policy (type WAFV2), which defines rule groups to run first +// in the corresponding AWS WAF web ACL and rule groups to run last in the +// web ACL. +// +// * An AWS WAF Classic policy (type WAF), which defines a rule group. // // * A security group policy, which manages VPC security groups across your // AWS organization. // -// Each policy is specific to one of the three types. If you want to enforce -// more than one policy type across accounts, you can create multiple policies. -// You can create multiple policies for each type. +// Each policy is specific to one of the types. If you want to enforce more +// than one policy type across accounts, create multiple policies. You can create +// multiple policies for each type. // // You must be subscribed to Shield Advanced to create a Shield Advanced policy. // For more information about subscribing to Shield Advanced, see CreateSubscription diff --git a/service/fms/api_types.go b/service/fms/api_types.go index c98c3f54bd7..93ca6c036ad 100644 --- a/service/fms/api_types.go +++ b/service/fms/api_types.go @@ -64,13 +64,28 @@ func (s EvaluationResult) String() string { type Policy struct { _ struct{} `type:"structure"` - // Specifies the AWS account IDs to exclude from the policy. The IncludeMap - // values are evaluated first, with all the appropriate account IDs added to - // the policy. Then the accounts listed in ExcludeMap are removed, resulting - // in the final list of accounts to add to the policy. + // Specifies the AWS account IDs and AWS Organizations organizational units + // (OUs) to exclude from the policy. Specifying an OU is the equivalent of specifying + // all accounts in the OU and in any of its child OUs, including any child OUs + // and accounts that are added at a later time. // - // The key to the map is ACCOUNT. For example, a valid ExcludeMap would be {“ACCOUNT” - // : [“accountID1”, “accountID2”]}. + // You can specify inclusions or exclusions, but not both. 
If you specify an + // IncludeMap, AWS Firewall Manager applies the policy to all accounts specified + // by the IncludeMap, and does not evaluate any ExcludeMap specifications. If + // you do not specify an IncludeMap, then Firewall Manager applies the policy + // to all accounts except for those specified by the ExcludeMap. + // + // You can specify account IDs, OUs, or a combination: + // + // * Specify account IDs by setting the key to ACCOUNT. For example, the + // following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}. + // + // * Specify OUs by setting the key to ORG_UNIT. For example, the following + // is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. + // + // * Specify accounts and OUs together in a single map, separated with a + // comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, + // “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}. ExcludeMap map[string][]string `type:"map"` // If set to True, resources with the tags that are specified in the ResourceTag @@ -81,13 +96,28 @@ type Policy struct { // ExcludeResourceTags is a required field ExcludeResourceTags *bool `type:"boolean" required:"true"` - // Specifies the AWS account IDs to include in the policy. If IncludeMap is - // null, all accounts in the organization in AWS Organizations are included - // in the policy. If IncludeMap is not null, only values listed in IncludeMap - // are included in the policy. + // Specifies the AWS account IDs and AWS Organizations organizational units + // (OUs) to include in the policy. Specifying an OU is the equivalent of specifying + // all accounts in the OU and in any of its child OUs, including any child OUs + // and accounts that are added at a later time. + // + // You can specify inclusions or exclusions, but not both. If you specify an + // IncludeMap, AWS Firewall Manager applies the policy to all accounts specified + // by the IncludeMap, and does not evaluate any ExcludeMap specifications. 
If + // you do not specify an IncludeMap, then Firewall Manager applies the policy + // to all accounts except for those specified by the ExcludeMap. + // + // You can specify account IDs, OUs, or a combination: + // + // * Specify account IDs by setting the key to ACCOUNT. For example, the + // following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}. + // + // * Specify OUs by setting the key to ORG_UNIT. For example, the following + // is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. // - // The key to the map is ACCOUNT. For example, a valid IncludeMap would be {“ACCOUNT” - // : [“accountID1”, “accountID2”]}. + // * Specify accounts and OUs together in a single map, separated with a + // comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, + // “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}. IncludeMap map[string][]string `type:"map"` // The ID of the AWS Firewall Manager policy. @@ -354,23 +384,14 @@ type SecurityServicePolicyData struct { // Details about the service that are specific to the service type, in JSON // format. For service type SHIELD_ADVANCED, this is an empty string. 
// - // * Example: WAFV2 "SecurityServicePolicyData": "{ \"type\": \"WAFV2\", - // \"postProcessRuleGroups\": [ { \"managedRuleGroupIdentifier\": { \"managedRuleGroupName\": - // \"AWSManagedRulesAdminProtectionRuleSet\", \"vendor\": \"AWS\" } \"ruleGroupARN\": - // \"rule group arn", \"overrideAction\": { \"type\": \"COUNT|\" }, \"excludedRules\": - // [ { \"name\" : \"EntityName\" } ], \"type\": \"ManagedRuleGroup|RuleGroup\" - // } ], \"preProcessRuleGroups\": [ { \"managedRuleGroupIdentifier\": { \"managedRuleGroupName\": - // \"AWSManagedRulesAdminProtectionRuleSet\", \"vendor\": \"AWS\" } \"ruleGroupARN\": - // \"rule group arn\", \"overrideAction\": { \"type\": \"COUNT\" }, \"excludedRules\": - // [ { \"name\" : \"EntityName\" } ], \"type\": \"ManagedRuleGroup|RuleGroup\" - // } ], \"defaultAction\": { \"type\": \"BLOCK\" }}" + // * Example: WAFV2 "ManagedServiceData": "{\"type\":\"WAFV2\",\"defaultAction\":{\"type\":\"ALLOW\"},\"preProcessRuleGroups\":[{\"managedRuleGroupIdentifier\":null,\"ruleGroupArn\":\"rulegrouparn\",\"overrideAction\":{\"type\":\"COUNT\"},\"excludedRules\":[{\"name\":\"EntityName\"}],\"ruleGroupType\":\"RuleGroup\"}],\"postProcessRuleGroups\":[{\"managedRuleGroupIdentifier\":{\"managedRuleGroupName\":\"AWSManagedRulesAdminProtectionRuleSet\",\"vendor\":\"AWS\"},\"ruleGroupArn\":\"rulegrouparn\",\"overrideAction\":{\"type\":\"NONE\"},\"excludedRules\":[],\"ruleGroupType\":\"ManagedRuleGroup\"}],\"overrideCustomerWebACLAssociation\":false}" // - // * Example: WAF "ManagedServiceData": "{\"type\": \"WAF\", \"ruleGroups\": + // * Example: WAF Classic "ManagedServiceData": "{\"type\": \"WAF\", \"ruleGroups\": // [{\"id\": \"12345678-1bcd-9012-efga-0987654321ab\", \"overrideAction\" // : {\"type\": \"COUNT\"}}], \"defaultAction\": {\"type\": \"BLOCK\"}} // - // * Example: SECURITY_GROUPS_COMMON 
"SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_COMMON","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_COMMON\",\"revertManualSecurityGroupChanges\":false,\"exclusiveResourceSecurityGroupManagement\":false,\"securityGroups\":[{\"id\":\" - // sg-000e55995d61a06bd\"}]}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"} + // * Example: SECURITY_GROUPS_COMMON "SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_COMMON","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_COMMON\",\"revertManualSecurityGroupChanges\":false,\"exclusiveResourceSecurityGroupManagement\":false, + // \"applyToAllEC2InstanceENIs\":false,\"securityGroups\":[{\"id\":\" sg-000e55995d61a06bd\"}]}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"} // // * Example: SECURITY_GROUPS_CONTENT_AUDIT "SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_CONTENT_AUDIT","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_CONTENT_AUDIT\",\"securityGroups\":[{\"id\":\" // sg-000e55995d61a06bd \"}],\"securityGroupAction\":{\"type\":\"ALLOW\"}}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"} diff --git a/service/mediapackagevod/api_op_CreateAsset.go b/service/mediapackagevod/api_op_CreateAsset.go index 44ef4ad7c7f..336e9a19870 100644 --- a/service/mediapackagevod/api_op_CreateAsset.go +++ b/service/mediapackagevod/api_op_CreateAsset.go @@ -26,6 +26,9 @@ type CreateAssetInput struct { // SourceRoleArn is a required field SourceRoleArn *string `locationName:"sourceRoleArn" type:"string" required:"true"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -93,6 +96,18 @@ func (s CreateAssetInput) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "sourceRoleArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + 
+ metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } @@ -114,6 +129,9 @@ type CreateAssetOutput struct { SourceArn *string `locationName:"sourceArn" type:"string"` SourceRoleArn *string `locationName:"sourceRoleArn" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -177,6 +195,18 @@ func (s CreateAssetOutput) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "sourceRoleArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } diff --git a/service/mediapackagevod/api_op_CreatePackagingConfiguration.go b/service/mediapackagevod/api_op_CreatePackagingConfiguration.go index feed5d2f1d7..f7021e30dac 100644 --- a/service/mediapackagevod/api_op_CreatePackagingConfiguration.go +++ b/service/mediapackagevod/api_op_CreatePackagingConfiguration.go @@ -30,6 +30,9 @@ type CreatePackagingConfigurationInput struct { // PackagingGroupId is a required field PackagingGroupId *string `locationName:"packagingGroupId" type:"string" required:"true"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -115,6 +118,18 @@ func (s CreatePackagingConfigurationInput) MarshalFields(e protocol.FieldEncoder metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "packagingGroupId", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } @@ -138,6 +153,9 @@ type CreatePackagingConfigurationOutput struct { MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` PackagingGroupId *string `locationName:"packagingGroupId" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -189,6 +207,18 @@ func (s CreatePackagingConfigurationOutput) MarshalFields(e protocol.FieldEncode metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "packagingGroupId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } diff --git a/service/mediapackagevod/api_op_CreatePackagingGroup.go b/service/mediapackagevod/api_op_CreatePackagingGroup.go index ae653a6bf0a..511c795f3c0 100644 --- a/service/mediapackagevod/api_op_CreatePackagingGroup.go +++ b/service/mediapackagevod/api_op_CreatePackagingGroup.go @@ -15,6 +15,9 @@ type CreatePackagingGroupInput struct { // Id is a required field Id *string `locationName:"id" type:"string" required:"true"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -46,6 +49,18 @@ func (s CreatePackagingGroupInput) MarshalFields(e protocol.FieldEncoder) error metadata := protocol.Metadata{} 
e.SetValue(protocol.BodyTarget, "id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } @@ -57,6 +72,9 @@ type CreatePackagingGroupOutput struct { DomainName *string `locationName:"domainName" type:"string"` Id *string `locationName:"id" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -84,6 +102,18 @@ func (s CreatePackagingGroupOutput) MarshalFields(e protocol.FieldEncoder) error metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } diff --git a/service/mediapackagevod/api_op_DescribeAsset.go b/service/mediapackagevod/api_op_DescribeAsset.go index 51d83ba3fd8..ca5d3b999d1 100644 --- a/service/mediapackagevod/api_op_DescribeAsset.go +++ b/service/mediapackagevod/api_op_DescribeAsset.go @@ -67,6 +67,9 @@ type DescribeAssetOutput struct { SourceArn *string `locationName:"sourceArn" type:"string"` SourceRoleArn *string `locationName:"sourceRoleArn" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -130,6 +133,18 @@ func (s DescribeAssetOutput) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} 
e.SetValue(protocol.BodyTarget, "sourceRoleArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } diff --git a/service/mediapackagevod/api_op_DescribePackagingConfiguration.go b/service/mediapackagevod/api_op_DescribePackagingConfiguration.go index 8bca22e6a8d..8d41004776d 100644 --- a/service/mediapackagevod/api_op_DescribePackagingConfiguration.go +++ b/service/mediapackagevod/api_op_DescribePackagingConfiguration.go @@ -69,6 +69,9 @@ type DescribePackagingConfigurationOutput struct { MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` PackagingGroupId *string `locationName:"packagingGroupId" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -120,6 +123,18 @@ func (s DescribePackagingConfigurationOutput) MarshalFields(e protocol.FieldEnco metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "packagingGroupId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } diff --git a/service/mediapackagevod/api_op_DescribePackagingGroup.go b/service/mediapackagevod/api_op_DescribePackagingGroup.go index 0bff5188083..61f4c7e94fd 100644 --- a/service/mediapackagevod/api_op_DescribePackagingGroup.go +++ b/service/mediapackagevod/api_op_DescribePackagingGroup.go @@ -57,6 +57,9 @@ type DescribePackagingGroupOutput 
struct { DomainName *string `locationName:"domainName" type:"string"` Id *string `locationName:"id" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -84,6 +87,18 @@ func (s DescribePackagingGroupOutput) MarshalFields(e protocol.FieldEncoder) err metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } diff --git a/service/mediapackagevod/api_op_ListTagsForResource.go b/service/mediapackagevod/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..7f803c2d168 --- /dev/null +++ b/service/mediapackagevod/api_op_ListTagsForResource.go @@ -0,0 +1,144 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package mediapackagevod + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTagsForResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTagsForResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resource-arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + Tags map[string]string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + return nil +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest returns a request value for making API operation for +// AWS Elemental MediaPackage VOD. +// +// // Example sending a request using ListTagsForResourceRequest. 
+// req := client.ListTagsForResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/mediapackage-vod-2018-11-07/ListTagsForResource +func (c *Client) ListTagsForResourceRequest(input *ListTagsForResourceInput) ListTagsForResourceRequest { + op := &aws.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resource-arn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req := c.newRequest(op, input, &ListTagsForResourceOutput{}) + return ListTagsForResourceRequest{Request: req, Input: input, Copy: c.ListTagsForResourceRequest} +} + +// ListTagsForResourceRequest is the request type for the +// ListTagsForResource API operation. +type ListTagsForResourceRequest struct { + *aws.Request + Input *ListTagsForResourceInput + Copy func(*ListTagsForResourceInput) ListTagsForResourceRequest +} + +// Send marshals and sends the ListTagsForResource API request. +func (r ListTagsForResourceRequest) Send(ctx context.Context) (*ListTagsForResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTagsForResourceResponse{ + ListTagsForResourceOutput: r.Request.Data.(*ListTagsForResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListTagsForResourceResponse is the response type for the +// ListTagsForResource API operation. +type ListTagsForResourceResponse struct { + *ListTagsForResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTagsForResource request. 
+func (r *ListTagsForResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/mediapackagevod/api_op_TagResource.go b/service/mediapackagevod/api_op_TagResource.go new file mode 100644 index 00000000000..11c2d0c923a --- /dev/null +++ b/service/mediapackagevod/api_op_TagResource.go @@ -0,0 +1,152 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package mediapackagevod + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` + + // Tags is a required field + Tags map[string]string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.Tags == nil { + invalidParams.Add(aws.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s TagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resource-arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opTagResource = "TagResource" + +// TagResourceRequest returns a request value for making API operation for +// AWS Elemental MediaPackage VOD. +// +// // Example sending a request using TagResourceRequest. 
+// req := client.TagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/mediapackage-vod-2018-11-07/TagResource +func (c *Client) TagResourceRequest(input *TagResourceInput) TagResourceRequest { + op := &aws.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resource-arn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + req := c.newRequest(op, input, &TagResourceOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return TagResourceRequest{Request: req, Input: input, Copy: c.TagResourceRequest} +} + +// TagResourceRequest is the request type for the +// TagResource API operation. +type TagResourceRequest struct { + *aws.Request + Input *TagResourceInput + Copy func(*TagResourceInput) TagResourceRequest +} + +// Send marshals and sends the TagResource API request. +func (r TagResourceRequest) Send(ctx context.Context) (*TagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &TagResourceResponse{ + TagResourceOutput: r.Request.Data.(*TagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// TagResourceResponse is the response type for the +// TagResource API operation. +type TagResourceResponse struct { + *TagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// TagResource request. 
+func (r *TagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/mediapackagevod/api_op_UntagResource.go b/service/mediapackagevod/api_op_UntagResource.go new file mode 100644 index 00000000000..cb2e53862e4 --- /dev/null +++ b/service/mediapackagevod/api_op_UntagResource.go @@ -0,0 +1,152 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package mediapackagevod + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` + + // TagKeys is a required field + TagKeys []string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UntagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.TagKeys == nil { + invalidParams.Add(aws.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UntagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resource-arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TagKeys != nil { + v := s.TagKeys + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "tagKeys", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UntagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest returns a request value for making API operation for +// AWS Elemental MediaPackage VOD. +// +// // Example sending a request using UntagResourceRequest. 
+// req := client.UntagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/mediapackage-vod-2018-11-07/UntagResource +func (c *Client) UntagResourceRequest(input *UntagResourceInput) UntagResourceRequest { + op := &aws.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resource-arn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + req := c.newRequest(op, input, &UntagResourceOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UntagResourceRequest{Request: req, Input: input, Copy: c.UntagResourceRequest} +} + +// UntagResourceRequest is the request type for the +// UntagResource API operation. +type UntagResourceRequest struct { + *aws.Request + Input *UntagResourceInput + Copy func(*UntagResourceInput) UntagResourceRequest +} + +// Send marshals and sends the UntagResource API request. +func (r UntagResourceRequest) Send(ctx context.Context) (*UntagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UntagResourceResponse{ + UntagResourceOutput: r.Request.Data.(*UntagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UntagResourceResponse is the response type for the +// UntagResource API operation. +type UntagResourceResponse struct { + *UntagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UntagResource request. 
+func (r *UntagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/mediapackagevod/api_types.go b/service/mediapackagevod/api_types.go index 441949ecc6e..3968ab45564 100644 --- a/service/mediapackagevod/api_types.go +++ b/service/mediapackagevod/api_types.go @@ -35,6 +35,9 @@ type AssetShallow struct { // The IAM role ARN used to access the source S3 bucket. SourceRoleArn *string `locationName:"sourceRoleArn" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -86,6 +89,18 @@ func (s AssetShallow) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "sourceRoleArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } @@ -848,6 +863,9 @@ type PackagingConfiguration struct { // The ID of a PackagingGroup. 
PackagingGroupId *string `locationName:"packagingGroupId" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -899,6 +917,18 @@ func (s PackagingConfiguration) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "packagingGroupId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } @@ -914,6 +944,9 @@ type PackagingGroup struct { // The ID of the PackagingGroup. Id *string `locationName:"id" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]string `locationName:"tags" type:"map"` } // String returns the string representation @@ -941,6 +974,18 @@ func (s PackagingGroup) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } diff --git a/service/mediapackagevod/mediapackagevodiface/interface.go b/service/mediapackagevod/mediapackagevodiface/interface.go index 809da1d983a..118537535f5 100644 --- a/service/mediapackagevod/mediapackagevodiface/interface.go +++ b/service/mediapackagevod/mediapackagevodiface/interface.go @@ -84,6 +84,12 @@ type ClientAPI interface { 
ListPackagingConfigurationsRequest(*mediapackagevod.ListPackagingConfigurationsInput) mediapackagevod.ListPackagingConfigurationsRequest ListPackagingGroupsRequest(*mediapackagevod.ListPackagingGroupsInput) mediapackagevod.ListPackagingGroupsRequest + + ListTagsForResourceRequest(*mediapackagevod.ListTagsForResourceInput) mediapackagevod.ListTagsForResourceRequest + + TagResourceRequest(*mediapackagevod.TagResourceInput) mediapackagevod.TagResourceRequest + + UntagResourceRequest(*mediapackagevod.UntagResourceInput) mediapackagevod.UntagResourceRequest } var _ ClientAPI = (*mediapackagevod.Client)(nil) diff --git a/service/pinpoint/api_enums.go b/service/pinpoint/api_enums.go index 4b6df5997c5..0d330266961 100644 --- a/service/pinpoint/api_enums.go +++ b/service/pinpoint/api_enums.go @@ -142,6 +142,32 @@ func (enum Duration) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type EndpointTypesElement string + +// Enum values for EndpointTypesElement +const ( + EndpointTypesElementGcm EndpointTypesElement = "GCM" + EndpointTypesElementApns EndpointTypesElement = "APNS" + EndpointTypesElementApnsSandbox EndpointTypesElement = "APNS_SANDBOX" + EndpointTypesElementApnsVoip EndpointTypesElement = "APNS_VOIP" + EndpointTypesElementApnsVoipSandbox EndpointTypesElement = "APNS_VOIP_SANDBOX" + EndpointTypesElementAdm EndpointTypesElement = "ADM" + EndpointTypesElementSms EndpointTypesElement = "SMS" + EndpointTypesElementVoice EndpointTypesElement = "VOICE" + EndpointTypesElementEmail EndpointTypesElement = "EMAIL" + EndpointTypesElementBaidu EndpointTypesElement = "BAIDU" + EndpointTypesElementCustom EndpointTypesElement = "CUSTOM" +) + +func (enum EndpointTypesElement) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum EndpointTypesElement) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type FilterType string // Enum values for FilterType diff --git 
a/service/pinpoint/api_op_UpdateEndpoint.go b/service/pinpoint/api_op_UpdateEndpoint.go index edc53fba98d..97a670d864e 100644 --- a/service/pinpoint/api_op_UpdateEndpoint.go +++ b/service/pinpoint/api_op_UpdateEndpoint.go @@ -109,8 +109,9 @@ const opUpdateEndpoint = "UpdateEndpoint" // // Creates a new endpoint for an application or updates the settings and attributes // of an existing endpoint for an application. You can also use this operation -// to define custom attributes (Attributes, Metrics, and UserAttributes properties) -// for an endpoint. +// to define custom attributes for an endpoint. If an update includes one or +// more values for a custom attribute, Amazon Pinpoint replaces (overwrites) +// any existing values with the new values. // // // Example sending a request using UpdateEndpointRequest. // req := client.UpdateEndpointRequest(params) diff --git a/service/pinpoint/api_op_UpdateEndpointsBatch.go b/service/pinpoint/api_op_UpdateEndpointsBatch.go index cb83997ad69..7f2bf346da5 100644 --- a/service/pinpoint/api_op_UpdateEndpointsBatch.go +++ b/service/pinpoint/api_op_UpdateEndpointsBatch.go @@ -102,8 +102,9 @@ const opUpdateEndpointsBatch = "UpdateEndpointsBatch" // // Creates a new batch of endpoints for an application or updates the settings // and attributes of a batch of existing endpoints for an application. You can -// also use this operation to define custom attributes (Attributes, Metrics, -// and UserAttributes properties) for a batch of endpoints. +// also use this operation to define custom attributes for a batch of endpoints. +// If an update includes one or more values for a custom attribute, Amazon Pinpoint +// replaces (overwrites) any existing values with the new values. // // // Example sending a request using UpdateEndpointsBatchRequest. 
// req := client.UpdateEndpointsBatchRequest(params) diff --git a/service/pinpoint/api_types.go b/service/pinpoint/api_types.go index a3e79dbc68c..e775cfbe9bd 100644 --- a/service/pinpoint/api_types.go +++ b/service/pinpoint/api_types.go @@ -2247,7 +2247,7 @@ type ApplicationDateRangeKpiResponse struct { // that the data was retrieved for. This value describes the associated metric // and consists of two or more terms, which are comprised of lowercase alphanumeric // characters, separated by a hyphen. For a list of possible values, see the - // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/analytics-standard-metrics.html). // // KpiName is a required field KpiName *string `type:"string" required:"true"` @@ -2393,15 +2393,16 @@ type ApplicationSettingsResource struct { // ApplicationId is a required field ApplicationId *string `type:"string" required:"true"` - // The settings for the AWS Lambda function to use by default as a code hook - // for campaigns in the application. + // The settings for the AWS Lambda function to invoke by default as a code hook + // for campaigns in the application. You can use this hook to customize segments + // that are used by campaigns in the application. CampaignHook *CampaignHook `type:"structure"` // The date and time, in ISO 8601 format, when the application's settings were // last modified. LastModifiedDate *string `type:"string"` - // The default sending limits for campaigns in the application. + // The default sending limits for campaigns and journeys in the application. Limits *CampaignLimits `type:"structure"` // The default quiet time for campaigns and journeys in the application. 
Quiet @@ -3035,6 +3036,32 @@ func (s BaseKpiResult) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Specifies the contents of a message that's sent through a custom channel +// to recipients of a campaign. +type CampaignCustomMessage struct { + _ struct{} `type:"structure"` + + // The raw, JSON-formatted string to use as the payload for the message. The + // maximum size is 5 KB. + Data *string `type:"string"` +} + +// String returns the string representation +func (s CampaignCustomMessage) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CampaignCustomMessage) MarshalFields(e protocol.FieldEncoder) error { + if s.Data != nil { + v := *s.Data + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Data", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // Provides the results of a query that retrieved the data for a standard metric // that applies to a campaign, and provides information about that query. type CampaignDateRangeKpiResponse struct { @@ -3057,7 +3084,7 @@ type CampaignDateRangeKpiResponse struct { // that the data was retrieved for. This value describes the associated metric // and consists of two or more terms, which are comprised of lowercase alphanumeric // characters, separated by a hyphen. For a list of possible values, see the - // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/analytics-standard-metrics.html). // // KpiName is a required field KpiName *string `type:"string" required:"true"` @@ -3247,15 +3274,25 @@ func (s CampaignEventFilter) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Specifies the AWS Lambda function to use as a code hook for a campaign. 
+// Specifies settings for invoking an AWS Lambda function that customizes a +// segment for a campaign. type CampaignHook struct { _ struct{} `type:"structure"` // The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon - // Pinpoint invokes to send messages for a campaign. + // Pinpoint invokes to customize a segment for a campaign. LambdaFunctionName *string `type:"string"` - // Specifies which Lambda mode to use when invoking the AWS Lambda function. + // The mode that Amazon Pinpoint uses to invoke the AWS Lambda function. Possible + // values are: + // + // * FILTER - Invoke the function to customize the segment that's used by + // a campaign. + // + // * DELIVERY - (Deprecated) Previously, invoked the function to send a campaign + // through a custom channel. This functionality is not supported anymore. + // To send a campaign through a custom channel, use the CustomDeliveryConfiguration + // and CampaignCustomMessage objects of the campaign. Mode Mode `type:"string" enum:"true"` // The web URL that Amazon Pinpoint calls to invoke the AWS Lambda function @@ -3291,12 +3328,16 @@ func (s CampaignHook) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Specifies limits on the messages that a campaign can send. +// For a campaign, specifies limits on the messages that the campaign can send. +// For an application, specifies the default limits for messages that campaigns +// and journeys in the application can send. type CampaignLimits struct { _ struct{} `type:"structure"` // The maximum number of messages that a campaign can send to a single endpoint - // during a 24-hour period. The maximum value is 100. + // during a 24-hour period. For an application, this value specifies the default + // limit for the number of messages that campaigns and journeys can send to + // a single endpoint during a 24-hour period. The maximum value is 100. 
Daily *int64 `type:"integer"` // The maximum amount of time, in seconds, that a campaign can attempt to deliver @@ -3304,12 +3345,15 @@ type CampaignLimits struct { // is 60 seconds. MaximumDuration *int64 `type:"integer"` - // The maximum number of messages that a campaign can send each second. The - // minimum value is 50. The maximum value is 20,000. + // The maximum number of messages that a campaign can send each second. For + // an application, this value specifies the default limit for the number of + // messages that campaigns and journeys can send each second. The minimum value + // is 50. The maximum value is 20,000. MessagesPerSecond *int64 `type:"integer"` // The maximum number of messages that a campaign can send to a single endpoint - // during the course of the campaign. The maximum value is 100. + // during the course of the campaign. If a campaign recurs, this setting applies + // to all runs of the campaign. The maximum value is 100. Total *int64 `type:"integer"` } @@ -3371,8 +3415,12 @@ type CampaignResponse struct { // CreationDate is a required field CreationDate *string `type:"string" required:"true"` + // The delivery configuration settings for sending the campaign through a custom + // channel. + CustomDeliveryConfiguration *CustomDeliveryConfiguration `type:"structure"` + // The current status of the campaign's default treatment. This value exists - // only for campaigns that have more than one treatment, to support A/B testing. + // only for campaigns that have more than one treatment. DefaultState *CampaignState `type:"structure"` // The custom description of the campaign. @@ -3383,6 +3431,7 @@ type CampaignResponse struct { HoldoutPercent *int64 `type:"integer"` // The settings for the AWS Lambda function to use as a code hook for the campaign. + // You can use this hook to customize the segment that's used by the campaign. Hook *CampaignHook `type:"structure"` // The unique identifier for the campaign. 
@@ -3432,11 +3481,12 @@ type CampaignResponse struct { // The message template that’s used for the campaign. TemplateConfiguration *TemplateConfiguration `type:"structure"` - // The custom description of a variation of the campaign that's used for A/B - // testing. + // The custom description of the default treatment for the campaign. TreatmentDescription *string `type:"string"` - // The custom name of a variation of the campaign that's used for A/B testing. + // The custom name of the default treatment for the campaign, if the campaign + // has multiple treatments. A treatment is a variation of a campaign that's + // used for A/B testing. TreatmentName *string `type:"string"` // The version number of the campaign. @@ -3480,6 +3530,12 @@ func (s CampaignResponse) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "CreationDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.CustomDeliveryConfiguration != nil { + v := s.CustomDeliveryConfiguration + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CustomDeliveryConfiguration", v, metadata) + } if s.DefaultState != nil { v := s.DefaultState @@ -3655,9 +3711,12 @@ type CampaignState struct { _ struct{} `type:"structure"` // The current status of the campaign, or the current status of a treatment - // that belongs to an A/B test campaign. If a campaign uses A/B testing, the - // campaign has a status of COMPLETED only if all campaign treatments have a - // status of COMPLETED. + // that belongs to an A/B test campaign. + // + // If a campaign uses A/B testing, the campaign has a status of COMPLETED only + // if all campaign treatments have a status of COMPLETED. If you delete the + // segment that's associated with a campaign, the campaign fails and has a status + // of DELETED. 
CampaignStatus CampaignStatus `type:"string" enum:"true"` } @@ -4048,14 +4107,15 @@ type CreateRecommenderConfiguration struct { _ struct{} `type:"structure"` // A map of key-value pairs that defines 1-10 custom endpoint or user attributes, - // depending on the value for the RecommenderUserIdType property. Each of these - // attributes temporarily stores a recommended item that's retrieved from the - // recommender model and sent to an AWS Lambda function for additional processing. - // Each attribute can be used as a message variable in a message template. + // depending on the value for the RecommendationProviderIdType property. Each + // of these attributes temporarily stores a recommended item that's retrieved + // from the recommender model and sent to an AWS Lambda function for additional + // processing. Each attribute can be used as a message variable in a message + // template. // // In the map, the key is the name of a custom attribute and the value is a // custom display name for that attribute. The display name appears in the Attribute - // finder pane of the template editor on the Amazon Pinpoint console. The following + // finder of the template editor on the Amazon Pinpoint console. The following // restrictions apply to these names: // // * An attribute name must start with a letter or number and it can contain @@ -4067,12 +4127,13 @@ type CreateRecommenderConfiguration struct { // spaces, underscores (_), or hyphens (-). // // This object is required if the configuration invokes an AWS Lambda function - // (LambdaFunctionArn) to process recommendation data. Otherwise, don't include - // this object in your request. + // (RecommendationTransformerUri) to process recommendation data. Otherwise, + // don't include this object in your request. Attributes map[string]string `type:"map"` // A custom description of the configuration for the recommender model. The - // description can contain up to 128 characters. 
+ // description can contain up to 128 characters. The characters can be letters, + // numbers, spaces, or the following symbols: _ ; () , ‐. Description *string `type:"string"` // A custom name of the configuration for the recommender model. The name must @@ -4092,7 +4153,7 @@ type CreateRecommenderConfiguration struct { // * PINPOINT_USER_ID - Associate each user in the model with a particular // user and endpoint in Amazon Pinpoint. The data is correlated based on // user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition - // in Amazon Pinpoint has to specify a both a user ID (UserId) and an endpoint + // in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint // ID. Otherwise, messages won’t be sent to the user's endpoint. RecommendationProviderIdType *string `type:"string"` @@ -4115,26 +4176,26 @@ type CreateRecommenderConfiguration struct { RecommendationTransformerUri *string `type:"string"` // A custom display name for the standard endpoint or user attribute (RecommendationItems) - // that temporarily stores a recommended item for each endpoint or user, depending - // on the value for the RecommenderUserIdType property. This value is required - // if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) + // that temporarily stores recommended items for each endpoint or user, depending + // on the value for the RecommendationProviderIdType property. This value is + // required if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) // to perform additional processing of recommendation data. // - // This name appears in the Attribute finder pane of the template editor on - // the Amazon Pinpoint console. The name can contain up to 25 characters. The - // characters can be letters, numbers, spaces, underscores (_), or hyphens (-). - // These restrictions don't apply to attribute values. 
+ // This name appears in the Attribute finder of the template editor on the Amazon + // Pinpoint console. The name can contain up to 25 characters. The characters + // can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions + // don't apply to attribute values. RecommendationsDisplayName *string `type:"string"` // The number of recommended items to retrieve from the model for each endpoint - // or user, depending on the value for the RecommenderUserIdType property. This - // number determines how many recommended attributes are available for use as - // message variables in message templates. The minimum value is 1. The maximum - // value is 5. The default value is 5. + // or user, depending on the value for the RecommendationProviderIdType property. + // This number determines how many recommended items are available for use in + // message variables. The minimum value is 1. The maximum value is 5. The default + // value is 5. // // To use multiple recommended items and custom attributes with message variables, - // you have to use an AWS Lambda function (LambdaFunctionArn) to perform additional - // processing of recommendation data. + // you have to use an AWS Lambda function (RecommendationTransformerUri) to + // perform additional processing of recommendation data. RecommendationsPerMessage *int64 `type:"integer"` } @@ -4269,6 +4330,74 @@ func (s CreateTemplateMessageBody) MarshalFields(e protocol.FieldEncoder) error return nil } +// Specifies the delivery configuration settings for sending a campaign or campaign +// treatment through a custom channel. This object is required if you use the +// CampaignCustomMessage object to define the message to send for the campaign +// or campaign treatment. +type CustomDeliveryConfiguration struct { + _ struct{} `type:"structure"` + + // The destination to send the campaign or treatment to. 
This value can be one + // of the following: + // + // * The name or Amazon Resource Name (ARN) of an AWS Lambda function to + // invoke to handle delivery of the campaign or treatment. + // + // * The URL for a web application or service that supports HTTPS and can + // receive the message. The URL has to be a full URL, including the HTTPS + // protocol. + // + // DeliveryUri is a required field + DeliveryUri *string `type:"string" required:"true"` + + // The types of endpoints to send the campaign or treatment to. Each valid value + // maps to a type of channel that you can associate with an endpoint by using + // the ChannelType property of an endpoint. + EndpointTypes []EndpointTypesElement `type:"list"` +} + +// String returns the string representation +func (s CustomDeliveryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomDeliveryConfiguration) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CustomDeliveryConfiguration"} + + if s.DeliveryUri == nil { + invalidParams.Add(aws.NewErrParamRequired("DeliveryUri")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CustomDeliveryConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.DeliveryUri != nil { + v := *s.DeliveryUri + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DeliveryUri", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.EndpointTypes != nil { + v := s.EndpointTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "EndpointTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + // Specifies the default message for all channels. 
type DefaultMessage struct { _ struct{} `type:"structure"` @@ -4624,8 +4753,8 @@ func (s DirectMessageConfiguration) MarshalFields(e protocol.FieldEncoder) error type EmailChannelRequest struct { _ struct{} `type:"structure"` - // The configuration set that you want to apply to email that you send through - // the channel by using the Amazon Pinpoint Email API (emailAPIreference.html). + // The Amazon SES configuration set (https://docs.aws.amazon.com/ses/latest/APIReference/API_ConfigurationSet.html) + // that you want to apply to messages that you send through the channel. ConfigurationSet *string `type:"string"` // Specifies whether to enable the email channel for the application. @@ -4716,8 +4845,8 @@ type EmailChannelResponse struct { // to. ApplicationId *string `type:"string"` - // The configuration set that's applied to email that's sent through the channel - // by using the Amazon Pinpoint Email API (emailAPIreference.html). + // The Amazon SES configuration set (https://docs.aws.amazon.com/ses/latest/APIReference/API_ConfigurationSet.html) + // that's applied to messages that are sent through the channel. ConfigurationSet *string `type:"string"` // The date and time, in ISO 8601 format, when the email channel was enabled. @@ -4726,7 +4855,7 @@ type EmailChannelResponse struct { // Specifies whether the email channel is enabled for the application. Enabled *bool `type:"boolean"` - // The verified email address that you send email from when you send email through + // The verified email address that email is sent from when you send email through // the channel. FromAddress *string `type:"string"` @@ -4738,8 +4867,7 @@ type EmailChannelResponse struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple - // Email Service (Amazon SES), that you use when you send email through the - // channel. + // Email Service (Amazon SES), that's used when you send email through the channel. 
Identity *string `type:"string"` // Specifies whether the email channel is archived. @@ -4751,7 +4879,7 @@ type EmailChannelResponse struct { // The date and time, in ISO 8601 format, when the email channel was last modified. LastModifiedDate *string `type:"string"` - // The maximum number of emails that you can send through the channel each second. + // The maximum number of emails that can be sent through the channel each second. MessagesPerSecond *int64 `type:"integer"` // The type of messaging or notification platform for the channel. For the email @@ -5356,8 +5484,8 @@ type EndpointBatchItem struct { // The unique identifier for the request to create or update the endpoint. RequestId *string `type:"string"` - // One or more custom user attributes that describe the user who's associated - // with the endpoint. + // One or more custom attributes that describe the user who's associated with + // the endpoint. User *EndpointUser `type:"structure"` } @@ -5868,8 +5996,8 @@ type EndpointRequest struct { // The unique identifier for the most recent request to update the endpoint. RequestId *string `type:"string"` - // One or more custom user attributes that describe the user who's associated - // with the endpoint. + // One or more custom attributes that describe the user who's associated with + // the endpoint. User *EndpointUser `type:"structure"` } @@ -8371,7 +8499,7 @@ type JourneyDateRangeKpiResponse struct { // that the data was retrieved for. This value describes the associated metric // and consists of two or more terms, which are comprised of lowercase alphanumeric // characters, separated by a hyphen. For a list of possible values, see the - // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/analytics-standard-metrics.html). 
// // KpiName is a required field KpiName *string `type:"string" required:"true"` @@ -9266,31 +9394,38 @@ type MessageConfiguration struct { _ struct{} `type:"structure"` // The message that the campaign sends through the ADM (Amazon Device Messaging) - // channel. This message overrides the default message. + // channel. If specified, this message overrides the default message. ADMMessage *Message `type:"structure"` // The message that the campaign sends through the APNs (Apple Push Notification - // service) channel. This message overrides the default message. + // service) channel. If specified, this message overrides the default message. APNSMessage *Message `type:"structure"` // The message that the campaign sends through the Baidu (Baidu Cloud Push) - // channel. This message overrides the default message. + // channel. If specified, this message overrides the default message. BaiduMessage *Message `type:"structure"` + // The message that the campaign sends through a custom channel, as specified + // by the delivery configuration (CustomDeliveryConfiguration) settings for + // the campaign. If specified, this message overrides the default message. + CustomMessage *CampaignCustomMessage `type:"structure"` + // The default message that the campaign sends through all the channels that // are configured for the campaign. DefaultMessage *Message `type:"structure"` - // The message that the campaign sends through the email channel. + // The message that the campaign sends through the email channel. If specified, + // this message overrides the default message. EmailMessage *CampaignEmailMessage `type:"structure"` // The message that the campaign sends through the GCM channel, which enables // Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging - // (FCM), formerly Google Cloud Messaging (GCM), service. This message overrides - // the default message. + // (FCM), formerly Google Cloud Messaging (GCM), service. 
If specified, this + // message overrides the default message. GCMMessage *Message `type:"structure"` - // The message that the campaign sends through the SMS channel. + // The message that the campaign sends through the SMS channel. If specified, + // this message overrides the default message. SMSMessage *CampaignSmsMessage `type:"structure"` } @@ -9319,6 +9454,12 @@ func (s MessageConfiguration) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "BaiduMessage", v, metadata) } + if s.CustomMessage != nil { + v := s.CustomMessage + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CustomMessage", v, metadata) + } if s.DefaultMessage != nil { v := s.DefaultMessage @@ -10638,13 +10779,14 @@ type RecommenderConfigurationResponse struct { _ struct{} `type:"structure"` // A map that defines 1-10 custom endpoint or user attributes, depending on - // the value for the RecommenderUserIdType property. Each of these attributes + // the value for the RecommendationProviderIdType property. Each of these attributes // temporarily stores a recommended item that's retrieved from the recommender // model and sent to an AWS Lambda function for additional processing. Each // attribute can be used as a message variable in a message template. // // This value is null if the configuration doesn't invoke an AWS Lambda function - // (LambdaFunctionArn) to perform additional processing of recommendation data. + // (RecommendationTransformerUri) to perform additional processing of recommendation + // data. 
Attributes map[string]string `type:"map"` // The date, in extended ISO 8601 format, when the configuration was created @@ -10706,18 +10848,19 @@ type RecommenderConfigurationResponse struct { RecommendationTransformerUri *string `type:"string"` // The custom display name for the standard endpoint or user attribute (RecommendationItems) - // that temporarily stores a recommended item for each endpoint or user, depending - // on the value for the RecommenderUserIdType property. This name appears in - // the Attribute finder pane of the template editor on the Amazon Pinpoint console. + // that temporarily stores recommended items for each endpoint or user, depending + // on the value for the RecommendationProviderIdType property. This name appears + // in the Attribute finder of the template editor on the Amazon Pinpoint console. // // This value is null if the configuration doesn't invoke an AWS Lambda function - // (LambdaFunctionArn) to perform additional processing of recommendation data. + // (RecommendationTransformerUri) to perform additional processing of recommendation + // data. RecommendationsDisplayName *string `type:"string"` // The number of recommended items that are retrieved from the model for each - // endpoint or user, depending on the value for the RecommenderUserIdType property. - // This number determines how many recommended attributes are available for - // use as message variables in message templates. + // endpoint or user, depending on the value for the RecommendationProviderIdType + // property. This number determines how many recommended items are available + // for use in message variables. RecommendationsPerMessage *int64 `type:"integer"` } @@ -13428,6 +13571,11 @@ func (s TemplatesResponse) MarshalFields(e protocol.FieldEncoder) error { type TreatmentResource struct { _ struct{} `type:"structure"` + // The delivery configuration settings for sending the treatment through a custom + // channel. 
This object is required if the MessageConfiguration object for the + // treatment specifies a CustomMessage object. + CustomDeliveryConfiguration *CustomDeliveryConfiguration `type:"structure"` + // The unique identifier for the treatment. // // Id is a required field @@ -13454,8 +13602,7 @@ type TreatmentResource struct { // The custom description of the treatment. TreatmentDescription *string `type:"string"` - // The custom name of the treatment. A treatment is a variation of a campaign - // that's used for A/B testing of a campaign. + // The custom name of the treatment. TreatmentName *string `type:"string"` } @@ -13466,6 +13613,12 @@ func (s TreatmentResource) String() string { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s TreatmentResource) MarshalFields(e protocol.FieldEncoder) error { + if s.CustomDeliveryConfiguration != nil { + v := s.CustomDeliveryConfiguration + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CustomDeliveryConfiguration", v, metadata) + } if s.Id != nil { v := *s.Id @@ -13557,14 +13710,15 @@ type UpdateRecommenderConfiguration struct { _ struct{} `type:"structure"` // A map of key-value pairs that defines 1-10 custom endpoint or user attributes, - // depending on the value for the RecommenderUserIdType property. Each of these - // attributes temporarily stores a recommended item that's retrieved from the - // recommender model and sent to an AWS Lambda function for additional processing. - // Each attribute can be used as a message variable in a message template. + // depending on the value for the RecommendationProviderIdType property. Each + // of these attributes temporarily stores a recommended item that's retrieved + // from the recommender model and sent to an AWS Lambda function for additional + // processing. Each attribute can be used as a message variable in a message + // template. 
// // In the map, the key is the name of a custom attribute and the value is a // custom display name for that attribute. The display name appears in the Attribute - // finder pane of the template editor on the Amazon Pinpoint console. The following + // finder of the template editor on the Amazon Pinpoint console. The following // restrictions apply to these names: // // * An attribute name must start with a letter or number and it can contain @@ -13576,12 +13730,13 @@ type UpdateRecommenderConfiguration struct { // spaces, underscores (_), or hyphens (-). // // This object is required if the configuration invokes an AWS Lambda function - // (LambdaFunctionArn) to process recommendation data. Otherwise, don't include - // this object in your request. + // (RecommendationTransformerUri) to process recommendation data. Otherwise, + // don't include this object in your request. Attributes map[string]string `type:"map"` // A custom description of the configuration for the recommender model. The - // description can contain up to 128 characters. + // description can contain up to 128 characters. The characters can be letters, + // numbers, spaces, or the following symbols: _ ; () , ‐. Description *string `type:"string"` // A custom name of the configuration for the recommender model. The name must @@ -13601,7 +13756,7 @@ type UpdateRecommenderConfiguration struct { // * PINPOINT_USER_ID - Associate each user in the model with a particular // user and endpoint in Amazon Pinpoint. The data is correlated based on // user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition - // in Amazon Pinpoint has to specify a both a user ID (UserId) and an endpoint + // in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint // ID. Otherwise, messages won’t be sent to the user's endpoint. 
RecommendationProviderIdType *string `type:"string"` @@ -13624,26 +13779,26 @@ type UpdateRecommenderConfiguration struct { RecommendationTransformerUri *string `type:"string"` // A custom display name for the standard endpoint or user attribute (RecommendationItems) - // that temporarily stores a recommended item for each endpoint or user, depending - // on the value for the RecommenderUserIdType property. This value is required - // if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) + // that temporarily stores recommended items for each endpoint or user, depending + // on the value for the RecommendationProviderIdType property. This value is + // required if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) // to perform additional processing of recommendation data. // - // This name appears in the Attribute finder pane of the template editor on - // the Amazon Pinpoint console. The name can contain up to 25 characters. The - // characters can be letters, numbers, spaces, underscores (_), or hyphens (-). - // These restrictions don't apply to attribute values. + // This name appears in the Attribute finder of the template editor on the Amazon + // Pinpoint console. The name can contain up to 25 characters. The characters + // can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions + // don't apply to attribute values. RecommendationsDisplayName *string `type:"string"` // The number of recommended items to retrieve from the model for each endpoint - // or user, depending on the value for the RecommenderUserIdType property. This - // number determines how many recommended attributes are available for use as - // message variables in message templates. The minimum value is 1. The maximum - // value is 5. The default value is 5. + // or user, depending on the value for the RecommendationProviderIdType property. 
+ // This number determines how many recommended items are available for use in + // message variables. The minimum value is 1. The maximum value is 5. The default + // value is 5. // // To use multiple recommended items and custom attributes with message variables, - // you have to use an AWS Lambda function (LambdaFunctionArn) to perform additional - // processing of recommendation data. + // you have to use an AWS Lambda function (RecommendationTransformerUri) to + // perform additional processing of recommendation data. RecommendationsPerMessage *int64 `type:"integer"` } @@ -14270,18 +14425,20 @@ func (s WaitTime) MarshalFields(e protocol.FieldEncoder) error { type WriteApplicationSettingsRequest struct { _ struct{} `type:"structure"` - // The settings for the AWS Lambda function to use by default as a code hook - // for campaigns in the application. To override these settings for a specific - // campaign, use the Campaign resource to define custom Lambda function settings - // for the campaign. + // The settings for the AWS Lambda function to invoke by default as a code hook + // for campaigns in the application. You can use this hook to customize segments + // that are used by campaigns in the application. + // + // To override these settings and define custom settings for a specific campaign, + // use the CampaignHook object of the Campaign resource. CampaignHook *CampaignHook `type:"structure"` // Specifies whether to enable application-related alarms in Amazon CloudWatch. CloudWatchMetricsEnabled *bool `type:"boolean"` - // The default sending limits for campaigns in the application. To override - // these limits for a specific campaign, use the Campaign resource to define - // custom limits for the campaign. + // The default sending limits for campaigns and journeys in the application. + // To override these limits and define custom limits for a specific campaign + // or journey, use the Campaign resource or the Journey resource, respectively. 
Limits *CampaignLimits `type:"structure"` // The default quiet time for campaigns and journeys in the application. Quiet @@ -14350,6 +14507,11 @@ type WriteCampaignRequest struct { // in addition to the default treatment for the campaign. AdditionalTreatments []WriteTreatmentResource `type:"list"` + // The delivery configuration settings for sending the campaign through a custom + // channel. This object is required if the MessageConfiguration object for the + // campaign specifies a CustomMessage object. + CustomDeliveryConfiguration *CustomDeliveryConfiguration `type:"structure"` + // A custom description of the campaign. Description *string `type:"string"` @@ -14357,11 +14519,13 @@ type WriteCampaignRequest struct { // messages from the campaign. HoldoutPercent *int64 `type:"integer"` - // The settings for the AWS Lambda function to use as a code hook for the campaign. + // The settings for the AWS Lambda function to invoke as a code hook for the + // campaign. You can use this hook to customize the segment that's used by the + // campaign. Hook *CampaignHook `type:"structure"` // Specifies whether to pause the campaign. A paused campaign doesn't run unless - // you resume it by setting this value to false. + // you resume it by changing this value to false. IsPaused *bool `type:"boolean"` // The messaging limits for the campaign. @@ -14390,10 +14554,12 @@ type WriteCampaignRequest struct { // The message template to use for the campaign. TemplateConfiguration *TemplateConfiguration `type:"structure"` - // A custom description of a variation of the campaign to use for A/B testing. + // A custom description of the default treatment for the campaign. TreatmentDescription *string `type:"string"` - // A custom name for a variation of the campaign to use for A/B testing. + // A custom name of the default treatment for the campaign, if the campaign + // has multiple treatments. A treatment is a variation of a campaign that's + // used for A/B testing. 
TreatmentName *string `type:"string"` } @@ -14412,6 +14578,11 @@ func (s *WriteCampaignRequest) Validate() error { } } } + if s.CustomDeliveryConfiguration != nil { + if err := s.CustomDeliveryConfiguration.Validate(); err != nil { + invalidParams.AddNested("CustomDeliveryConfiguration", err.(aws.ErrInvalidParams)) + } + } if s.Schedule != nil { if err := s.Schedule.Validate(); err != nil { invalidParams.AddNested("Schedule", err.(aws.ErrInvalidParams)) @@ -14438,6 +14609,12 @@ func (s WriteCampaignRequest) MarshalFields(e protocol.FieldEncoder) error { ls0.End() } + if s.CustomDeliveryConfiguration != nil { + v := s.CustomDeliveryConfiguration + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CustomDeliveryConfiguration", v, metadata) + } if s.Description != nil { v := *s.Description @@ -14876,6 +15053,11 @@ func (s WriteSegmentRequest) MarshalFields(e protocol.FieldEncoder) error { type WriteTreatmentResource struct { _ struct{} `type:"structure"` + // The delivery configuration settings for sending the treatment through a custom + // channel. This object is required if the MessageConfiguration object for the + // treatment specifies a CustomMessage object. + CustomDeliveryConfiguration *CustomDeliveryConfiguration `type:"structure"` + // The message configuration settings for the treatment. MessageConfiguration *MessageConfiguration `type:"structure"` @@ -14894,8 +15076,7 @@ type WriteTreatmentResource struct { // A custom description of the treatment. TreatmentDescription *string `type:"string"` - // A custom name for the treatment. A treatment is a variation of a campaign - // that's used for A/B testing of a campaign. + // A custom name for the treatment. 
TreatmentName *string `type:"string"` } @@ -14911,6 +15092,11 @@ func (s *WriteTreatmentResource) Validate() error { if s.SizePercent == nil { invalidParams.Add(aws.NewErrParamRequired("SizePercent")) } + if s.CustomDeliveryConfiguration != nil { + if err := s.CustomDeliveryConfiguration.Validate(); err != nil { + invalidParams.AddNested("CustomDeliveryConfiguration", err.(aws.ErrInvalidParams)) + } + } if s.Schedule != nil { if err := s.Schedule.Validate(); err != nil { invalidParams.AddNested("Schedule", err.(aws.ErrInvalidParams)) @@ -14925,6 +15111,12 @@ func (s *WriteTreatmentResource) Validate() error { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s WriteTreatmentResource) MarshalFields(e protocol.FieldEncoder) error { + if s.CustomDeliveryConfiguration != nil { + v := s.CustomDeliveryConfiguration + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CustomDeliveryConfiguration", v, metadata) + } if s.MessageConfiguration != nil { v := s.MessageConfiguration diff --git a/service/ram/api_op_ListPrincipals.go b/service/ram/api_op_ListPrincipals.go index d39dbe3613d..9576f06be5c 100644 --- a/service/ram/api_op_ListPrincipals.go +++ b/service/ram/api_op_ListPrincipals.go @@ -36,9 +36,11 @@ type ListPrincipalsInput struct { // The resource type. 
// - // Valid values: ec2:CapacityReservation | ec2:Subnet | ec2:TrafficMirrorTarget - // | ec2:TransitGateway | license-manager:LicenseConfiguration | rds:Cluster - // | route53resolver:ResolverRule I resource-groups:Group + // Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation + // | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway + // | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe + // | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster + // | route53resolver:ResolverRule ResourceType *string `locationName:"resourceType" type:"string"` } diff --git a/service/ram/api_op_ListResourceTypes.go b/service/ram/api_op_ListResourceTypes.go new file mode 100644 index 00000000000..4f7b7875eb4 --- /dev/null +++ b/service/ram/api_op_ListResourceTypes.go @@ -0,0 +1,166 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ram + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListResourceTypesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListResourceTypesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListResourceTypesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListResourceTypesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListResourceTypesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "maxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListResourceTypesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The shareable resource types supported by AWS RAM. + ResourceTypes []ServiceNameAndResourceType `locationName:"resourceTypes" type:"list"` +} + +// String returns the string representation +func (s ListResourceTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListResourceTypesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ResourceTypes != nil { + v := s.ResourceTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "resourceTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListResourceTypes = "ListResourceTypes" + +// ListResourceTypesRequest returns a request value for making API operation for +// AWS Resource Access Manager. +// +// Lists the shareable resource types supported by AWS RAM. +// +// // Example sending a request using ListResourceTypesRequest. +// req := client.ListResourceTypesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04/ListResourceTypes +func (c *Client) ListResourceTypesRequest(input *ListResourceTypesInput) ListResourceTypesRequest { + op := &aws.Operation{ + Name: opListResourceTypes, + HTTPMethod: "POST", + HTTPPath: "/listresourcetypes", + } + + if input == nil { + input = &ListResourceTypesInput{} + } + + req := c.newRequest(op, input, &ListResourceTypesOutput{}) + return ListResourceTypesRequest{Request: req, Input: input, Copy: c.ListResourceTypesRequest} +} + +// ListResourceTypesRequest is the request type for the +// ListResourceTypes API operation. +type ListResourceTypesRequest struct { + *aws.Request + Input *ListResourceTypesInput + Copy func(*ListResourceTypesInput) ListResourceTypesRequest +} + +// Send marshals and sends the ListResourceTypes API request. 
+func (r ListResourceTypesRequest) Send(ctx context.Context) (*ListResourceTypesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListResourceTypesResponse{ + ListResourceTypesOutput: r.Request.Data.(*ListResourceTypesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListResourceTypesResponse is the response type for the +// ListResourceTypes API operation. +type ListResourceTypesResponse struct { + *ListResourceTypesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListResourceTypes request. +func (r *ListResourceTypesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/ram/api_op_ListResources.go b/service/ram/api_op_ListResources.go index 69509e7e4c8..f0f7a4e8278 100644 --- a/service/ram/api_op_ListResources.go +++ b/service/ram/api_op_ListResources.go @@ -36,9 +36,11 @@ type ListResourcesInput struct { // The resource type. 
// - // Valid values: ec2:CapacityReservation | ec2:Subnet | ec2:TrafficMirrorTarget - // | ec2:TransitGateway | license-manager:LicenseConfiguration | rds:Cluster - // | route53resolver:ResolverRule | resource-groups:Group + // Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation + // | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway + // | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe + // | license-manager:LicenseConfiguration | resource-groups:Group | rds:Cluster + // | route53resolver:ResolverRule ResourceType *string `locationName:"resourceType" type:"string"` } diff --git a/service/ram/api_types.go b/service/ram/api_types.go index e0aecdfa4d2..6f39b0934b4 100644 --- a/service/ram/api_types.go +++ b/service/ram/api_types.go @@ -670,6 +670,40 @@ func (s ResourceSharePermissionSummary) MarshalFields(e protocol.FieldEncoder) e return nil } +// Information about the shareable resource types and the AWS services to which +// they belong. +type ServiceNameAndResourceType struct { + _ struct{} `type:"structure"` + + // The shareable resource types. + ResourceType *string `locationName:"resourceType" type:"string"` + + // The name of the AWS services to which the resources belong. + ServiceName *string `locationName:"serviceName" type:"string"` +} + +// String returns the string representation +func (s ServiceNameAndResourceType) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ServiceNameAndResourceType) MarshalFields(e protocol.FieldEncoder) error { + if s.ResourceType != nil { + v := *s.ResourceType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "resourceType", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ServiceName != nil { + v := *s.ServiceName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "serviceName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // Information about a tag. type Tag struct { _ struct{} `type:"structure"` diff --git a/service/ram/ramiface/interface.go b/service/ram/ramiface/interface.go index a711c1ffcf0..d8355a67998 100644 --- a/service/ram/ramiface/interface.go +++ b/service/ram/ramiface/interface.go @@ -95,6 +95,8 @@ type ClientAPI interface { ListResourceSharePermissionsRequest(*ram.ListResourceSharePermissionsInput) ram.ListResourceSharePermissionsRequest + ListResourceTypesRequest(*ram.ListResourceTypesInput) ram.ListResourceTypesRequest + ListResourcesRequest(*ram.ListResourcesInput) ram.ListResourcesRequest PromoteResourceShareCreatedFromPolicyRequest(*ram.PromoteResourceShareCreatedFromPolicyInput) ram.PromoteResourceShareCreatedFromPolicyRequest diff --git a/service/rds/api_op_DescribeOrderableDBInstanceOptions.go b/service/rds/api_op_DescribeOrderableDBInstanceOptions.go index b288a10accc..bb60e9a6ded 100644 --- a/service/rds/api_op_DescribeOrderableDBInstanceOptions.go +++ b/service/rds/api_op_DescribeOrderableDBInstanceOptions.go @@ -13,6 +13,13 @@ import ( type DescribeOrderableDBInstanceOptionsInput struct { _ struct{} `type:"structure"` + // The Availability Zone group associated with a Local Zone. Specify this parameter + // to retrieve available offerings for the Local Zones in the group. + // + // Omit this parameter to show the available offerings in the specified AWS + // Region. 
+ AvailabilityZoneGroup *string `type:"string"` + // The DB instance class filter value. Specify this parameter to show only the // available offerings matching the specified DB instance class. DBInstanceClass *string `type:"string"` diff --git a/service/rds/api_op_RestoreDBClusterFromS3.go b/service/rds/api_op_RestoreDBClusterFromS3.go index e72c6e0e08a..75d3d700c27 100644 --- a/service/rds/api_op_RestoreDBClusterFromS3.go +++ b/service/rds/api_op_RestoreDBClusterFromS3.go @@ -345,6 +345,15 @@ const opRestoreDBClusterFromS3 = "RestoreDBClusterFromS3" // Data to an Amazon Aurora MySQL DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Migrating.html) // in the Amazon Aurora User Guide. // +// This action only restores the DB cluster, not the DB instances for that DB +// cluster. You must invoke the CreateDBInstance action to create DB instances +// for the restored DB cluster, specifying the identifier of the restored DB +// cluster in DBClusterIdentifier. You can create DB instances only after the +// RestoreDBClusterFromS3 action has completed and the DB cluster is available. +// +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// in the Amazon Aurora User Guide. +// // This action only applies to Aurora DB clusters. // // // Example sending a request using RestoreDBClusterFromS3Request. diff --git a/service/rds/api_op_RestoreDBClusterFromSnapshot.go b/service/rds/api_op_RestoreDBClusterFromSnapshot.go index e8afcb34ecb..43247828577 100644 --- a/service/rds/api_op_RestoreDBClusterFromSnapshot.go +++ b/service/rds/api_op_RestoreDBClusterFromSnapshot.go @@ -257,6 +257,8 @@ const opRestoreDBClusterFromSnapshot = "RestoreDBClusterFromSnapshot" // For more information on Amazon Aurora, see What Is Amazon Aurora? 
(https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // +// This action only applies to Aurora DB clusters. +// // // Example sending a request using RestoreDBClusterFromSnapshotRequest. // req := client.RestoreDBClusterFromSnapshotRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/rds/api_types.go b/service/rds/api_types.go index ced8e74065b..0e1c2653af7 100644 --- a/service/rds/api_types.go +++ b/service/rds/api_types.go @@ -2746,6 +2746,9 @@ func (s OptionVersion) String() string { type OrderableDBInstanceOption struct { _ struct{} `type:"structure"` + // The Availability Zone group for a DB instance. + AvailabilityZoneGroup *string `type:"string"` + // A list of Availability Zones for a DB instance. AvailabilityZones []AvailabilityZone `locationNameList:"AvailabilityZone" type:"list"` @@ -2815,8 +2818,8 @@ type OrderableDBInstanceOption struct { // True if a DB instance supports Performance Insights, otherwise false. SupportsPerformanceInsights *bool `type:"boolean"` - // Whether or not Amazon RDS can automatically scale storage for DB instances - // that use the specified instance class. + // Whether Amazon RDS can automatically scale storage for DB instances that + // use the specified DB instance class. SupportsStorageAutoscaling *bool `type:"boolean"` // Indicates whether a DB instance supports encrypted storage. 
diff --git a/service/redshift/api_enums.go b/service/redshift/api_enums.go index 1f756ff95ea..941b10eed91 100644 --- a/service/redshift/api_enums.go +++ b/service/redshift/api_enums.go @@ -256,3 +256,73 @@ func (enum TableRestoreStatusType) MarshalValueBuf(b []byte) ([]byte, error) { b = b[0:0] return append(b, enum...), nil } + +type UsageLimitBreachAction string + +// Enum values for UsageLimitBreachAction +const ( + UsageLimitBreachActionLog UsageLimitBreachAction = "log" + UsageLimitBreachActionEmitMetric UsageLimitBreachAction = "emit-metric" + UsageLimitBreachActionDisable UsageLimitBreachAction = "disable" +) + +func (enum UsageLimitBreachAction) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum UsageLimitBreachAction) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type UsageLimitFeatureType string + +// Enum values for UsageLimitFeatureType +const ( + UsageLimitFeatureTypeSpectrum UsageLimitFeatureType = "spectrum" + UsageLimitFeatureTypeConcurrencyScaling UsageLimitFeatureType = "concurrency-scaling" +) + +func (enum UsageLimitFeatureType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum UsageLimitFeatureType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type UsageLimitLimitType string + +// Enum values for UsageLimitLimitType +const ( + UsageLimitLimitTypeTime UsageLimitLimitType = "time" + UsageLimitLimitTypeDataScanned UsageLimitLimitType = "data-scanned" +) + +func (enum UsageLimitLimitType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum UsageLimitLimitType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type UsageLimitPeriod string + +// Enum values for UsageLimitPeriod +const ( + UsageLimitPeriodDaily UsageLimitPeriod = "daily" + UsageLimitPeriodWeekly UsageLimitPeriod = "weekly" + UsageLimitPeriodMonthly 
UsageLimitPeriod = "monthly" +) + +func (enum UsageLimitPeriod) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum UsageLimitPeriod) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/redshift/api_errors.go b/service/redshift/api_errors.go index f710284ae43..ada8b21cb9f 100644 --- a/service/redshift/api_errors.go +++ b/service/redshift/api_errors.go @@ -421,6 +421,12 @@ const ( // The tag is invalid. ErrCodeInvalidTagFault = "InvalidTagFault" + // ErrCodeInvalidUsageLimitFault for service response error code + // "InvalidUsageLimit". + // + // The usage limit is not valid. + ErrCodeInvalidUsageLimitFault = "InvalidUsageLimit" + // ErrCodeInvalidVPCNetworkStateFault for service response error code // "InvalidVPCNetworkStateFault". // @@ -695,4 +701,16 @@ const ( // // A request option was specified that is not supported. ErrCodeUnsupportedOptionFault = "UnsupportedOptionFault" + + // ErrCodeUsageLimitAlreadyExistsFault for service response error code + // "UsageLimitAlreadyExists". + // + // The usage limit already exists. + ErrCodeUsageLimitAlreadyExistsFault = "UsageLimitAlreadyExists" + + // ErrCodeUsageLimitNotFoundFault for service response error code + // "UsageLimitNotFound". + // + // The usage limit identifier can't be found. + ErrCodeUsageLimitNotFoundFault = "UsageLimitNotFound" ) diff --git a/service/redshift/api_op_CreateUsageLimit.go b/service/redshift/api_op_CreateUsageLimit.go new file mode 100644 index 00000000000..69676214061 --- /dev/null +++ b/service/redshift/api_op_CreateUsageLimit.go @@ -0,0 +1,191 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package redshift + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type CreateUsageLimitInput struct { + _ struct{} `type:"structure"` + + // The limit amount. If time-based, this amount is in minutes. 
If data-based, + // this amount is in terabytes (TB). The value must be a positive number. + // + // Amount is a required field + Amount *int64 `type:"long" required:"true"` + + // The action that Amazon Redshift takes when the limit is reached. The default + // is log. For more information about this parameter, see UsageLimit. + BreachAction UsageLimitBreachAction `type:"string" enum:"true"` + + // The identifier of the cluster that you want to limit usage. + // + // ClusterIdentifier is a required field + ClusterIdentifier *string `type:"string" required:"true"` + + // The Amazon Redshift feature that you want to limit. + // + // FeatureType is a required field + FeatureType UsageLimitFeatureType `type:"string" required:"true" enum:"true"` + + // The type of limit. Depending on the feature type, this can be based on a + // time duration or data size. If FeatureType is spectrum, then LimitType must + // be data-scanned. If FeatureType is concurrency-scaling, then LimitType must + // be time. + // + // LimitType is a required field + LimitType UsageLimitLimitType `type:"string" required:"true" enum:"true"` + + // The time period that the amount applies to. A weekly period begins on Sunday. + // The default is monthly. + Period UsageLimitPeriod `type:"string" enum:"true"` + + // A list of tag instances. + Tags []Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateUsageLimitInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateUsageLimitInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateUsageLimitInput"} + + if s.Amount == nil { + invalidParams.Add(aws.NewErrParamRequired("Amount")) + } + + if s.ClusterIdentifier == nil { + invalidParams.Add(aws.NewErrParamRequired("ClusterIdentifier")) + } + if len(s.FeatureType) == 0 { + invalidParams.Add(aws.NewErrParamRequired("FeatureType")) + } + if len(s.LimitType) == 0 { + invalidParams.Add(aws.NewErrParamRequired("LimitType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a usage limit object for a cluster. +type CreateUsageLimitOutput struct { + _ struct{} `type:"structure"` + + // The limit amount. If time-based, this amount is in minutes. If data-based, + // this amount is in terabytes (TB). + Amount *int64 `type:"long"` + + // The action that Amazon Redshift takes when the limit is reached. Possible + // values are: + // + // * log - To log an event in a system table. The default is log. + // + // * emit-metric - To emit CloudWatch metrics. + // + // * disable - To disable the feature until the next usage period begins. + BreachAction UsageLimitBreachAction `type:"string" enum:"true"` + + // The identifier of the cluster with a usage limit. + ClusterIdentifier *string `type:"string"` + + // The Amazon Redshift feature to which the limit applies. + FeatureType UsageLimitFeatureType `type:"string" enum:"true"` + + // The type of limit. Depending on the feature type, this can be based on a + // time duration or data size. + LimitType UsageLimitLimitType `type:"string" enum:"true"` + + // The time period that the amount applies to. A weekly period begins on Sunday. + // The default is monthly. + Period UsageLimitPeriod `type:"string" enum:"true"` + + // A list of tag instances. + Tags []Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the usage limit. 
+ UsageLimitId *string `type:"string"` +} + +// String returns the string representation +func (s CreateUsageLimitOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateUsageLimit = "CreateUsageLimit" + +// CreateUsageLimitRequest returns a request value for making API operation for +// Amazon Redshift. +// +// Creates a usage limit for a specified Amazon Redshift feature on a cluster. +// The usage limit is identified by the returned usage limit identifier. +// +// // Example sending a request using CreateUsageLimitRequest. +// req := client.CreateUsageLimitRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/CreateUsageLimit +func (c *Client) CreateUsageLimitRequest(input *CreateUsageLimitInput) CreateUsageLimitRequest { + op := &aws.Operation{ + Name: opCreateUsageLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUsageLimitInput{} + } + + req := c.newRequest(op, input, &CreateUsageLimitOutput{}) + return CreateUsageLimitRequest{Request: req, Input: input, Copy: c.CreateUsageLimitRequest} +} + +// CreateUsageLimitRequest is the request type for the +// CreateUsageLimit API operation. +type CreateUsageLimitRequest struct { + *aws.Request + Input *CreateUsageLimitInput + Copy func(*CreateUsageLimitInput) CreateUsageLimitRequest +} + +// Send marshals and sends the CreateUsageLimit API request. +func (r CreateUsageLimitRequest) Send(ctx context.Context) (*CreateUsageLimitResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateUsageLimitResponse{ + CreateUsageLimitOutput: r.Request.Data.(*CreateUsageLimitOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateUsageLimitResponse is the response type for the +// CreateUsageLimit API operation. 
+type CreateUsageLimitResponse struct { + *CreateUsageLimitOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateUsageLimit request. +func (r *CreateUsageLimitResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/redshift/api_op_DeleteUsageLimit.go b/service/redshift/api_op_DeleteUsageLimit.go new file mode 100644 index 00000000000..562322cd777 --- /dev/null +++ b/service/redshift/api_op_DeleteUsageLimit.go @@ -0,0 +1,119 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package redshift + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +type DeleteUsageLimitInput struct { + _ struct{} `type:"structure"` + + // The identifier of the usage limit to delete. + // + // UsageLimitId is a required field + UsageLimitId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUsageLimitInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUsageLimitInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteUsageLimitInput"} + + if s.UsageLimitId == nil { + invalidParams.Add(aws.NewErrParamRequired("UsageLimitId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteUsageLimitOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUsageLimitOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteUsageLimit = "DeleteUsageLimit" + +// DeleteUsageLimitRequest returns a request value for making API operation for +// Amazon Redshift. +// +// Deletes a usage limit from a cluster. 
+// +// // Example sending a request using DeleteUsageLimitRequest. +// req := client.DeleteUsageLimitRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DeleteUsageLimit +func (c *Client) DeleteUsageLimitRequest(input *DeleteUsageLimitInput) DeleteUsageLimitRequest { + op := &aws.Operation{ + Name: opDeleteUsageLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUsageLimitInput{} + } + + req := c.newRequest(op, input, &DeleteUsageLimitOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteUsageLimitRequest{Request: req, Input: input, Copy: c.DeleteUsageLimitRequest} +} + +// DeleteUsageLimitRequest is the request type for the +// DeleteUsageLimit API operation. +type DeleteUsageLimitRequest struct { + *aws.Request + Input *DeleteUsageLimitInput + Copy func(*DeleteUsageLimitInput) DeleteUsageLimitRequest +} + +// Send marshals and sends the DeleteUsageLimit API request. +func (r DeleteUsageLimitRequest) Send(ctx context.Context) (*DeleteUsageLimitResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteUsageLimitResponse{ + DeleteUsageLimitOutput: r.Request.Data.(*DeleteUsageLimitOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteUsageLimitResponse is the response type for the +// DeleteUsageLimit API operation. +type DeleteUsageLimitResponse struct { + *DeleteUsageLimitOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteUsageLimit request. 
+func (r *DeleteUsageLimitResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/redshift/api_op_DescribeUsageLimits.go b/service/redshift/api_op_DescribeUsageLimits.go new file mode 100644 index 00000000000..f3502e0bd3d --- /dev/null +++ b/service/redshift/api_op_DescribeUsageLimits.go @@ -0,0 +1,215 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package redshift + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeUsageLimitsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which you want to describe usage limits. + ClusterIdentifier *string `type:"string"` + + // The feature type for which you want to describe usage limits. + FeatureType UsageLimitFeatureType `type:"string" enum:"true"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeUsageLimits request exceed + // the value specified in MaxRecords, AWS returns a value in the Marker field + // of the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching usage limit objects + // that are associated with the specified key or keys. 
For example, suppose + // that you have parameter groups that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the usage limit objects that have either + // or both of these tag keys associated with them. + TagKeys []string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching usage limit + // objects that are associated with the specified tag value or values. For example, + // suppose that you have parameter groups that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the usage limit objects that have either + // or both of these tag values associated with them. + TagValues []string `locationNameList:"TagValue" type:"list"` + + // The identifier of the usage limit to describe. + UsageLimitId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeUsageLimitsInput) String() string { + return awsutil.Prettify(s) +} + +type DescribeUsageLimitsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // Contains the output from the DescribeUsageLimits action. 
+ UsageLimits []UsageLimit `type:"list"` +} + +// String returns the string representation +func (s DescribeUsageLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeUsageLimits = "DescribeUsageLimits" + +// DescribeUsageLimitsRequest returns a request value for making API operation for +// Amazon Redshift. +// +// Shows usage limits on a cluster. Results are filtered based on the combination +// of input usage limit identifier, cluster identifier, and feature type parameters: +// +// * If usage limit identifier, cluster identifier, and feature type are +// not provided, then all usage limit objects for the current account in +// the current region are returned. +// +// * If usage limit identifier is provided, then the corresponding usage +// limit object is returned. +// +// * If cluster identifier is provided, then all usage limit objects for +// the specified cluster are returned. +// +// * If cluster identifier and feature type are provided, then all usage +// limit objects for the combination of cluster and feature are returned. +// +// // Example sending a request using DescribeUsageLimitsRequest. 
+// req := client.DescribeUsageLimitsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeUsageLimits +func (c *Client) DescribeUsageLimitsRequest(input *DescribeUsageLimitsInput) DescribeUsageLimitsRequest { + op := &aws.Operation{ + Name: opDescribeUsageLimits, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeUsageLimitsInput{} + } + + req := c.newRequest(op, input, &DescribeUsageLimitsOutput{}) + return DescribeUsageLimitsRequest{Request: req, Input: input, Copy: c.DescribeUsageLimitsRequest} +} + +// DescribeUsageLimitsRequest is the request type for the +// DescribeUsageLimits API operation. +type DescribeUsageLimitsRequest struct { + *aws.Request + Input *DescribeUsageLimitsInput + Copy func(*DescribeUsageLimitsInput) DescribeUsageLimitsRequest +} + +// Send marshals and sends the DescribeUsageLimits API request. +func (r DescribeUsageLimitsRequest) Send(ctx context.Context) (*DescribeUsageLimitsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeUsageLimitsResponse{ + DescribeUsageLimitsOutput: r.Request.Data.(*DescribeUsageLimitsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewDescribeUsageLimitsRequestPaginator returns a paginator for DescribeUsageLimits. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.DescribeUsageLimitsRequest(input) +// p := redshift.NewDescribeUsageLimitsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewDescribeUsageLimitsPaginator(req DescribeUsageLimitsRequest) DescribeUsageLimitsPaginator { + return DescribeUsageLimitsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *DescribeUsageLimitsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// DescribeUsageLimitsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type DescribeUsageLimitsPaginator struct { + aws.Pager +} + +func (p *DescribeUsageLimitsPaginator) CurrentPage() *DescribeUsageLimitsOutput { + return p.Pager.CurrentPage().(*DescribeUsageLimitsOutput) +} + +// DescribeUsageLimitsResponse is the response type for the +// DescribeUsageLimits API operation. +type DescribeUsageLimitsResponse struct { + *DescribeUsageLimitsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeUsageLimits request. +func (r *DescribeUsageLimitsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/redshift/api_op_ModifyUsageLimit.go b/service/redshift/api_op_ModifyUsageLimit.go new file mode 100644 index 00000000000..7f982395a49 --- /dev/null +++ b/service/redshift/api_op_ModifyUsageLimit.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package redshift + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ModifyUsageLimitInput struct { + _ struct{} `type:"structure"` + + // The new limit amount. 
For more information about this parameter, see UsageLimit. + Amount *int64 `type:"long"` + + // The new action that Amazon Redshift takes when the limit is reached. For + // more information about this parameter, see UsageLimit. + BreachAction UsageLimitBreachAction `type:"string" enum:"true"` + + // The identifier of the usage limit to modify. + // + // UsageLimitId is a required field + UsageLimitId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyUsageLimitInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyUsageLimitInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ModifyUsageLimitInput"} + + if s.UsageLimitId == nil { + invalidParams.Add(aws.NewErrParamRequired("UsageLimitId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a usage limit object for a cluster. +type ModifyUsageLimitOutput struct { + _ struct{} `type:"structure"` + + // The limit amount. If time-based, this amount is in minutes. If data-based, + // this amount is in terabytes (TB). + Amount *int64 `type:"long"` + + // The action that Amazon Redshift takes when the limit is reached. Possible + // values are: + // + // * log - To log an event in a system table. The default is log. + // + // * emit-metric - To emit CloudWatch metrics. + // + // * disable - To disable the feature until the next usage period begins. + BreachAction UsageLimitBreachAction `type:"string" enum:"true"` + + // The identifier of the cluster with a usage limit. + ClusterIdentifier *string `type:"string"` + + // The Amazon Redshift feature to which the limit applies. + FeatureType UsageLimitFeatureType `type:"string" enum:"true"` + + // The type of limit. Depending on the feature type, this can be based on a + // time duration or data size. 
+ LimitType UsageLimitLimitType `type:"string" enum:"true"` + + // The time period that the amount applies to. A weekly period begins on Sunday. + // The default is monthly. + Period UsageLimitPeriod `type:"string" enum:"true"` + + // A list of tag instances. + Tags []Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the usage limit. + UsageLimitId *string `type:"string"` +} + +// String returns the string representation +func (s ModifyUsageLimitOutput) String() string { + return awsutil.Prettify(s) +} + +const opModifyUsageLimit = "ModifyUsageLimit" + +// ModifyUsageLimitRequest returns a request value for making API operation for +// Amazon Redshift. +// +// Modifies a usage limit in a cluster. You can't modify the feature type or +// period of a usage limit. +// +// // Example sending a request using ModifyUsageLimitRequest. +// req := client.ModifyUsageLimitRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/ModifyUsageLimit +func (c *Client) ModifyUsageLimitRequest(input *ModifyUsageLimitInput) ModifyUsageLimitRequest { + op := &aws.Operation{ + Name: opModifyUsageLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyUsageLimitInput{} + } + + req := c.newRequest(op, input, &ModifyUsageLimitOutput{}) + return ModifyUsageLimitRequest{Request: req, Input: input, Copy: c.ModifyUsageLimitRequest} +} + +// ModifyUsageLimitRequest is the request type for the +// ModifyUsageLimit API operation. +type ModifyUsageLimitRequest struct { + *aws.Request + Input *ModifyUsageLimitInput + Copy func(*ModifyUsageLimitInput) ModifyUsageLimitRequest +} + +// Send marshals and sends the ModifyUsageLimit API request. 
+func (r ModifyUsageLimitRequest) Send(ctx context.Context) (*ModifyUsageLimitResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ModifyUsageLimitResponse{ + ModifyUsageLimitOutput: r.Request.Data.(*ModifyUsageLimitOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ModifyUsageLimitResponse is the response type for the +// ModifyUsageLimit API operation. +type ModifyUsageLimitResponse struct { + *ModifyUsageLimitOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ModifyUsageLimit request. +func (r *ModifyUsageLimitResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/redshift/api_op_RestoreFromClusterSnapshot.go b/service/redshift/api_op_RestoreFromClusterSnapshot.go index 2f55acd359a..10fc57fc67e 100644 --- a/service/redshift/api_op_RestoreFromClusterSnapshot.go +++ b/service/redshift/api_op_RestoreFromClusterSnapshot.go @@ -142,7 +142,7 @@ type RestoreFromClusterSnapshotInput struct { // If you have a DC instance type, you must restore into that same instance // type and size. In other words, you can only restore a dc1.large instance // type into another dc1.large instance type or dc2.large instance type. You - // can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlareg + // can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge // cluster, then resize to a dc2.8large cluster. For more information about // node types, see About Clusters and Nodes (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes) // in the Amazon Redshift Cluster Management Guide. 
diff --git a/service/redshift/api_types.go b/service/redshift/api_types.go index 5c33cf65d72..d650ab443aa 100644 --- a/service/redshift/api_types.go +++ b/service/redshift/api_types.go @@ -2073,6 +2073,50 @@ func (s UpdateTarget) String() string { return awsutil.Prettify(s) } +// Describes a usage limit object for a cluster. +type UsageLimit struct { + _ struct{} `type:"structure"` + + // The limit amount. If time-based, this amount is in minutes. If data-based, + // this amount is in terabytes (TB). + Amount *int64 `type:"long"` + + // The action that Amazon Redshift takes when the limit is reached. Possible + // values are: + // + // * log - To log an event in a system table. The default is log. + // + // * emit-metric - To emit CloudWatch metrics. + // + // * disable - To disable the feature until the next usage period begins. + BreachAction UsageLimitBreachAction `type:"string" enum:"true"` + + // The identifier of the cluster with a usage limit. + ClusterIdentifier *string `type:"string"` + + // The Amazon Redshift feature to which the limit applies. + FeatureType UsageLimitFeatureType `type:"string" enum:"true"` + + // The type of limit. Depending on the feature type, this can be based on a + // time duration or data size. + LimitType UsageLimitLimitType `type:"string" enum:"true"` + + // The time period that the amount applies to. A weekly period begins on Sunday. + // The default is monthly. + Period UsageLimitPeriod `type:"string" enum:"true"` + + // A list of tag instances. + Tags []Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the usage limit. + UsageLimitId *string `type:"string"` +} + +// String returns the string representation +func (s UsageLimit) String() string { + return awsutil.Prettify(s) +} + // Describes the members of a VPC security group. 
type VpcSecurityGroupMembership struct { _ struct{} `type:"structure"` diff --git a/service/redshift/redshiftiface/interface.go b/service/redshift/redshiftiface/interface.go index 73d6cc72e87..6355e6ffd62 100644 --- a/service/redshift/redshiftiface/interface.go +++ b/service/redshift/redshiftiface/interface.go @@ -102,6 +102,8 @@ type ClientAPI interface { CreateTagsRequest(*redshift.CreateTagsInput) redshift.CreateTagsRequest + CreateUsageLimitRequest(*redshift.CreateUsageLimitInput) redshift.CreateUsageLimitRequest + DeleteClusterRequest(*redshift.DeleteClusterInput) redshift.DeleteClusterRequest DeleteClusterParameterGroupRequest(*redshift.DeleteClusterParameterGroupInput) redshift.DeleteClusterParameterGroupRequest @@ -126,6 +128,8 @@ type ClientAPI interface { DeleteTagsRequest(*redshift.DeleteTagsInput) redshift.DeleteTagsRequest + DeleteUsageLimitRequest(*redshift.DeleteUsageLimitInput) redshift.DeleteUsageLimitRequest + DescribeAccountAttributesRequest(*redshift.DescribeAccountAttributesInput) redshift.DescribeAccountAttributesRequest DescribeClusterDbRevisionsRequest(*redshift.DescribeClusterDbRevisionsInput) redshift.DescribeClusterDbRevisionsRequest @@ -182,6 +186,8 @@ type ClientAPI interface { DescribeTagsRequest(*redshift.DescribeTagsInput) redshift.DescribeTagsRequest + DescribeUsageLimitsRequest(*redshift.DescribeUsageLimitsInput) redshift.DescribeUsageLimitsRequest + DisableLoggingRequest(*redshift.DisableLoggingInput) redshift.DisableLoggingRequest DisableSnapshotCopyRequest(*redshift.DisableSnapshotCopyInput) redshift.DisableSnapshotCopyRequest @@ -218,6 +224,8 @@ type ClientAPI interface { ModifySnapshotScheduleRequest(*redshift.ModifySnapshotScheduleInput) redshift.ModifySnapshotScheduleRequest + ModifyUsageLimitRequest(*redshift.ModifyUsageLimitInput) redshift.ModifyUsageLimitRequest + PauseClusterRequest(*redshift.PauseClusterInput) redshift.PauseClusterRequest PurchaseReservedNodeOfferingRequest(*redshift.PurchaseReservedNodeOfferingInput) 
redshift.PurchaseReservedNodeOfferingRequest diff --git a/service/sagemaker/api_types.go b/service/sagemaker/api_types.go index 4a7c3433937..275dc410b15 100644 --- a/service/sagemaker/api_types.go +++ b/service/sagemaker/api_types.go @@ -7655,15 +7655,17 @@ func (s *ResourceLimits) Validate() error { return nil } -// The instance type and quantity. +// The instance type and the Amazon Resource Name (ARN) of the image created +// on the instance. The ARN is stored as metadata in Amazon SageMaker Studio +// notebooks. type ResourceSpec struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the environment. - EnvironmentArn *string `type:"string"` - // The instance type. InstanceType AppInstanceType `type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the image created on the instance. + SageMakerImageArn *string `type:"string"` } // String returns the string representation diff --git a/service/storagegateway/api_doc.go b/service/storagegateway/api_doc.go index 88e261c94dd..f538ae55d1a 100644 --- a/service/storagegateway/api_doc.go +++ b/service/storagegateway/api_doc.go @@ -7,7 +7,7 @@ // appliance with cloud-based storage to provide seamless and secure integration // between an organization's on-premises IT environment and the AWS storage // infrastructure. The service enables you to securely upload data to the AWS -// cloud for cost effective backup and rapid disaster recovery. +// Cloud for cost effective backup and rapid disaster recovery. 
// // Use the following links to get started using the AWS Storage Gateway Service // API Reference: diff --git a/service/storagegateway/api_op_AddCache.go b/service/storagegateway/api_op_AddCache.go index b439eed018e..bc93196ceb0 100644 --- a/service/storagegateway/api_op_AddCache.go +++ b/service/storagegateway/api_op_AddCache.go @@ -13,7 +13,7 @@ type AddCacheInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working - // storage. Each string have a minimum length of 1 and maximum length of 300. + // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. // // DiskIds is a required field diff --git a/service/storagegateway/api_op_AddUploadBuffer.go b/service/storagegateway/api_op_AddUploadBuffer.go index 5d709a61751..b88437947ae 100644 --- a/service/storagegateway/api_op_AddUploadBuffer.go +++ b/service/storagegateway/api_op_AddUploadBuffer.go @@ -13,7 +13,7 @@ type AddUploadBufferInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working - // storage. Each string have a minimum length of 1 and maximum length of 300. + // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. // // DiskIds is a required field diff --git a/service/storagegateway/api_op_AddWorkingStorage.go b/service/storagegateway/api_op_AddWorkingStorage.go index e0056b09964..a384e7113c2 100644 --- a/service/storagegateway/api_op_AddWorkingStorage.go +++ b/service/storagegateway/api_op_AddWorkingStorage.go @@ -16,7 +16,7 @@ type AddWorkingStorageInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working - // storage. Each string have a minimum length of 1 and maximum length of 300. + // storage. 
Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. // // DiskIds is a required field @@ -55,8 +55,8 @@ func (s *AddWorkingStorageInput) Validate() error { return nil } -// A JSON object containing the of the gateway for which working storage was -// configured. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway for +// which working storage was configured. type AddWorkingStorageOutput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_AssignTapePool.go b/service/storagegateway/api_op_AssignTapePool.go index 8eca6c00f89..62850a3096b 100644 --- a/service/storagegateway/api_op_AssignTapePool.go +++ b/service/storagegateway/api_op_AssignTapePool.go @@ -15,8 +15,8 @@ type AssignTapePoolInput struct { // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape - // is archived directly into the storage class (Glacier or Deep Archive) that - // corresponds to the pool. + // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep + // Archive) that corresponds to the pool. // // Valid values: "GLACIER", "DEEP_ARCHIVE" // @@ -80,8 +80,8 @@ const opAssignTapePool = "AssignTapePool" // Assigns a tape to a tape pool for archiving. The tape assigned to a pool // is archived in the S3 storage class that is associated with the pool. When // you use your backup application to eject the tape, the tape is archived directly -// into the S3 storage class (Glacier or Deep Archive) that corresponds to the -// pool. +// into the S3 storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds +// to the pool. 
// // Valid values: "GLACIER", "DEEP_ARCHIVE" // diff --git a/service/storagegateway/api_op_CreateCachediSCSIVolume.go b/service/storagegateway/api_op_CreateCachediSCSIVolume.go index 85794152197..f7b63ca2fd9 100644 --- a/service/storagegateway/api_op_CreateCachediSCSIVolume.go +++ b/service/storagegateway/api_op_CreateCachediSCSIVolume.go @@ -25,12 +25,12 @@ type CreateCachediSCSIVolumeInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The network interface of the gateway on which to expose the iSCSI target. diff --git a/service/storagegateway/api_op_CreateNFSFileShare.go b/service/storagegateway/api_op_CreateNFSFileShare.go index 4cd095c37a6..db95e8640f4 100644 --- a/service/storagegateway/api_op_CreateNFSFileShare.go +++ b/service/storagegateway/api_op_CreateNFSFileShare.go @@ -40,11 +40,11 @@ type CreateNFSFileShareInput struct { // and otherwise to false. The default value is true. GuessMIMETypeEnabled *bool `type:"boolean"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. 
KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) AWS KMS key used for Amazon S3 server side + // The Amazon Resource Name (ARN) AWS KMS key used for Amazon S3 server-side // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` @@ -186,11 +186,11 @@ const opCreateNFSFileShare = "CreateNFSFileShare" // // Creates a Network File System (NFS) file share on an existing file gateway. // In Storage Gateway, a file share is a file system mount point backed by Amazon -// S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. +// S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. // This operation is only supported for file gateways. // // File gateway requires AWS Security Token Service (AWS STS) to be activated -// to enable you create a file share. Make sure AWS STS is activated in the +// to enable you to create a file share. Make sure AWS STS is activated in the // AWS Region you are creating your file gateway in. If AWS STS is not activated // in the AWS Region, activate it. For information about how to activate AWS // STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS diff --git a/service/storagegateway/api_op_CreateSMBFileShare.go b/service/storagegateway/api_op_CreateSMBFileShare.go index c493dceae15..2956b98976d 100644 --- a/service/storagegateway/api_op_CreateSMBFileShare.go +++ b/service/storagegateway/api_op_CreateSMBFileShare.go @@ -53,15 +53,15 @@ type CreateSMBFileShareInput struct { // A list of users or groups in the Active Directory that are not allowed to // access the file share. A group must be prefixed with the @ character. For - // example @group1. Can only be set if Authentication is set to ActiveDirectory. + // example, @group1. Can only be set if Authentication is set to ActiveDirectory. 
InvalidUserList []string `type:"list"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ARN of the backed storage used for storing file data. @@ -195,7 +195,7 @@ const opCreateSMBFileShare = "CreateSMBFileShare" // // Creates a Server Message Block (SMB) file share on an existing file gateway. // In Storage Gateway, a file share is a file system mount point backed by Amazon -// S3 cloud storage. Storage Gateway expose file shares using a SMB interface. +// S3 cloud storage. Storage Gateway exposes file shares using an SMB interface. // This operation is only supported for file gateways. // // File gateways require AWS Security Token Service (AWS STS) to be activated diff --git a/service/storagegateway/api_op_CreateSnapshot.go b/service/storagegateway/api_op_CreateSnapshot.go index e8bd07c83a6..d2274cafe26 100644 --- a/service/storagegateway/api_op_CreateSnapshot.go +++ b/service/storagegateway/api_op_CreateSnapshot.go @@ -103,11 +103,12 @@ const opCreateSnapshot = "CreateSnapshot" // Initiates a snapshot of a volume. // // AWS Storage Gateway provides the ability to back up point-in-time snapshots -// of your data to Amazon Simple Storage (S3) for durable off-site recovery, -// as well as import the data to an Amazon Elastic Block Store (EBS) volume -// in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway -// volume on a scheduled or ad hoc basis. 
This API enables you to take ad-hoc -// snapshot. For more information, see Editing a Snapshot Schedule (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#SchedulingSnapshot). +// of your data to Amazon Simple Storage Service (Amazon S3) for durable off-site +// recovery, as well as import the data to an Amazon Elastic Block Store (EBS) +// volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your +// gateway volume on a scheduled or ad hoc basis. This API enables you to take +// an ad hoc snapshot. For more information, see Editing a Snapshot Schedule +// (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#SchedulingSnapshot). // // In the CreateSnapshot request you identify the volume by providing its Amazon // Resource Name (ARN). You must also provide description for the snapshot. diff --git a/service/storagegateway/api_op_CreateStorediSCSIVolume.go b/service/storagegateway/api_op_CreateStorediSCSIVolume.go index 90aef9754ba..308928c3a46 100644 --- a/service/storagegateway/api_op_CreateStorediSCSIVolume.go +++ b/service/storagegateway/api_op_CreateStorediSCSIVolume.go @@ -37,11 +37,11 @@ type CreateStorediSCSIVolumeInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side + // The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server-side // encryption. This value can only be set when KMSEncrypted is true. Optional. 
KMSKey *string `min:"7" type:"string"` diff --git a/service/storagegateway/api_op_CreateTapeWithBarcode.go b/service/storagegateway/api_op_CreateTapeWithBarcode.go index d63a32b0670..07b90d8e7c7 100644 --- a/service/storagegateway/api_op_CreateTapeWithBarcode.go +++ b/service/storagegateway/api_op_CreateTapeWithBarcode.go @@ -21,19 +21,19 @@ type CreateTapeWithBarcodeInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS Key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape - // is archived directly into the storage class (Glacier or Deep Archive) that - // corresponds to the pool. + // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep + // Archive) that corresponds to the pool. // // Valid values: "GLACIER", "DEEP_ARCHIVE" PoolId *string `min:"1" type:"string"` @@ -57,7 +57,7 @@ type CreateTapeWithBarcodeInput struct { // The size, in bytes, of the virtual tape that you want to create. // - // The size must be aligned by gigabyte (1024*1024*1024 byte). + // The size must be aligned by gigabyte (1024*1024*1024 bytes). 
// // TapeSizeInBytes is a required field TapeSizeInBytes *int64 `type:"long" required:"true"` @@ -129,9 +129,9 @@ const opCreateTapeWithBarcode = "CreateTapeWithBarcode" // AWS Storage Gateway. // // Creates a virtual tape by using your own barcode. You write data to the virtual -// tape and then archive the tape. A barcode is unique and can not be reused -// if it has already been used on a tape . This applies to barcodes used on -// deleted tapes. This operation is only supported in the tape gateway type. +// tape and then archive the tape. A barcode is unique and cannot be reused +// if it has already been used on a tape. This applies to barcodes used on deleted +// tapes. This operation is only supported in the tape gateway type. // // Cache storage must be allocated to the gateway before you can create a virtual // tape. Use the AddCache operation to add cache storage to a gateway. diff --git a/service/storagegateway/api_op_CreateTapes.go b/service/storagegateway/api_op_CreateTapes.go index c189e49dedb..bb840b48b51 100644 --- a/service/storagegateway/api_op_CreateTapes.go +++ b/service/storagegateway/api_op_CreateTapes.go @@ -29,12 +29,12 @@ type CreateTapesInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The number of virtual tapes that you want to create. 
@@ -45,8 +45,8 @@ type CreateTapesInput struct { // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape - // is archived directly into the storage class (Glacier or Deep Archive) that - // corresponds to the pool. + // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep + // Archive) that corresponds to the pool. // // Valid values: "GLACIER", "DEEP_ARCHIVE" PoolId *string `min:"1" type:"string"` @@ -71,7 +71,7 @@ type CreateTapesInput struct { // The size, in bytes, of the virtual tapes that you want to create. // - // The size must be aligned by gigabyte (1024*1024*1024 byte). + // The size must be aligned by gigabyte (1024*1024*1024 bytes). // // TapeSizeInBytes is a required field TapeSizeInBytes *int64 `type:"long" required:"true"` diff --git a/service/storagegateway/api_op_DeleteAutomaticTapeCreationPolicy.go b/service/storagegateway/api_op_DeleteAutomaticTapeCreationPolicy.go new file mode 100644 index 00000000000..143093f5e7f --- /dev/null +++ b/service/storagegateway/api_op_DeleteAutomaticTapeCreationPolicy.go @@ -0,0 +1,125 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package storagegateway + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DeleteAutomaticTapeCreationPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. 
+ // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAutomaticTapeCreationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAutomaticTapeCreationPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteAutomaticTapeCreationPolicyInput"} + + if s.GatewayARN == nil { + invalidParams.Add(aws.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(aws.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAutomaticTapeCreationPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteAutomaticTapeCreationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteAutomaticTapeCreationPolicy = "DeleteAutomaticTapeCreationPolicy" + +// DeleteAutomaticTapeCreationPolicyRequest returns a request value for making API operation for +// AWS Storage Gateway. +// +// Deletes the automatic tape creation policy of a gateway. If you delete this +// policy, new virtual tapes must be created manually. Use the Amazon Resource +// Name (ARN) of the gateway in your request to remove the policy. +// +// // Example sending a request using DeleteAutomaticTapeCreationPolicyRequest. 
+// req := client.DeleteAutomaticTapeCreationPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteAutomaticTapeCreationPolicy +func (c *Client) DeleteAutomaticTapeCreationPolicyRequest(input *DeleteAutomaticTapeCreationPolicyInput) DeleteAutomaticTapeCreationPolicyRequest { + op := &aws.Operation{ + Name: opDeleteAutomaticTapeCreationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAutomaticTapeCreationPolicyInput{} + } + + req := c.newRequest(op, input, &DeleteAutomaticTapeCreationPolicyOutput{}) + return DeleteAutomaticTapeCreationPolicyRequest{Request: req, Input: input, Copy: c.DeleteAutomaticTapeCreationPolicyRequest} +} + +// DeleteAutomaticTapeCreationPolicyRequest is the request type for the +// DeleteAutomaticTapeCreationPolicy API operation. +type DeleteAutomaticTapeCreationPolicyRequest struct { + *aws.Request + Input *DeleteAutomaticTapeCreationPolicyInput + Copy func(*DeleteAutomaticTapeCreationPolicyInput) DeleteAutomaticTapeCreationPolicyRequest +} + +// Send marshals and sends the DeleteAutomaticTapeCreationPolicy API request. +func (r DeleteAutomaticTapeCreationPolicyRequest) Send(ctx context.Context) (*DeleteAutomaticTapeCreationPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteAutomaticTapeCreationPolicyResponse{ + DeleteAutomaticTapeCreationPolicyOutput: r.Request.Data.(*DeleteAutomaticTapeCreationPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteAutomaticTapeCreationPolicyResponse is the response type for the +// DeleteAutomaticTapeCreationPolicy API operation. 
+type DeleteAutomaticTapeCreationPolicyResponse struct { + *DeleteAutomaticTapeCreationPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteAutomaticTapeCreationPolicy request. +func (r *DeleteAutomaticTapeCreationPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/storagegateway/api_op_DeleteBandwidthRateLimit.go b/service/storagegateway/api_op_DeleteBandwidthRateLimit.go index 1085befa098..8ad11f0c67e 100644 --- a/service/storagegateway/api_op_DeleteBandwidthRateLimit.go +++ b/service/storagegateway/api_op_DeleteBandwidthRateLimit.go @@ -59,8 +59,8 @@ func (s *DeleteBandwidthRateLimitInput) Validate() error { return nil } -// A JSON object containing the of the gateway whose bandwidth rate information -// was deleted. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose +// bandwidth rate information was deleted. type DeleteBandwidthRateLimitOutput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_DeleteSnapshotSchedule.go b/service/storagegateway/api_op_DeleteSnapshotSchedule.go index d07d43dbdde..aa381b2c357 100644 --- a/service/storagegateway/api_op_DeleteSnapshotSchedule.go +++ b/service/storagegateway/api_op_DeleteSnapshotSchedule.go @@ -66,8 +66,9 @@ const opDeleteSnapshotSchedule = "DeleteSnapshotSchedule" // its Amazon Resource Name (ARN). This operation is only supported in stored // and cached volume gateway types. // -// To list or delete a snapshot, you must use the Amazon EC2 API. in Amazon -// Elastic Compute Cloud API Reference. +// To list or delete a snapshot, you must use the Amazon EC2 API. For more information, +// go to DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) +// in the Amazon Elastic Compute Cloud API Reference. // // // Example sending a request using DeleteSnapshotScheduleRequest. 
// req := client.DeleteSnapshotScheduleRequest(params) diff --git a/service/storagegateway/api_op_DeleteVolume.go b/service/storagegateway/api_op_DeleteVolume.go index 0f375243efc..e9f9e389291 100644 --- a/service/storagegateway/api_op_DeleteVolume.go +++ b/service/storagegateway/api_op_DeleteVolume.go @@ -42,7 +42,8 @@ func (s *DeleteVolumeInput) Validate() error { return nil } -// A JSON object containing the of the storage volume that was deleted +// A JSON object containing the Amazon Resource Name (ARN) of the storage volume +// that was deleted type DeleteVolumeOutput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_DescribeBandwidthRateLimit.go b/service/storagegateway/api_op_DescribeBandwidthRateLimit.go index 49c8182de4b..5e7a4ccf5cc 100644 --- a/service/storagegateway/api_op_DescribeBandwidthRateLimit.go +++ b/service/storagegateway/api_op_DescribeBandwidthRateLimit.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// A JSON object containing the of the gateway. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway. type DescribeBandwidthRateLimitInput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_DescribeCache.go b/service/storagegateway/api_op_DescribeCache.go index 654829ed308..508a73a20c4 100644 --- a/service/storagegateway/api_op_DescribeCache.go +++ b/service/storagegateway/api_op_DescribeCache.go @@ -44,7 +44,7 @@ func (s *DescribeCacheInput) Validate() error { type DescribeCacheOutput struct { _ struct{} `type:"structure"` - // The amount of cache in bytes allocated to the a gateway. + // The amount of cache in bytes allocated to a gateway. 
CacheAllocatedInBytes *int64 `type:"long"` // The file share's contribution to the overall percentage of the gateway's @@ -66,7 +66,7 @@ type DescribeCacheOutput struct { CacheUsedPercentage *float64 `type:"double"` // An array of strings that identify disks that are to be configured as working - // storage. Each string have a minimum length of 1 and maximum length of 300. + // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. DiskIds []string `type:"list"` @@ -86,7 +86,7 @@ const opDescribeCache = "DescribeCache" // AWS Storage Gateway. // // Returns information about the cache of a gateway. This operation is only -// supported in the cached volume, tape and file gateway types. +// supported in the cached volume, tape, and file gateway types. // // The response includes disk IDs that are configured as cache, and it includes // the amount of cache allocated and used. diff --git a/service/storagegateway/api_op_DescribeCachediSCSIVolumes.go b/service/storagegateway/api_op_DescribeCachediSCSIVolumes.go index 2fed97a10d4..1cc869ce005 100644 --- a/service/storagegateway/api_op_DescribeCachediSCSIVolumes.go +++ b/service/storagegateway/api_op_DescribeCachediSCSIVolumes.go @@ -13,8 +13,8 @@ type DescribeCachediSCSIVolumesInput struct { _ struct{} `type:"structure"` // An array of strings where each string represents the Amazon Resource Name - // (ARN) of a cached volume. All of the specified cached volumes must from the - // same gateway. Use ListVolumes to get volume ARNs for a gateway. + // (ARN) of a cached volume. All of the specified cached volumes must be from + // the same gateway. Use ListVolumes to get volume ARNs for a gateway. // // VolumeARNs is a required field VolumeARNs []string `type:"list" required:"true"` @@ -62,7 +62,7 @@ const opDescribeCachediSCSIVolumes = "DescribeCachediSCSIVolumes" // operation is only supported in the cached volume gateway types. 
// // The list of gateway volumes in the request must be from one gateway. In the -// response Amazon Storage Gateway returns volume information sorted by volume +// response, AWS Storage Gateway returns volume information sorted by volume // Amazon Resource Name (ARN). // // // Example sending a request using DescribeCachediSCSIVolumesRequest. diff --git a/service/storagegateway/api_op_DescribeMaintenanceStartTime.go b/service/storagegateway/api_op_DescribeMaintenanceStartTime.go index 86910f66826..dcb784add10 100644 --- a/service/storagegateway/api_op_DescribeMaintenanceStartTime.go +++ b/service/storagegateway/api_op_DescribeMaintenanceStartTime.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// A JSON object containing the of the gateway. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway. type DescribeMaintenanceStartTimeInput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_DescribeStorediSCSIVolumes.go b/service/storagegateway/api_op_DescribeStorediSCSIVolumes.go index fc1a050a913..126a905c4f3 100644 --- a/service/storagegateway/api_op_DescribeStorediSCSIVolumes.go +++ b/service/storagegateway/api_op_DescribeStorediSCSIVolumes.go @@ -14,8 +14,8 @@ type DescribeStorediSCSIVolumesInput struct { _ struct{} `type:"structure"` // An array of strings where each string represents the Amazon Resource Name - // (ARN) of a stored volume. All of the specified stored volumes must from the - // same gateway. Use ListVolumes to get volume ARNs for a gateway. + // (ARN) of a stored volume. All of the specified stored volumes must be from + // the same gateway. Use ListVolumes to get volume ARNs for a gateway. // // VolumeARNs is a required field VolumeARNs []string `type:"list" required:"true"` @@ -105,7 +105,7 @@ const opDescribeStorediSCSIVolumes = "DescribeStorediSCSIVolumes" // // Returns the description of the gateway volumes specified in the request. 
// The list of gateway volumes in the request must be from one gateway. In the -// response Amazon Storage Gateway returns volume information sorted by volume +// response, AWS Storage Gateway returns volume information sorted by volume // ARNs. This operation is only supported in stored volume gateway type. // // // Example sending a request using DescribeStorediSCSIVolumesRequest. diff --git a/service/storagegateway/api_op_DescribeTapeArchives.go b/service/storagegateway/api_op_DescribeTapeArchives.go index c05b288f322..030ccf96953 100644 --- a/service/storagegateway/api_op_DescribeTapeArchives.go +++ b/service/storagegateway/api_op_DescribeTapeArchives.go @@ -13,7 +13,7 @@ import ( type DescribeTapeArchivesInput struct { _ struct{} `type:"structure"` - // Specifies that the number of virtual tapes descried be limited to the specified + // Specifies that the number of virtual tapes described be limited to the specified // number. Limit *int64 `min:"1" type:"integer"` diff --git a/service/storagegateway/api_op_DescribeVTLDevices.go b/service/storagegateway/api_op_DescribeVTLDevices.go index 1ba477b5a73..95a47e5ceb3 100644 --- a/service/storagegateway/api_op_DescribeVTLDevices.go +++ b/service/storagegateway/api_op_DescribeVTLDevices.go @@ -78,7 +78,7 @@ type DescribeVTLDevicesOutput struct { // to describe, this field does not appear in the response. Marker *string `min:"1" type:"string"` - // An array of VTL device objects composed of the Amazon Resource Name(ARN) + // An array of VTL device objects composed of the Amazon Resource Name (ARN) // of the VTL devices. 
VTLDevices []VTLDevice `type:"list"` } diff --git a/service/storagegateway/api_op_DescribeWorkingStorage.go b/service/storagegateway/api_op_DescribeWorkingStorage.go index 77834f5e183..2aebb5491e0 100644 --- a/service/storagegateway/api_op_DescribeWorkingStorage.go +++ b/service/storagegateway/api_op_DescribeWorkingStorage.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// A JSON object containing the of the gateway. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway. type DescribeWorkingStorageInput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_DisableGateway.go b/service/storagegateway/api_op_DisableGateway.go index dc5fe47f09d..3bf3fb61965 100644 --- a/service/storagegateway/api_op_DisableGateway.go +++ b/service/storagegateway/api_op_DisableGateway.go @@ -67,7 +67,7 @@ const opDisableGateway = "DisableGateway" // Use this operation for a tape gateway that is not reachable or not functioning. // This operation is only supported in the tape gateway type. // -// Once a gateway is disabled it cannot be enabled. +// After a gateway is disabled, it cannot be enabled. // // // Example sending a request using DisableGatewayRequest. // req := client.DisableGatewayRequest(params) diff --git a/service/storagegateway/api_op_ListAutomaticTapeCreationPolicies.go b/service/storagegateway/api_op_ListAutomaticTapeCreationPolicies.go new file mode 100644 index 00000000000..258ad937de4 --- /dev/null +++ b/service/storagegateway/api_op_ListAutomaticTapeCreationPolicies.go @@ -0,0 +1,121 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package storagegateway + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListAutomaticTapeCreationPoliciesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. 
Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ListAutomaticTapeCreationPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAutomaticTapeCreationPoliciesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAutomaticTapeCreationPoliciesInput"} + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(aws.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAutomaticTapeCreationPoliciesOutput struct { + _ struct{} `type:"structure"` + + // Gets a listing of information about the gateway's automatic tape creation + // policies, including the automatic tape creation rules and the gateway that + // is using the policies. + AutomaticTapeCreationPolicyInfos []AutomaticTapeCreationPolicyInfo `type:"list"` +} + +// String returns the string representation +func (s ListAutomaticTapeCreationPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListAutomaticTapeCreationPolicies = "ListAutomaticTapeCreationPolicies" + +// ListAutomaticTapeCreationPoliciesRequest returns a request value for making API operation for +// AWS Storage Gateway. +// +// Lists the automatic tape creation policies for a gateway. If there are no +// automatic tape creation policies for the gateway, it returns an empty list. +// +// This operation is only supported for tape gateways. +// +// // Example sending a request using ListAutomaticTapeCreationPoliciesRequest. 
+// req := client.ListAutomaticTapeCreationPoliciesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListAutomaticTapeCreationPolicies +func (c *Client) ListAutomaticTapeCreationPoliciesRequest(input *ListAutomaticTapeCreationPoliciesInput) ListAutomaticTapeCreationPoliciesRequest { + op := &aws.Operation{ + Name: opListAutomaticTapeCreationPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAutomaticTapeCreationPoliciesInput{} + } + + req := c.newRequest(op, input, &ListAutomaticTapeCreationPoliciesOutput{}) + return ListAutomaticTapeCreationPoliciesRequest{Request: req, Input: input, Copy: c.ListAutomaticTapeCreationPoliciesRequest} +} + +// ListAutomaticTapeCreationPoliciesRequest is the request type for the +// ListAutomaticTapeCreationPolicies API operation. +type ListAutomaticTapeCreationPoliciesRequest struct { + *aws.Request + Input *ListAutomaticTapeCreationPoliciesInput + Copy func(*ListAutomaticTapeCreationPoliciesInput) ListAutomaticTapeCreationPoliciesRequest +} + +// Send marshals and sends the ListAutomaticTapeCreationPolicies API request. +func (r ListAutomaticTapeCreationPoliciesRequest) Send(ctx context.Context) (*ListAutomaticTapeCreationPoliciesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAutomaticTapeCreationPoliciesResponse{ + ListAutomaticTapeCreationPoliciesOutput: r.Request.Data.(*ListAutomaticTapeCreationPoliciesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListAutomaticTapeCreationPoliciesResponse is the response type for the +// ListAutomaticTapeCreationPolicies API operation. 
+type ListAutomaticTapeCreationPoliciesResponse struct { + *ListAutomaticTapeCreationPoliciesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAutomaticTapeCreationPolicies request. +func (r *ListAutomaticTapeCreationPoliciesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/storagegateway/api_op_ListFileShares.go b/service/storagegateway/api_op_ListFileShares.go index 5f3618d41be..5f27167c877 100644 --- a/service/storagegateway/api_op_ListFileShares.go +++ b/service/storagegateway/api_op_ListFileShares.go @@ -13,7 +13,7 @@ import ( type ListFileSharesInput struct { _ struct{} `type:"structure"` - // The Amazon resource Name (ARN) of the gateway whose file shares you want + // The Amazon Resource Name (ARN) of the gateway whose file shares you want // to list. If this field is not present, all file shares under your account // are listed. GatewayARN *string `min:"50" type:"string"` diff --git a/service/storagegateway/api_op_ListLocalDisks.go b/service/storagegateway/api_op_ListLocalDisks.go index b82506259a5..ef3f300758c 100644 --- a/service/storagegateway/api_op_ListLocalDisks.go +++ b/service/storagegateway/api_op_ListLocalDisks.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// A JSON object containing the of the gateway. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway. type ListLocalDisksInput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_ListTapes.go b/service/storagegateway/api_op_ListTapes.go index 486225f5cca..ac4b3dff492 100644 --- a/service/storagegateway/api_op_ListTapes.go +++ b/service/storagegateway/api_op_ListTapes.go @@ -67,9 +67,9 @@ type ListTapesOutput struct { // in the response body. Marker *string `min:"1" type:"string"` - // An array of TapeInfo objects, where each object describes an a single tape. 
- // If there not tapes in the tape library or VTS, then the TapeInfos is an empty - // array. + // An array of TapeInfo objects, where each object describes a single tape. + // If there are no tapes in the tape library or VTS, then the TapeInfos is an + // empty array. TapeInfos []TapeInfo `type:"list"` } diff --git a/service/storagegateway/api_op_RemoveTagsFromResource.go b/service/storagegateway/api_op_RemoveTagsFromResource.go index afd2626e03f..4527ed6037d 100644 --- a/service/storagegateway/api_op_RemoveTagsFromResource.go +++ b/service/storagegateway/api_op_RemoveTagsFromResource.go @@ -20,7 +20,7 @@ type RemoveTagsFromResourceInput struct { ResourceARN *string `min:"50" type:"string" required:"true"` // The keys of the tags you want to remove from the specified resource. A tag - // is composed of a key/value pair. + // is composed of a key-value pair. // // TagKeys is a required field TagKeys []string `type:"list" required:"true"` diff --git a/service/storagegateway/api_op_ResetCache.go b/service/storagegateway/api_op_ResetCache.go index 5654aca9db9..1ed213a7fac 100644 --- a/service/storagegateway/api_op_ResetCache.go +++ b/service/storagegateway/api_op_ResetCache.go @@ -59,12 +59,12 @@ const opResetCache = "ResetCache" // ResetCacheRequest returns a request value for making API operation for // AWS Storage Gateway. // -// Resets all cache disks that have encountered a error and makes the disks +// Resets all cache disks that have encountered an error and makes the disks // available for reconfiguration as cache storage. If your cache disk encounters -// a error, the gateway prevents read and write operations on virtual tapes +// an error, the gateway prevents read and write operations on virtual tapes // in the gateway. For example, an error can occur when a disk is corrupted // or removed from the gateway. When a cache is reset, the gateway loses its -// cache storage. At this point you can reconfigure the disks as cache disks. +// cache storage. 
At this point, you can reconfigure the disks as cache disks. // This operation is only supported in the cached volume and tape types. // // If the cache disk you are resetting contains data that has not been uploaded diff --git a/service/storagegateway/api_op_ShutdownGateway.go b/service/storagegateway/api_op_ShutdownGateway.go index c23ed1addb5..0c030f1fa22 100644 --- a/service/storagegateway/api_op_ShutdownGateway.go +++ b/service/storagegateway/api_op_ShutdownGateway.go @@ -9,7 +9,8 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// A JSON object containing the of the gateway to shut down. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway to +// shut down. type ShutdownGatewayInput struct { _ struct{} `type:"structure"` @@ -42,7 +43,8 @@ func (s *ShutdownGatewayInput) Validate() error { return nil } -// A JSON object containing the of the gateway that was shut down. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway that +// was shut down. type ShutdownGatewayOutput struct { _ struct{} `type:"structure"` @@ -71,7 +73,7 @@ const opShutdownGateway = "ShutdownGateway" // the gateway component in the VM to avoid unpredictable conditions. // // After the gateway is shutdown, you cannot call any other API except StartGateway, -// DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. +// DescribeGatewayInformation and ListGateways. For more information, see ActivateGateway. // Your applications cannot read from or write to the gateway's storage volumes, // and there are no snapshots taken. // diff --git a/service/storagegateway/api_op_StartGateway.go b/service/storagegateway/api_op_StartGateway.go index 40744078e02..5502ca7f3c1 100644 --- a/service/storagegateway/api_op_StartGateway.go +++ b/service/storagegateway/api_op_StartGateway.go @@ -9,7 +9,8 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// A JSON object containing the of the gateway to start. 
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway to +// start. type StartGatewayInput struct { _ struct{} `type:"structure"` @@ -42,7 +43,8 @@ func (s *StartGatewayInput) Validate() error { return nil } -// A JSON object containing the of the gateway that was restarted. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway that +// was restarted. type StartGatewayOutput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_UpdateAutomaticTapeCreationPolicy.go b/service/storagegateway/api_op_UpdateAutomaticTapeCreationPolicy.go new file mode 100644 index 00000000000..33697393c35 --- /dev/null +++ b/service/storagegateway/api_op_UpdateAutomaticTapeCreationPolicy.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package storagegateway + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdateAutomaticTapeCreationPolicyInput struct { + _ struct{} `type:"structure"` + + // An automatic tape creation policy consists of a list of automatic tape creation + // rules. The rules determine when and how to automatically create new tapes. + // + // AutomaticTapeCreationRules is a required field + AutomaticTapeCreationRules []AutomaticTapeCreationRule `min:"1" type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAutomaticTapeCreationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateAutomaticTapeCreationPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateAutomaticTapeCreationPolicyInput"} + + if s.AutomaticTapeCreationRules == nil { + invalidParams.Add(aws.NewErrParamRequired("AutomaticTapeCreationRules")) + } + if s.AutomaticTapeCreationRules != nil && len(s.AutomaticTapeCreationRules) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AutomaticTapeCreationRules", 1)) + } + + if s.GatewayARN == nil { + invalidParams.Add(aws.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(aws.NewErrParamMinLen("GatewayARN", 50)) + } + if s.AutomaticTapeCreationRules != nil { + for i, v := range s.AutomaticTapeCreationRules { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AutomaticTapeCreationRules", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAutomaticTapeCreationPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateAutomaticTapeCreationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateAutomaticTapeCreationPolicy = "UpdateAutomaticTapeCreationPolicy" + +// UpdateAutomaticTapeCreationPolicyRequest returns a request value for making API operation for +// AWS Storage Gateway. +// +// Updates the automatic tape creation policy of a gateway. Use this to update +// the policy with a new set of automatic tape creation rules. This is only +// supported for tape gateways. +// +// By default, there is no automatic tape creation policy. +// +// A gateway can have only one automatic tape creation policy. 
+// +// // Example sending a request using UpdateAutomaticTapeCreationPolicyRequest. +// req := client.UpdateAutomaticTapeCreationPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateAutomaticTapeCreationPolicy +func (c *Client) UpdateAutomaticTapeCreationPolicyRequest(input *UpdateAutomaticTapeCreationPolicyInput) UpdateAutomaticTapeCreationPolicyRequest { + op := &aws.Operation{ + Name: opUpdateAutomaticTapeCreationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAutomaticTapeCreationPolicyInput{} + } + + req := c.newRequest(op, input, &UpdateAutomaticTapeCreationPolicyOutput{}) + return UpdateAutomaticTapeCreationPolicyRequest{Request: req, Input: input, Copy: c.UpdateAutomaticTapeCreationPolicyRequest} +} + +// UpdateAutomaticTapeCreationPolicyRequest is the request type for the +// UpdateAutomaticTapeCreationPolicy API operation. +type UpdateAutomaticTapeCreationPolicyRequest struct { + *aws.Request + Input *UpdateAutomaticTapeCreationPolicyInput + Copy func(*UpdateAutomaticTapeCreationPolicyInput) UpdateAutomaticTapeCreationPolicyRequest +} + +// Send marshals and sends the UpdateAutomaticTapeCreationPolicy API request. +func (r UpdateAutomaticTapeCreationPolicyRequest) Send(ctx context.Context) (*UpdateAutomaticTapeCreationPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateAutomaticTapeCreationPolicyResponse{ + UpdateAutomaticTapeCreationPolicyOutput: r.Request.Data.(*UpdateAutomaticTapeCreationPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateAutomaticTapeCreationPolicyResponse is the response type for the +// UpdateAutomaticTapeCreationPolicy API operation. 
+type UpdateAutomaticTapeCreationPolicyResponse struct { + *UpdateAutomaticTapeCreationPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateAutomaticTapeCreationPolicy request. +func (r *UpdateAutomaticTapeCreationPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/storagegateway/api_op_UpdateBandwidthRateLimit.go b/service/storagegateway/api_op_UpdateBandwidthRateLimit.go index 57b7659b908..236beb3b96a 100644 --- a/service/storagegateway/api_op_UpdateBandwidthRateLimit.go +++ b/service/storagegateway/api_op_UpdateBandwidthRateLimit.go @@ -58,8 +58,8 @@ func (s *UpdateBandwidthRateLimitInput) Validate() error { return nil } -// A JSON object containing the of the gateway whose throttle information was -// updated. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose +// throttle information was updated. type UpdateBandwidthRateLimitOutput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_UpdateGatewaySoftwareNow.go b/service/storagegateway/api_op_UpdateGatewaySoftwareNow.go index 99e47e93ab5..4d5f3f012f4 100644 --- a/service/storagegateway/api_op_UpdateGatewaySoftwareNow.go +++ b/service/storagegateway/api_op_UpdateGatewaySoftwareNow.go @@ -9,7 +9,8 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// A JSON object containing the of the gateway to update. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway to +// update. type UpdateGatewaySoftwareNowInput struct { _ struct{} `type:"structure"` @@ -42,7 +43,8 @@ func (s *UpdateGatewaySoftwareNowInput) Validate() error { return nil } -// A JSON object containing the of the gateway that was updated. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway that +// was updated. 
type UpdateGatewaySoftwareNowOutput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_UpdateMaintenanceStartTime.go b/service/storagegateway/api_op_UpdateMaintenanceStartTime.go index a603283be45..c34b80b88e0 100644 --- a/service/storagegateway/api_op_UpdateMaintenanceStartTime.go +++ b/service/storagegateway/api_op_UpdateMaintenanceStartTime.go @@ -86,8 +86,8 @@ func (s *UpdateMaintenanceStartTimeInput) Validate() error { return nil } -// A JSON object containing the of the gateway whose maintenance start time -// is updated. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose +// maintenance start time is updated. type UpdateMaintenanceStartTimeOutput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_op_UpdateNFSFileShare.go b/service/storagegateway/api_op_UpdateNFSFileShare.go index 91458ff56cc..331be11a1c3 100644 --- a/service/storagegateway/api_op_UpdateNFSFileShare.go +++ b/service/storagegateway/api_op_UpdateNFSFileShare.go @@ -32,12 +32,12 @@ type UpdateNFSFileShareInput struct { // and otherwise to false. The default value is true. GuessMIMETypeEnabled *bool `type:"boolean"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The default values for the file share. Optional. 
diff --git a/service/storagegateway/api_op_UpdateSMBFileShare.go b/service/storagegateway/api_op_UpdateSMBFileShare.go index bd8179b990a..fe423071d6f 100644 --- a/service/storagegateway/api_op_UpdateSMBFileShare.go +++ b/service/storagegateway/api_op_UpdateSMBFileShare.go @@ -41,12 +41,12 @@ type UpdateSMBFileShareInput struct { // example @group1. Can only be set if Authentication is set to ActiveDirectory. InvalidUserList []string `type:"list"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // A value that sets the access control list permission for objects in the S3 diff --git a/service/storagegateway/api_op_UpdateSnapshotSchedule.go b/service/storagegateway/api_op_UpdateSnapshotSchedule.go index 07d8559113d..5d1241897b2 100644 --- a/service/storagegateway/api_op_UpdateSnapshotSchedule.go +++ b/service/storagegateway/api_op_UpdateSnapshotSchedule.go @@ -96,7 +96,8 @@ func (s *UpdateSnapshotScheduleInput) Validate() error { return nil } -// A JSON object containing the of the updated storage volume. +// A JSON object containing the Amazon Resource Name (ARN) of the updated storage +// volume. 
type UpdateSnapshotScheduleOutput struct { _ struct{} `type:"structure"` diff --git a/service/storagegateway/api_types.go b/service/storagegateway/api_types.go index 8dd3a8d35d2..b6c18176155 100644 --- a/service/storagegateway/api_types.go +++ b/service/storagegateway/api_types.go @@ -12,6 +12,105 @@ import ( var _ aws.Config var _ = awsutil.Prettify +// Information about the gateway's automatic tape creation policies, including +// the automatic tape creation rules and the gateway that is using the policies. +type AutomaticTapeCreationPolicyInfo struct { + _ struct{} `type:"structure"` + + // An automatic tape creation policy consists of a list of automatic tape creation + // rules. This returns the rules that determine when and how to automatically + // create new tapes. + AutomaticTapeCreationRules []AutomaticTapeCreationRule `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s AutomaticTapeCreationPolicyInfo) String() string { + return awsutil.Prettify(s) +} + +// An automatic tape creation policy consists of automatic tape creation rules +// where each rule defines when and how to create new tapes. +type AutomaticTapeCreationRule struct { + _ struct{} `type:"structure"` + + // The minimum number of available virtual tapes that the gateway maintains + // at all times. If the number of tapes on the gateway goes below this value, + // the gateway creates as many new tapes as are needed to have MinimumNumTapes + // on the gateway. + // + // MinimumNumTapes is a required field + MinimumNumTapes *int64 `min:"1" type:"integer" required:"true"` + + // The ID of the pool that you want to add your tape to for archiving. The tape + // in this pool is archived in the Amazon S3 storage class that is associated + // with the pool. 
When you use your backup application to eject the tape, the + // tape is archived directly into the storage class (S3 Glacier or S3 Glacier + // Deep Archive) that corresponds to the pool. + // + // Valid values: "GLACIER", "DEEP_ARCHIVE" + // + // PoolId is a required field + PoolId *string `min:"1" type:"string" required:"true"` + + // A prefix that you append to the barcode of the virtual tape that you are + // creating. This prefix makes the barcode unique. + // + // The prefix must be 1-4 characters in length and must be one of the uppercase + // letters from A to Z. + // + // TapeBarcodePrefix is a required field + TapeBarcodePrefix *string `min:"1" type:"string" required:"true"` + + // The size, in bytes, of the virtual tape capacity. + // + // TapeSizeInBytes is a required field + TapeSizeInBytes *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s AutomaticTapeCreationRule) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AutomaticTapeCreationRule) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AutomaticTapeCreationRule"} + + if s.MinimumNumTapes == nil { + invalidParams.Add(aws.NewErrParamRequired("MinimumNumTapes")) + } + if s.MinimumNumTapes != nil && *s.MinimumNumTapes < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MinimumNumTapes", 1)) + } + + if s.PoolId == nil { + invalidParams.Add(aws.NewErrParamRequired("PoolId")) + } + if s.PoolId != nil && len(*s.PoolId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PoolId", 1)) + } + + if s.TapeBarcodePrefix == nil { + invalidParams.Add(aws.NewErrParamRequired("TapeBarcodePrefix")) + } + if s.TapeBarcodePrefix != nil && len(*s.TapeBarcodePrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TapeBarcodePrefix", 1)) + } + + if s.TapeSizeInBytes == nil { + invalidParams.Add(aws.NewErrParamRequired("TapeSizeInBytes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Describes an iSCSI cached volume. type CachediSCSIVolume struct { _ struct{} `type:"structure"` @@ -20,8 +119,8 @@ type CachediSCSIVolume struct { // don’t have this time stamp. CreatedDate *time.Time `type:"timestamp"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // If the cached volume was created from a snapshot, this field contains the @@ -317,12 +416,12 @@ type NFSFileShareInfo struct { // and otherwise to false. The default value is true. 
GuessMIMETypeEnabled *bool `type:"boolean"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or + // True to use Amazon S3 server-side encryption with your own AWS KMS key, or // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ARN of the backend storage used for storing file data. @@ -455,8 +554,8 @@ type SMBFileShareInfo struct { // false to use a key managed by Amazon S3. Optional. KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ARN of the backend storage used for storing file data. @@ -511,9 +610,9 @@ func (s SMBFileShareInfo) String() string { return awsutil.Prettify(s) } -// Provides additional information about an error that was returned by the service -// as an or. See the errorCode and errorDetails members for more information -// about the error. +// Provides additional information about an error that was returned by the service. +// See the errorCode and errorDetails members for more information about the +// error. type StorageGatewayError struct { _ struct{} `type:"structure"` @@ -537,8 +636,8 @@ type StorediSCSIVolume struct { // don’t have this time stamp. 
CreatedDate *time.Time `type:"timestamp"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // Indicates if when the stored volume was created, existing data on the underlying @@ -657,15 +756,15 @@ func (s *Tag) Validate() error { type Tape struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ID of the pool that contains tapes that will be archived. The tapes in // this pool are archived in the S3 storage class that is associated with the // pool. When you use your backup application to eject the tape, the tape is - // archived directly into the storage class (Glacier or Deep Archive) that corresponds - // to the pool. + // archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) + // that corresponds to the pool. // // Valid values: "GLACIER", "DEEP_ARCHIVE" PoolId *string `min:"1" type:"string"` @@ -716,8 +815,8 @@ type TapeArchive struct { // format. CompletionTime *time.Time `type:"timestamp"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side + // encryption. This value can only be set when KMSEncrypted is true. Optional. 
KMSKey *string `min:"7" type:"string"` // The ID of the pool that was used to archive the tape. The tapes in this pool @@ -769,8 +868,8 @@ type TapeInfo struct { // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape - // is archived directly into the storage class (Glacier or Deep Archive) that - // corresponds to the pool. + // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep + // Archive) that corresponds to the pool. // // Valid values: "GLACIER", "DEEP_ARCHIVE" PoolId *string `min:"1" type:"string"` diff --git a/service/storagegateway/storagegatewayiface/interface.go b/service/storagegateway/storagegatewayiface/interface.go index 7f8398d36c1..b047d5a3f61 100644 --- a/service/storagegateway/storagegatewayiface/interface.go +++ b/service/storagegateway/storagegatewayiface/interface.go @@ -95,6 +95,8 @@ type ClientAPI interface { CreateTapesRequest(*storagegateway.CreateTapesInput) storagegateway.CreateTapesRequest + DeleteAutomaticTapeCreationPolicyRequest(*storagegateway.DeleteAutomaticTapeCreationPolicyInput) storagegateway.DeleteAutomaticTapeCreationPolicyRequest + DeleteBandwidthRateLimitRequest(*storagegateway.DeleteBandwidthRateLimitInput) storagegateway.DeleteBandwidthRateLimitRequest DeleteChapCredentialsRequest(*storagegateway.DeleteChapCredentialsInput) storagegateway.DeleteChapCredentialsRequest @@ -153,6 +155,8 @@ type ClientAPI interface { JoinDomainRequest(*storagegateway.JoinDomainInput) storagegateway.JoinDomainRequest + ListAutomaticTapeCreationPoliciesRequest(*storagegateway.ListAutomaticTapeCreationPoliciesInput) storagegateway.ListAutomaticTapeCreationPoliciesRequest + ListFileSharesRequest(*storagegateway.ListFileSharesInput) storagegateway.ListFileSharesRequest ListGatewaysRequest(*storagegateway.ListGatewaysInput) 
storagegateway.ListGatewaysRequest @@ -191,6 +195,8 @@ type ClientAPI interface { StartGatewayRequest(*storagegateway.StartGatewayInput) storagegateway.StartGatewayRequest + UpdateAutomaticTapeCreationPolicyRequest(*storagegateway.UpdateAutomaticTapeCreationPolicyInput) storagegateway.UpdateAutomaticTapeCreationPolicyRequest + UpdateBandwidthRateLimitRequest(*storagegateway.UpdateBandwidthRateLimitInput) storagegateway.UpdateBandwidthRateLimitRequest UpdateChapCredentialsRequest(*storagegateway.UpdateChapCredentialsInput) storagegateway.UpdateChapCredentialsRequest diff --git a/service/transfer/api_doc.go b/service/transfer/api_doc.go index ca7afb397b3..14ce064e3d2 100644 --- a/service/transfer/api_doc.go +++ b/service/transfer/api_doc.go @@ -3,16 +3,17 @@ // Package transfer provides the client and types for making API // requests to AWS Transfer. // -// AWS Transfer for SFTP is a fully managed service that enables the transfer -// of files directly into and out of Amazon S3 using the Secure File Transfer -// Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. -// AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer -// for SFTP—by integrating with existing authentication systems, and providing -// DNS routing with Amazon Route 53—so nothing changes for your customers -// and partners, or their applications. With your data in S3, you can use it -// with AWS services for processing, analytics, machine learning, and archiving. -// Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no -// infrastructure to buy and set up. +// AWS Transfer Family is a fully managed service that enables the transfer +// of files over the File Transfer Protocol (FTP), File Transfer Protocol +// over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly +// into and out of Amazon Simple Storage Service (Amazon S3). 
AWS helps you +// seamlessly migrate your file transfer workflows to AWS Transfer Family by +// integrating with existing authentication systems, and providing DNS routing +// with Amazon Route 53 so nothing changes for your customers and partners, +// or their applications. With your data in Amazon S3, you can use it with AWS +// services for processing, analytics, machine learning, and archiving. Getting +// started with AWS Transfer Family is easy since there is no infrastructure +// to buy and set up. // // See https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05 for more information on this service. // diff --git a/service/transfer/api_enums.go b/service/transfer/api_enums.go index 3c61d9e8edd..76a969d9787 100644 --- a/service/transfer/api_enums.go +++ b/service/transfer/api_enums.go @@ -38,10 +38,11 @@ func (enum HomeDirectoryType) MarshalValueBuf(b []byte) ([]byte, error) { } // Returns information related to the type of user authentication that is in -// use for a server's users. For SERVICE_MANAGED authentication, the Secure -// Shell (SSH) public keys are stored with a user on an SFTP server instance. -// For API_GATEWAY authentication, your custom authentication method is implemented -// by using an API call. A server can have only one method of authentication. +// use for a file transfer protocol-enabled server's users. For SERVICE_MANAGED +// authentication, the Secure Shell (SSH) public keys are stored with a user +// on the server instance. For API_GATEWAY authentication, your custom authentication +// method is implemented by using an API call. The server can have only one +// method of authentication. type IdentityProviderType string // Enum values for IdentityProviderType @@ -59,15 +60,33 @@ func (enum IdentityProviderType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } -// Describes the condition of the SFTP server with respect to its ability to -// perform file operations. 
There are six possible states: OFFLINE, ONLINE, -// STARTING, STOPPING, START_FAILED, and STOP_FAILED. +type Protocol string + +// Enum values for Protocol +const ( + ProtocolSftp Protocol = "SFTP" + ProtocolFtp Protocol = "FTP" + ProtocolFtps Protocol = "FTPS" +) + +func (enum Protocol) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum Protocol) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// Describes the condition of a file transfer protocol-enabled server with respect +// to its ability to perform file operations. There are six possible states: +// OFFLINE, ONLINE, STARTING, STOPPING, START_FAILED, and STOP_FAILED. // -// OFFLINE indicates that the SFTP server exists, but that it is not available -// for file operations. ONLINE indicates that the SFTP server is available to -// perform file operations. STARTING indicates that the SFTP server's was instantiated, -// but the server is not yet available to perform file operations. Under normal -// conditions, it can take a couple of minutes for an SFTP server to be completely +// OFFLINE indicates that the server exists, but that it is not available for +// file operations. ONLINE indicates that the server is available to perform +// file operations. STARTING indicates that the server was instantiated, but +// the server is not yet available to perform file operations. Under normal +// conditions, it can take a couple of minutes for the server to be completely // operational. Both START_FAILED and STOP_FAILED are error conditions. type State string diff --git a/service/transfer/api_errors.go b/service/transfer/api_errors.go index 6fe1ad77274..fe1021e7496 100644 --- a/service/transfer/api_errors.go +++ b/service/transfer/api_errors.go @@ -4,18 +4,24 @@ package transfer const ( + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". 
+ // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + // ErrCodeConflictException for service response error code // "ConflictException". // - // This exception is thrown when the UpdatServer is called for a server that - // has VPC as the endpoint type and the server's VpcEndpointID is not in the - // available state. + // This exception is thrown when the UpdateServer is called for a file transfer + // protocol-enabled server that has VPC as the endpoint type and the server's + // VpcEndpointID is not in the available state. ErrCodeConflictException = "ConflictException" // ErrCodeInternalServiceError for service response error code // "InternalServiceError". // - // This exception is thrown when an error occurs in the AWS Transfer for SFTP + // This exception is thrown when an error occurs in the AWS Transfer Family // service. ErrCodeInternalServiceError = "InternalServiceError" @@ -41,13 +47,13 @@ const ( // "ResourceNotFoundException". // // This exception is thrown when a resource is not found by the AWS Transfer - // for SFTP service. + // Family service. ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeServiceUnavailableException for service response error code // "ServiceUnavailableException". // - // The request has failed because the AWS Transfer for SFTP service is not available. + // The request has failed because the AWS Transfer Family service is not available. 
ErrCodeServiceUnavailableException = "ServiceUnavailableException" // ErrCodeThrottlingException for service response error code diff --git a/service/transfer/api_op_CreateServer.go b/service/transfer/api_op_CreateServer.go index 2cbe0f25ee1..e584ba6e814 100644 --- a/service/transfer/api_op_CreateServer.go +++ b/service/transfer/api_op_CreateServer.go @@ -13,49 +13,67 @@ import ( type CreateServerInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. + // Required when Protocols is set to FTPS. + Certificate *string `type:"string"` + // The virtual private cloud (VPC) endpoint settings that are configured for - // your SFTP server. With a VPC endpoint, you can restrict access to your SFTP - // server to resources only within your VPC. To control incoming internet traffic, - // you will need to invoke the UpdateServer API and attach an Elastic IP to - // your server's endpoint. + // your file transfer protocol-enabled server. When you host your endpoint within + // your VPC, you can make it accessible only to resources within your VPC, or + // you can attach Elastic IPs and make it accessible to clients over the internet. + // Your VPC's default security groups are automatically assigned to your endpoint. EndpointDetails *EndpointDetails `type:"structure"` - // The type of VPC endpoint that you want your SFTP server to connect to. You - // can choose to connect to the public internet or a virtual private cloud (VPC) - // endpoint. With a VPC endpoint, you can restrict access to your SFTP server - // and resources only within your VPC. + // The type of VPC endpoint that you want your file transfer protocol-enabled + // server to connect to. You can choose to connect to the public internet or + // a virtual private cloud (VPC) endpoint. With a VPC endpoint, you can restrict + // access to your server and resources only within your VPC. 
EndpointType EndpointType `type:"string" enum:"true"` // The RSA private key as generated by the ssh-keygen -N "" -f my-new-server-key // command. // - // If you aren't planning to migrate existing users from an existing SFTP server - // to a new AWS SFTP server, don't update the host key. Accidentally changing + // If you aren't planning to migrate existing users from an existing SFTP-enabled + // server to a new server, don't update the host key. Accidentally changing // a server's host key can be disruptive. // - // For more information, see "https://alpha-docs-aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" - // in the AWS SFTP User Guide. + // For more information, see Changing the Host Key for Your AWS Transfer Family + // Server (https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key) + // in the AWS Transfer Family User Guide. HostKey *string `type:"string" sensitive:"true"` - // This parameter is required when the IdentityProviderType is set to API_GATEWAY. - // Accepts an array containing all of the information required to call a customer-supplied - // authentication API, including the API Gateway URL. This property is not required - // when the IdentityProviderType is set to SERVICE_MANAGED. + // Required when IdentityProviderType is set to API_GATEWAY. Accepts an array + // containing all of the information required to call a customer-supplied authentication + // API, including the API Gateway URL. Not required when IdentityProviderType + // is set to SERVICE_MANAGED. IdentityProviderDetails *IdentityProviderDetails `type:"structure"` - // Specifies the mode of authentication for the SFTP server. The default value - // is SERVICE_MANAGED, which allows you to store and access SFTP user credentials - // within the AWS Transfer for SFTP service. Use the API_GATEWAY value to integrate - // with an identity provider of your choosing. 
The API_GATEWAY setting requires - // you to provide an API Gateway endpoint URL to call for authentication using - // the IdentityProviderDetails parameter. + // Specifies the mode of authentication for a file transfer protocol-enabled + // server. The default value is SERVICE_MANAGED, which allows you to store and + // access user credentials within the AWS Transfer Family service. Use the API_GATEWAY + // value to integrate with an identity provider of your choosing. The API_GATEWAY + // setting requires you to provide an API Gateway endpoint URL to call for authentication + // using the IdentityProviderDetails parameter. IdentityProviderType IdentityProviderType `type:"string" enum:"true"` - // A value that allows the service to write your SFTP users' activity to your - // Amazon CloudWatch logs for monitoring and auditing purposes. + // Allows the service to write your users' activity to your Amazon CloudWatch + // logs for monitoring and auditing purposes. LoggingRole *string `min:"20" type:"string"` - // Key-value pairs that can be used to group and search for servers. + // Specifies the file transfer protocol or protocols over which your file transfer + // protocol client can connect to your server's endpoint. The available protocols + // are: + // + // * Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over + // SSH + // + // * File Transfer Protocol Secure (FTPS): File transfer with TLS encryption + // + // * File Transfer Protocol (FTP): Unencrypted file transfer + Protocols []Protocol `min:"1" type:"list"` + + // Key-value pairs that can be used to group and search for file transfer protocol-enabled + // servers. 
Tags []Tag `min:"1" type:"list"` } @@ -70,6 +88,9 @@ func (s *CreateServerInput) Validate() error { if s.LoggingRole != nil && len(*s.LoggingRole) < 20 { invalidParams.Add(aws.NewErrParamMinLen("LoggingRole", 20)) } + if s.Protocols != nil && len(s.Protocols) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Protocols", 1)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) } @@ -100,7 +121,8 @@ func (s *CreateServerInput) Validate() error { type CreateServerOutput struct { _ struct{} `type:"structure"` - // The service-assigned ID of the SFTP server that is created. + // The service-assigned ID of the file transfer protocol-enabled server that + // is created. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -114,12 +136,12 @@ func (s CreateServerOutput) String() string { const opCreateServer = "CreateServer" // CreateServerRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Instantiates an autoscaling virtual server based on Secure File Transfer -// Protocol (SFTP) in AWS. When you make updates to your server or when you -// work with users, use the service-generated ServerId property that is assigned -// to the newly created server. +// Instantiates an autoscaling virtual server based on the selected file transfer +// protocol in AWS. When you make updates to your file transfer protocol-enabled +// server or when you work with users, use the service-generated ServerId property +// that is assigned to the newly created server. // // // Example sending a request using CreateServerRequest. 
// req := client.CreateServerRequest(params) diff --git a/service/transfer/api_op_CreateUser.go b/service/transfer/api_op_CreateUser.go index 5224e22e53a..3631806ab54 100644 --- a/service/transfer/api_op_CreateUser.go +++ b/service/transfer/api_op_CreateUser.go @@ -13,40 +13,42 @@ import ( type CreateUserInput struct { _ struct{} `type:"structure"` - // The landing directory (folder) for a user when they log in to the server - // using their SFTP client. + // The landing directory (folder) for a user when they log in to the file transfer + // protocol-enabled server using the client. // - // An example is /home/username. + // An example is your-Amazon-S3-bucket-name>/home/username. HomeDirectory *string `type:"string"` - // Logical directory mappings that specify what S3 paths and keys should be - // visible to your user and how you want to make them visible. You will need + // Logical directory mappings that specify what Amazon S3 paths and keys should + // be visible to your user and how you want to make them visible. You will need // to specify the "Entry" and "Target" pair, where Entry shows how the path - // is made visible and Target is the actual S3 path. If you only specify a target, - // it will be displayed as is. You will need to also make sure that your AWS - // IAM Role provides access to paths in Target. The following is an example. + // is made visible and Target is the actual Amazon S3 path. If you only specify + // a target, it will be displayed as is. You will need to also make sure that + // your AWS IAM Role provides access to paths in Target. The following is an + // example. // // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' // - // In most cases, you can use this value instead of the scope down policy to + // In most cases, you can use this value instead of the scope-down policy to // lock your user down to the designated home directory ("chroot"). 
To do this, // you can set Entry to '/' and set Target to the HomeDirectory parameter value. // - // If the target of a logical directory entry does not exist in S3, the entry - // will be ignored. As a workaround, you can use the S3 api to create 0 byte - // objects as place holders for your directory. If using the CLI, use the s3api - // call instead of s3 so you can use the put-object operation. For example, - // you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. - // Make sure that the end of the key name ends in a / for it to be considered - // a folder. + // If the target of a logical directory entry does not exist in Amazon S3, the + // entry will be ignored. As a workaround, you can use the Amazon S3 api to + // create 0 byte objects as place holders for your directory. If using the CLI, + // use the s3api call instead of s3 so you can use the put-object operation. + // For example, you use the following: aws s3api put-object --bucket bucketname + // --key path/to/folder/. Make sure that the end of the key name ends in a '/' + // for it to be considered a folder. HomeDirectoryMappings []HomeDirectoryMapEntry `min:"1" type:"list"` // The type of landing directory (folder) you want your users' home directory - // to be when they log into the SFTP server. If you set it to PATH, the user - // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. - // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings - // for how you want to make S3 paths visible to your user. + // to be when they log into the file transfer protocol-enabled server. If you + // set it to PATH, the user will see the absolute Amazon S3 bucket paths as + // is in their file transfer protocol clients. If you set it LOGICAL, you will + // need to provide mappings in the HomeDirectoryMappings for how you want to + // make Amazon S3 paths visible to your users. 
HomeDirectoryType HomeDirectoryType `type:"string" enum:"true"` // A scope-down policy for your user so you can use the same IAM role across @@ -54,45 +56,45 @@ type CreateUserInput struct { // Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, // ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. // - // For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON + // For scope-down policies, AWS Transfer Family stores the policy as a JSON // blob, instead of the Amazon Resource Name (ARN) of the policy. You save the // policy as a JSON blob and pass it in the Policy argument. // - // For an example of a scope-down policy, see "https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down">Creating - // a Scope-Down Policy. + // For an example of a scope-down policy, see Creating a Scope-Down Policy (https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down). // - // For more information, see "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + // For more information, see AssumeRole (https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) // in the AWS Security Token Service API Reference. Policy *string `type:"string"` - // The IAM role that controls your user's access to your Amazon S3 bucket. The + // The IAM role that controls your users' access to your Amazon S3 bucket. The // policies attached to this role will determine the level of access you want // to provide your users when transferring files into and out of your Amazon // S3 bucket or buckets. The IAM role should also contain a trust relationship - // that allows the SFTP server to access your resources when servicing your - // SFTP user's transfer requests. + // that allows the file transfer protocol-enabled server to access your resources + // when servicing your users' transfer requests. 
// // Role is a required field Role *string `min:"20" type:"string" required:"true"` - // A system-assigned unique identifier for an SFTP server instance. This is - // the specific SFTP server that you added your user to. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server instance. This is the specific server that you added your user to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` // The public portion of the Secure Shell (SSH) key used to authenticate the - // user to the SFTP server. + // user to the file transfer protocol-enabled server. SshPublicKeyBody *string `type:"string"` // Key-value pairs that can be used to group and search for users. Tags are // metadata attached to users for any purpose. Tags []Tag `min:"1" type:"list"` - // A unique string that identifies a user and is associated with a server as - // specified by the ServerId. This user name must be a minimum of 3 and a maximum - // of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, - // underscore, and hyphen. The user name can't start with a hyphen. + // A unique string that identifies a user and is associated with a file transfer + // protocol-enabled server as specified by the ServerId. This user name must + // be a minimum of 3 and a maximum of 32 characters long. The following are + // valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't + // start with a hyphen. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -157,12 +159,14 @@ func (s *CreateUserInput) Validate() error { type CreateUserOutput struct { _ struct{} `type:"structure"` - // The ID of the SFTP server that the user is attached to. + // The ID of the file transfer protocol-enabled server that the user is attached + // to. 
// // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // A unique string that identifies a user account associated with an SFTP server. + // A unique string that identifies a user account associated with a file transfer + // protocol-enabled server. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -176,15 +180,15 @@ func (s CreateUserOutput) String() string { const opCreateUser = "CreateUser" // CreateUserRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Creates a user and associates them with an existing Secure File Transfer -// Protocol (SFTP) server. You can only create and associate users with SFTP -// servers that have the IdentityProviderType set to SERVICE_MANAGED. Using -// parameters for CreateUser, you can specify the user name, set the home directory, -// store the user's public key, and assign the user's AWS Identity and Access -// Management (IAM) role. You can also optionally add a scope-down policy, and -// assign metadata with tags that can be used to group and search for users. +// Creates a user and associates them with an existing file transfer protocol-enabled +// server. You can only create and associate users with servers that have the +// IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, +// you can specify the user name, set the home directory, store the user's public +// key, and assign the user's AWS Identity and Access Management (IAM) role. +// You can also optionally add a scope-down policy, and assign metadata with +// tags that can be used to group and search for users. // // // Example sending a request using CreateUserRequest. 
// req := client.CreateUserRequest(params) diff --git a/service/transfer/api_op_DeleteServer.go b/service/transfer/api_op_DeleteServer.go index 110ff4d0a3e..44c65237991 100644 --- a/service/transfer/api_op_DeleteServer.go +++ b/service/transfer/api_op_DeleteServer.go @@ -14,7 +14,8 @@ import ( type DeleteServerInput struct { _ struct{} `type:"structure"` - // A unique system-assigned identifier for an SFTP server instance. + // A unique system-assigned identifier for a file transfer protocol-enabled + // server instance. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -54,9 +55,9 @@ func (s DeleteServerOutput) String() string { const opDeleteServer = "DeleteServer" // DeleteServerRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Deletes the Secure File Transfer Protocol (SFTP) server that you specify. +// Deletes the file transfer protocol-enabled server that you specify. // // No response returns from this operation. // diff --git a/service/transfer/api_op_DeleteSshPublicKey.go b/service/transfer/api_op_DeleteSshPublicKey.go index d00e5d64cc8..e1082c3fedd 100644 --- a/service/transfer/api_op_DeleteSshPublicKey.go +++ b/service/transfer/api_op_DeleteSshPublicKey.go @@ -14,7 +14,7 @@ import ( type DeleteSshPublicKeyInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) + // A system-assigned unique identifier for a file transfer protocol-enabled // server instance that has the user assigned to it. // // ServerId is a required field @@ -79,7 +79,7 @@ func (s DeleteSshPublicKeyOutput) String() string { const opDeleteSshPublicKey = "DeleteSshPublicKey" // DeleteSshPublicKeyRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // // Deletes a user's Secure Shell (SSH) public key. 
// diff --git a/service/transfer/api_op_DeleteUser.go b/service/transfer/api_op_DeleteUser.go index c7356b6e18d..7e8c2323b7b 100644 --- a/service/transfer/api_op_DeleteUser.go +++ b/service/transfer/api_op_DeleteUser.go @@ -14,13 +14,14 @@ import ( type DeleteUserInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server instance that has - // the user assigned to it. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server instance that has the user assigned to it. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // A unique string that identifies a user that is being deleted from the server. + // A unique string that identifies a user that is being deleted from a file + // transfer protocol-enabled server. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -67,9 +68,10 @@ func (s DeleteUserOutput) String() string { const opDeleteUser = "DeleteUser" // DeleteUserRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Deletes the user belonging to the server you specify. +// Deletes the user belonging to a file transfer protocol-enabled server you +// specify. // // No response returns from this operation. // diff --git a/service/transfer/api_op_DescribeServer.go b/service/transfer/api_op_DescribeServer.go index f810d8a0165..bdb0d990339 100644 --- a/service/transfer/api_op_DescribeServer.go +++ b/service/transfer/api_op_DescribeServer.go @@ -12,7 +12,8 @@ import ( type DescribeServerInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server. 
// // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -43,7 +44,8 @@ func (s *DescribeServerInput) Validate() error { type DescribeServerOutput struct { _ struct{} `type:"structure"` - // An array containing the properties of the server with the ServerID you specified. + // An array containing the properties of a file transfer protocol-enabled server + // with the ServerID you specified. // // Server is a required field Server *DescribedServer `type:"structure" required:"true"` @@ -57,12 +59,13 @@ func (s DescribeServerOutput) String() string { const opDescribeServer = "DescribeServer" // DescribeServerRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Describes the server that you specify by passing the ServerId parameter. +// Describes a file transfer protocol-enabled server that you specify by passing +// the ServerId parameter. // -// The response contains a description of the server's properties. When you -// set EndpointType to VPC, the response will contain the EndpointDetails. +// The response contains a description of a server's properties. When you set +// EndpointType to VPC, the response will contain the EndpointDetails. // // // Example sending a request using DescribeServerRequest. // req := client.DescribeServerRequest(params) diff --git a/service/transfer/api_op_DescribeUser.go b/service/transfer/api_op_DescribeUser.go index 58401465e7e..e371b3475df 100644 --- a/service/transfer/api_op_DescribeUser.go +++ b/service/transfer/api_op_DescribeUser.go @@ -12,15 +12,15 @@ import ( type DescribeUserInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that has this user - // assigned. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server that has this user assigned. 
// // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // The name of the user assigned to one or more servers. User names are part - // of the sign-in credentials to use the AWS Transfer for SFTP service and perform - // file transfer tasks. + // The name of the user assigned to one or more file transfer protocol-enabled + // servers. User names are part of the sign-in credentials to use the AWS Transfer + // Family service and perform file transfer tasks. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -58,8 +58,8 @@ func (s *DescribeUserInput) Validate() error { type DescribeUserOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that has this user - // assigned. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server that has this user assigned. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -79,10 +79,10 @@ func (s DescribeUserOutput) String() string { const opDescribeUser = "DescribeUser" // DescribeUserRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Describes the user assigned to a specific server, as identified by its ServerId -// property. +// Describes the user assigned to the specific file transfer protocol-enabled +// server, as identified by its ServerId property. // // The response from this call returns the properties of the user associated // with the ServerId value that was specified. 
diff --git a/service/transfer/api_op_ImportSshPublicKey.go b/service/transfer/api_op_ImportSshPublicKey.go index 808e4d28952..d993e16f99d 100644 --- a/service/transfer/api_op_ImportSshPublicKey.go +++ b/service/transfer/api_op_ImportSshPublicKey.go @@ -12,7 +12,8 @@ import ( type ImportSshPublicKeyInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -22,7 +23,8 @@ type ImportSshPublicKeyInput struct { // SshPublicKeyBody is a required field SshPublicKeyBody *string `type:"string" required:"true"` - // The name of the user account that is assigned to one or more servers. + // The name of the user account that is assigned to one or more file transfer + // protocol-enabled servers. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -61,19 +63,19 @@ func (s *ImportSshPublicKeyInput) Validate() error { return nil } -// This response identifies the user, the server they belong to, and the identifier -// of the SSH public key associated with that user. A user can have more than -// one key on each server that they are associated with. +// Identifies the user, the file transfer protocol-enabled server they belong +// to, and the identifier of the SSH public key associated with that user. A +// user can have more than one key on each server that they are associated with. type ImportSshPublicKeyOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // This identifier is the name given to a public key by the system that was - // imported. 
+ // The name given to a public key by the system that was imported. // // SshPublicKeyId is a required field SshPublicKeyId *string `min:"21" type:"string" required:"true"` @@ -92,10 +94,11 @@ func (s ImportSshPublicKeyOutput) String() string { const opImportSshPublicKey = "ImportSshPublicKey" // ImportSshPublicKeyRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // // Adds a Secure Shell (SSH) public key to a user account identified by a UserName -// value assigned to a specific server, identified by ServerId. +// value assigned to the specific file transfer protocol-enabled server, identified +// by ServerId. // // The response returns the UserName value, the ServerId value, and the name // of the SshPublicKeyId. diff --git a/service/transfer/api_op_ListServers.go b/service/transfer/api_op_ListServers.go index 29fb328a928..abb82acddfb 100644 --- a/service/transfer/api_op_ListServers.go +++ b/service/transfer/api_op_ListServers.go @@ -12,13 +12,14 @@ import ( type ListServersInput struct { _ struct{} `type:"structure"` - // Specifies the number of servers to return as a response to the ListServers - // query. + // Specifies the number of file transfer protocol-enabled servers to return + // as a response to the ListServers query. MaxResults *int64 `min:"1" type:"integer"` - // When additional results are obtained from the ListServers command, a NextToken + // When additional results are obtained from theListServers command, a NextToken // parameter is returned in the output. You can then pass the NextToken parameter - // in a subsequent command to continue listing additional servers. + // in a subsequent command to continue listing additional file transfer protocol-enabled + // servers. NextToken *string `min:"1" type:"string"` } @@ -48,10 +49,11 @@ type ListServersOutput struct { // When you can get additional results from the ListServers operation, a NextToken // parameter is returned in the output. 
In a following command, you can pass - // in the NextToken parameter to continue listing additional servers. + // in the NextToken parameter to continue listing additional file transfer protocol-enabled + // servers. NextToken *string `min:"1" type:"string"` - // An array of servers that were listed. + // An array of file transfer protocol-enabled servers that were listed. // // Servers is a required field Servers []ListedServer `type:"list" required:"true"` @@ -65,10 +67,10 @@ func (s ListServersOutput) String() string { const opListServers = "ListServers" // ListServersRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Lists the Secure File Transfer Protocol (SFTP) servers that are associated -// with your AWS account. +// Lists the file transfer protocol-enabled servers that are associated with +// your AWS account. // // // Example sending a request using ListServersRequest. // req := client.ListServersRequest(params) diff --git a/service/transfer/api_op_ListTagsForResource.go b/service/transfer/api_op_ListTagsForResource.go index 61009e52a85..c872e75ffb2 100644 --- a/service/transfer/api_op_ListTagsForResource.go +++ b/service/transfer/api_op_ListTagsForResource.go @@ -60,7 +60,7 @@ func (s *ListTagsForResourceInput) Validate() error { type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // This value is the ARN you specified to list the tags of. + // The ARN you specified to list the tags of. Arn *string `min:"20" type:"string"` // When you can get additional results from the ListTagsForResource call, a @@ -81,7 +81,7 @@ func (s ListTagsForResourceOutput) String() string { const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // // Lists all of the tags associated with the Amazon Resource Number (ARN) you // specify. 
The resource can be a user, server, or role. diff --git a/service/transfer/api_op_ListUsers.go b/service/transfer/api_op_ListUsers.go index 742612d0bdc..fdc4afc269f 100644 --- a/service/transfer/api_op_ListUsers.go +++ b/service/transfer/api_op_ListUsers.go @@ -20,7 +20,7 @@ type ListUsersInput struct { // to the NextToken parameter to continue listing additional users. NextToken *string `min:"1" type:"string"` - // A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) + // A system-assigned unique identifier for a file transfer protocol-enabled // server that has users assigned to it. // // ServerId is a required field @@ -63,8 +63,8 @@ type ListUsersOutput struct { // to the NextToken parameter to continue listing additional users. NextToken *string `min:"1" type:"string"` - // A system-assigned unique identifier for an SFTP server that the users are - // assigned to. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server that the users are assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -84,9 +84,10 @@ func (s ListUsersOutput) String() string { const opListUsers = "ListUsers" // ListUsersRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Lists the users for the server that you specify by passing the ServerId parameter. +// Lists the users for a file transfer protocol-enabled server that you specify +// by passing the ServerId parameter. // // // Example sending a request using ListUsersRequest. 
// req := client.ListUsersRequest(params) diff --git a/service/transfer/api_op_StartServer.go b/service/transfer/api_op_StartServer.go index c090d8791ce..92b702345bf 100644 --- a/service/transfer/api_op_StartServer.go +++ b/service/transfer/api_op_StartServer.go @@ -14,7 +14,8 @@ import ( type StartServerInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that you start. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server that you start. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -54,11 +55,11 @@ func (s StartServerOutput) String() string { const opStartServer = "StartServer" // StartServerRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE -// to ONLINE. It has no impact on an SFTP server that is already ONLINE. An -// ONLINE server can accept and process file transfer jobs. +// Changes the state of a file transfer protocol-enabled server from OFFLINE +// to ONLINE. It has no impact on a server that is already ONLINE. An ONLINE +// server can accept and process file transfer jobs. // // The state of STARTING indicates that the server is in an intermediate state, // either not fully able to respond, or not fully online. The values of START_FAILED diff --git a/service/transfer/api_op_StopServer.go b/service/transfer/api_op_StopServer.go index 950b9862f00..8f3b873d77d 100644 --- a/service/transfer/api_op_StopServer.go +++ b/service/transfer/api_op_StopServer.go @@ -14,7 +14,8 @@ import ( type StopServerInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that you stopped. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server that you stopped. 
// // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -54,13 +55,13 @@ func (s StopServerOutput) String() string { const opStopServer = "StopServer" // StopServerRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Changes the state of an SFTP server from ONLINE to OFFLINE. An OFFLINE server -// cannot accept and process file transfer jobs. Information tied to your server -// such as server and user properties are not affected by stopping your server. -// Stopping a server will not reduce or impact your Secure File Transfer Protocol -// (SFTP) endpoint billing. +// Changes the state of a file transfer protocol-enabled server from ONLINE +// to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. +// Information tied to your server, such as server and user properties, are +// not affected by stopping your server. Stopping the server will not reduce +// or impact your file transfer protocol endpoint billing. // // The state of STOPPING indicates that the server is in an intermediate state, // either not fully able to respond, or not fully offline. The values of STOP_FAILED diff --git a/service/transfer/api_op_TagResource.go b/service/transfer/api_op_TagResource.go index 96c1b264a31..2a3f8f47e48 100644 --- a/service/transfer/api_op_TagResource.go +++ b/service/transfer/api_op_TagResource.go @@ -77,7 +77,7 @@ func (s TagResourceOutput) String() string { const opTagResource = "TagResource" // TagResourceRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // // Attaches a key-value pair to a resource, as identified by its Amazon Resource // Name (ARN). Resources are users, servers, roles, and other entities. 
diff --git a/service/transfer/api_op_TestIdentityProvider.go b/service/transfer/api_op_TestIdentityProvider.go index 7ca1fb7420a..426e9192e41 100644 --- a/service/transfer/api_op_TestIdentityProvider.go +++ b/service/transfer/api_op_TestIdentityProvider.go @@ -12,13 +12,25 @@ import ( type TestIdentityProviderInput struct { _ struct{} `type:"structure"` - // A system-assigned identifier for a specific server. That server's user authentication - // method is tested with a user name and password. + // A system-assigned identifier for a specific file transfer protocol-enabled + // server. That server's user authentication method is tested with a user name + // and password. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // This request parameter is the name of the user account to be tested. + // The type of file transfer protocol to be tested. + // + // The available protocols are: + // + // * Secure Shell (SSH) File Transfer Protocol (SFTP) + // + // * File Transfer Protocol Secure (FTPS) + // + // * File Transfer Protocol (FTP) + ServerProtocol Protocol `type:"string" enum:"true"` + + // The name of the user account to be tested. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -84,13 +96,14 @@ func (s TestIdentityProviderOutput) String() string { const opTestIdentityProvider = "TestIdentityProvider" // TestIdentityProviderRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// If the IdentityProviderType of the server is API_Gateway, tests whether your -// API Gateway is set up successfully. We highly recommend that you call this -// operation to test your authentication method as soon as you create your server. -// By doing so, you can troubleshoot issues with the API Gateway integration -// to ensure that your users can successfully use the service. 
+// If the IdentityProviderType of a file transfer protocol-enabled server is +// API_Gateway, tests whether your API Gateway is set up successfully. We highly +// recommend that you call this operation to test your authentication method +// as soon as you create your server. By doing so, you can troubleshoot issues +// with the API Gateway integration to ensure that your users can successfully +// use the service. // // // Example sending a request using TestIdentityProviderRequest. // req := client.TestIdentityProviderRequest(params) diff --git a/service/transfer/api_op_UntagResource.go b/service/transfer/api_op_UntagResource.go index 4a722cff700..7699fe97dde 100644 --- a/service/transfer/api_op_UntagResource.go +++ b/service/transfer/api_op_UntagResource.go @@ -14,9 +14,9 @@ import ( type UntagResourceInput struct { _ struct{} `type:"structure"` - // This is the value of the resource that will have the tag removed. An Amazon - // Resource Name (ARN) is an identifier for a specific AWS resource, such as - // a server, user, or role. + // The value of the resource that will have the tag removed. An Amazon Resource + // Name (ARN) is an identifier for a specific AWS resource, such as a server, + // user, or role. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -70,7 +70,7 @@ func (s UntagResourceOutput) String() string { const opUntagResource = "UntagResource" // UntagResourceRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // // Detaches a key-value pair from a resource, as identified by its Amazon Resource // Name (ARN). Resources are users, servers, roles, and other entities. 
diff --git a/service/transfer/api_op_UpdateServer.go b/service/transfer/api_op_UpdateServer.go index 5a695a8d94c..47b59871717 100644 --- a/service/transfer/api_op_UpdateServer.go +++ b/service/transfer/api_op_UpdateServer.go @@ -12,40 +12,56 @@ import ( type UpdateServerInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. + // Required when Protocols is set to FTPS. + Certificate *string `type:"string"` + // The virtual private cloud (VPC) endpoint settings that are configured for - // your SFTP server. With a VPC endpoint, you can restrict access to your SFTP - // server to resources only within your VPC. To control incoming internet traffic, - // you will need to associate one or more Elastic IP addresses with your server's - // endpoint. + // your file transfer protocol-enabled server. With a VPC endpoint, you can + // restrict access to your server to resources only within your VPC. To control + // incoming internet traffic, you will need to associate one or more Elastic + // IP addresses with your server's endpoint. EndpointDetails *EndpointDetails `type:"structure"` - // The type of endpoint that you want your SFTP server to connect to. You can - // choose to connect to the public internet or a virtual private cloud (VPC) - // endpoint. With a VPC endpoint, your SFTP server isn't accessible over the - // public internet. + // The type of endpoint that you want your file transfer protocol-enabled server + // to connect to. You can choose to connect to the public internet or a VPC + // endpoint. With a VPC endpoint, your server isn't accessible over the public + // internet. EndpointType EndpointType `type:"string" enum:"true"` // The RSA private key as generated by ssh-keygen -N "" -f my-new-server-key. // - // If you aren't planning to migrate existing users from an existing SFTP server - // to a new AWS SFTP server, don't update the host key. 
Accidentally changing - // a server's host key can be disruptive. + // If you aren't planning to migrate existing users from an existing file transfer + // protocol-enabled server to a new server, don't update the host key. Accidentally + // changing a server's host key can be disruptive. // - // For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" - // in the AWS SFTP User Guide. + // For more information, see Changing the Host Key for Your AWS Transfer Family + // Server (https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key) + // in the AWS Transfer Family User Guide. HostKey *string `type:"string" sensitive:"true"` - // This response parameter is an array containing all of the information required - // to call a customer's authentication API method. + // An array containing all of the information required to call a customer's + // authentication API method. IdentityProviderDetails *IdentityProviderDetails `type:"structure"` - // A value that changes the AWS Identity and Access Management (IAM) role that - // allows Amazon S3 events to be logged in Amazon CloudWatch, turning logging - // on or off. + // Changes the AWS Identity and Access Management (IAM) role that allows Amazon + // S3 events to be logged in Amazon CloudWatch, turning logging on or off. LoggingRole *string `type:"string"` - // A system-assigned unique identifier for an SFTP server instance that the - // user account is assigned to. + // Specifies the file transfer protocol or protocols over which your file transfer + // protocol client can connect to your server's endpoint. 
The available protocols + // are: + // + // * Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over + // SSH + // + // * File Transfer Protocol Secure (FTPS): File transfer with TLS encryption + // + // * File Transfer Protocol (FTP): Unencrypted file transfer + Protocols []Protocol `min:"1" type:"list"` + + // A system-assigned unique identifier for a file transfer protocol-enabled + // server instance that the user account is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -59,6 +75,9 @@ func (s UpdateServerInput) String() string { // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateServerInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "UpdateServerInput"} + if s.Protocols != nil && len(s.Protocols) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Protocols", 1)) + } if s.ServerId == nil { invalidParams.Add(aws.NewErrParamRequired("ServerId")) @@ -86,8 +105,8 @@ func (s *UpdateServerInput) Validate() error { type UpdateServerOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that the user account - // is assigned to. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server that the user account is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -101,12 +120,12 @@ func (s UpdateServerOutput) String() string { const opUpdateServer = "UpdateServer" // UpdateServerRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // -// Updates the server properties after that server has been created. +// Updates the file transfer protocol-enabled server's properties after that +// server has been created. // -// The UpdateServer call returns the ServerId of the Secure File Transfer Protocol -// (SFTP) server you updated. 
+// The UpdateServer call returns the ServerId of the server you updated. // // // Example sending a request using UpdateServerRequest. // req := client.UpdateServerRequest(params) diff --git a/service/transfer/api_op_UpdateUser.go b/service/transfer/api_op_UpdateUser.go index e577e755fc6..8fc03ec8986 100644 --- a/service/transfer/api_op_UpdateUser.go +++ b/service/transfer/api_op_UpdateUser.go @@ -13,40 +13,43 @@ import ( type UpdateUserInput struct { _ struct{} `type:"structure"` - // A parameter that specifies the landing directory (folder) for a user when - // they log in to the server using their client. + // Specifies the landing directory (folder) for a user when they log in to the + // file transfer protocol-enabled server using their file transfer protocol + // client. // - // An example is /home/username. + // An example is your-Amazon-S3-bucket-name>/home/username. HomeDirectory *string `type:"string"` - // Logical directory mappings that specify what S3 paths and keys should be - // visible to your user and how you want to make them visible. You will need + // Logical directory mappings that specify what Amazon S3 paths and keys should + // be visible to your user and how you want to make them visible. You will need // to specify the "Entry" and "Target" pair, where Entry shows how the path - // is made visible and Target is the actual S3 path. If you only specify a target, - // it will be displayed as is. You will need to also make sure that your AWS - // IAM Role provides access to paths in Target. The following is an example. + // is made visible and Target is the actual Amazon S3 path. If you only specify + // a target, it will be displayed as is. You will need to also make sure that + // your AWS IAM Role provides access to paths in Target. The following is an + // example. 
// // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' // - // In most cases, you can use this value instead of the scope down policy to + // In most cases, you can use this value instead of the scope-down policy to // lock your user down to the designated home directory ("chroot"). To do this, // you can set Entry to '/' and set Target to the HomeDirectory parameter value. // - // If the target of a logical directory entry does not exist in S3, the entry - // will be ignored. As a workaround, you can use the S3 api to create 0 byte - // objects as place holders for your directory. If using the CLI, use the s3api - // call instead of s3 so you can use the put-object operation. For example, - // you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. - // Make sure that the end of the key name ends in a / for it to be considered - // a folder. + // If the target of a logical directory entry does not exist in Amazon S3, the + // entry will be ignored. As a workaround, you can use the Amazon S3 api to + // create 0 byte objects as place holders for your directory. If using the CLI, + // use the s3api call instead of s3 so you can use the put-object operation. + // For example, you use the following: aws s3api put-object --bucket bucketname + // --key path/to/folder/. Make sure that the end of the key name ends in a / + // for it to be considered a folder. HomeDirectoryMappings []HomeDirectoryMapEntry `min:"1" type:"list"` // The type of landing directory (folder) you want your users' home directory - // to be when they log into the SFTP serve. If you set it to PATH, the user - // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. - // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings - // for how you want to make S3 paths visible to your user. 
+ // to be when they log into the file transfer protocol-enabled server. If you + // set it to PATH, the user will see the absolute Amazon S3 bucket paths as + // is in their file transfer protocol clients. If you set it LOGICAL, you will + // need to provide mappings in the HomeDirectoryMappings for how you want to + // make Amazon S3 paths visible to your users. HomeDirectoryType HomeDirectoryType `type:"string" enum:"true"` // Allows you to supply a scope-down policy for your user so you can use the @@ -55,36 +58,36 @@ type UpdateUserInput struct { // Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, // and ${Transfer:HomeBucket}. // - // For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON + // For scope-down policies, AWS Transfer Family stores the policy as a JSON // blob, instead of the Amazon Resource Name (ARN) of the policy. You save the // policy as a JSON blob and pass it in the Policy argument. // - // For an example of a scope-down policy, see "https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down">Creating - // a Scope-Down Policy. + // For an example of a scope-down policy, see Creating a Scope-Down Policy (https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down). // - // For more information, see "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + // For more information, see AssumeRole (https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) // in the AWS Security Token Service API Reference. Policy *string `type:"string"` - // The IAM role that controls your user's access to your Amazon S3 bucket. The + // The IAM role that controls your users' access to your Amazon S3 bucket. The // policies attached to this role will determine the level of access you want // to provide your users when transferring files into and out of your Amazon // S3 bucket or buckets. 
The IAM role should also contain a trust relationship - // that allows the Secure File Transfer Protocol (SFTP) server to access your - // resources when servicing your SFTP user's transfer requests. + // that allows the file transfer protocol-enabled server to access your resources + // when servicing your users' transfer requests. Role *string `min:"20" type:"string"` - // A system-assigned unique identifier for an SFTP server instance that the - // user account is assigned to. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server instance that the user account is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // A unique string that identifies a user and is associated with a server as - // specified by the ServerId. This is the string that will be used by your user - // when they log in to your SFTP server. This user name is a minimum of 3 and - // a maximum of 32 characters long. The following are valid characters: a-z, - // A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen. + // A unique string that identifies a user and is associated with a file transfer + // protocol-enabled server as specified by the ServerId. This is the string + // that will be used by your user when they log in to your server. This user + // name is a minimum of 3 and a maximum of 32 characters long. The following + // are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name + // can't start with a hyphen. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -132,19 +135,19 @@ func (s *UpdateUserInput) Validate() error { return nil } -// UpdateUserResponse returns the user name and server identifier for the request -// to update a user's properties. +// UpdateUserResponse returns the user name and file transfer protocol-enabled +// server identifier for the request to update a user's properties. 
type UpdateUserOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server instance that the - // user account is assigned to. + // A system-assigned unique identifier for a file transfer protocol-enabled + // server instance that the user account is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // The unique identifier for a user that is assigned to the SFTP server instance - // that was specified in the request. + // The unique identifier for a user that is assigned to a file transfer protocol-enabled + // server instance that was specified in the request. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -158,7 +161,7 @@ func (s UpdateUserOutput) String() string { const opUpdateUser = "UpdateUser" // UpdateUserRequest returns a request value for making API operation for -// AWS Transfer for SFTP. +// AWS Transfer Family. // // Assigns new properties to a user. Parameters you pass modify any or all of // the following: the home directory, role, and policy for the UserName and diff --git a/service/transfer/api_types.go b/service/transfer/api_types.go index 40e8846a9b0..fa5293a33ca 100644 --- a/service/transfer/api_types.go +++ b/service/transfer/api_types.go @@ -12,68 +12,87 @@ import ( var _ aws.Config var _ = awsutil.Prettify -// Describes the properties of the server that was specified. Information returned -// includes the following: the server Amazon Resource Name (ARN), the authentication -// configuration and type, the logging role, the server ID and state, and assigned -// tags or metadata. +// Describes the properties of a file transfer protocol-enabled server that +// was specified. Information returned includes the following: the server Amazon +// Resource Name (ARN), the authentication configuration and type, the logging +// role, the server ID and state, and assigned tags or metadata. 
type DescribedServer struct { _ struct{} `type:"structure"` - // Specifies the unique Amazon Resource Name (ARN) for the server to be described. + // Specifies the unique Amazon Resource Name (ARN) for a file transfer protocol-enabled + // server to be described. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` + // The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. + // Required when Protocols is set to FTPS. + Certificate *string `type:"string"` + // The virtual private cloud (VPC) endpoint settings that you configured for - // your SFTP server. + // your file transfer protocol-enabled server. EndpointDetails *EndpointDetails `type:"structure"` - // The type of endpoint that your SFTP server is connected to. If your SFTP - // server is connected to a VPC endpoint, your server isn't accessible over - // the public internet. + // The type of endpoint that your file transfer protocol-enabled server is connected + // to. If your server is connected to a VPC endpoint, your server isn't accessible + // over the public internet. EndpointType EndpointType `type:"string" enum:"true"` - // This value contains the message-digest algorithm (MD5) hash of the server's - // host key. This value is equivalent to the output of the ssh-keygen -l -E - // md5 -f my-new-server-key command. + // Contains the message-digest algorithm (MD5) hash of a file transfer protocol-enabled + // server's host key. This value is equivalent to the output of the ssh-keygen + // -l -E md5 -f my-new-server-key command. HostKeyFingerprint *string `type:"string"` // Specifies information to call a customer-supplied authentication API. This - // field is not populated when the IdentityProviderType of the server is SERVICE_MANAGED>. + // field is not populated when the IdentityProviderType of a file transfer protocol-enabled + // server is SERVICE_MANAGED. 
IdentityProviderDetails *IdentityProviderDetails `type:"structure"` - // This property defines the mode of authentication method enabled for this - // service. A value of SERVICE_MANAGED means that you are using this server - // to store and access SFTP user credentials within the service. A value of + // Defines the mode of authentication method enabled for this service. A value + // of SERVICE_MANAGED means that you are using this file transfer protocol-enabled + // server to store and access user credentials within the service. A value of // API_GATEWAY indicates that you have integrated an API Gateway endpoint that // will be invoked for authenticating your user into the service. IdentityProviderType IdentityProviderType `type:"string" enum:"true"` - // This property is an AWS Identity and Access Management (IAM) entity that - // allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. - // When set, user activity can be viewed in your CloudWatch logs. + // An AWS Identity and Access Management (IAM) entity that allows a file transfer + // protocol-enabled server to turn on Amazon CloudWatch logging for Amazon S3 + // events. When set, user activity can be viewed in your CloudWatch logs. LoggingRole *string `min:"20" type:"string"` - // This property is a unique system-assigned identifier for the SFTP server + // Specifies the file transfer protocol or protocols over which your file transfer + // protocol client can connect to your server's endpoint. The available protocols + // are: + // + // * Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over + // SSH + // + // * File Transfer Protocol Secure (FTPS): File transfer with TLS encryption + // + // * File Transfer Protocol (FTP): Unencrypted file transfer + Protocols []Protocol `min:"1" type:"list"` + + // Unique system-assigned identifier for a file transfer protocol-enabled server // that you instantiate. 
ServerId *string `min:"19" type:"string"` - // The condition of the SFTP server for the server that was described. A value - // of ONLINE indicates that the server can accept jobs and transfer files. A - // State value of OFFLINE means that the server cannot perform file transfer - // operations. + // The condition of a file transfer protocol-enabled server for the server that + // was described. A value of ONLINE indicates that the server can accept jobs + // and transfer files. A State value of OFFLINE means that the server cannot + // perform file transfer operations. // // The states of STARTING and STOPPING indicate that the server is in an intermediate // state, either not fully able to respond, or not fully offline. The values // of START_FAILED or STOP_FAILED can indicate an error condition. State State `type:"string" enum:"true"` - // This property contains the key-value pairs that you can use to search for - // and group servers that were assigned to the server that was described. + // Contains the key-value pairs that you can use to search for and group file + // transfer protocol-enabled servers that were assigned to the server that was + // described. Tags []Tag `min:"1" type:"list"` - // The number of users that are assigned to the SFTP server you specified with - // the ServerId. + // The number of users that are assigned to a file transfer protocol-enabled + // server you specified with the ServerId. UserCount *int64 `type:"integer"` } @@ -86,62 +105,59 @@ func (s DescribedServer) String() string { type DescribedUser struct { _ struct{} `type:"structure"` - // This property contains the unique Amazon Resource Name (ARN) for the user - // that was requested to be described. + // Contains the unique Amazon Resource Name (ARN) for the user that was requested + // to be described. 
// // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // This property specifies the landing directory (or folder), which is the location - // that files are written to or read from in an Amazon S3 bucket for the described - // user. An example is /your s3 bucket name/home/username . + // Specifies the landing directory (or folder), which is the location that files + // are written to or read from in an Amazon S3 bucket for the described user. + // An example is /your s3 bucket name/home/username . HomeDirectory *string `type:"string"` - // Logical directory mappings that you specified for what S3 paths and keys - // should be visible to your user and how you want to make them visible. You - // will need to specify the "Entry" and "Target" pair, where Entry shows how - // the path is made visible and Target is the actual S3 path. If you only specify - // a target, it will be displayed as is. You will need to also make sure that - // your AWS IAM Role provides access to paths in Target. - // - // In most cases, you can use this value instead of the scope down policy to - // lock your user down to the designated home directory ("chroot"). To do this, - // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + // Logical directory mappings that you specified for what Amazon S3 paths and + // keys should be visible to your user and how you want to make them visible. + // You will need to specify the "Entry" and "Target" pair, where Entry shows + // how the path is made visible and Target is the actual Amazon S3 path. If + // you only specify a target, it will be displayed as is. You will need to also + // make sure that your AWS IAM Role provides access to paths in Target. // - // In most cases, you can use this value instead of the scope down policy to + // In most cases, you can use this value instead of the scope-down policy to // lock your user down to the designated home directory ("chroot"). 
To do this, // you can set Entry to '/' and set Target to the HomeDirectory parameter value. HomeDirectoryMappings []HomeDirectoryMapEntry `min:"1" type:"list"` - // The type of landing directory (folder) you mapped for your users' to see - // when they log into the SFTP server. If you set it to PATH, the user will - // see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you - // set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings - // for how you want to make S3 paths visible to your user. + // The type of landing directory (folder) you mapped for your users to see when + // they log into the file transfer protocol-enabled server. If you set it to + // PATH, the user will see the absolute Amazon S3 bucket paths as is in their + // file transfer protocol clients. If you set it LOGICAL, you will need to provide + // mappings in the HomeDirectoryMappings for how you want to make Amazon S3 + // paths visible to your users. HomeDirectoryType HomeDirectoryType `type:"string" enum:"true"` // Specifies the name of the policy in use for the described user. Policy *string `type:"string"` - // This property specifies the IAM role that controls your user's access to - // your Amazon S3 bucket. The policies attached to this role will determine - // the level of access you want to provide your users when transferring files - // into and out of your Amazon S3 bucket or buckets. The IAM role should also - // contain a trust relationship that allows the SFTP server to access your resources - // when servicing your SFTP user's transfer requests. + // Specifies the IAM role that controls your users' access to your Amazon S3 + // bucket. The policies attached to this role will determine the level of access + // you want to provide your users when transferring files into and out of your + // Amazon S3 bucket or buckets. 
The IAM role should also contain a trust relationship + // that allows a file transfer protocol-enabled server to access your resources + // when servicing your users' transfer requests. Role *string `min:"20" type:"string"` - // This property contains the public key portion of the Secure Shell (SSH) keys - // stored for the described user. + // Contains the public key portion of the Secure Shell (SSH) keys stored for + // the described user. SshPublicKeys []SshPublicKey `type:"list"` - // This property contains the key-value pairs for the user requested. Tag can - // be used to search for and group users for a variety of purposes. + // Contains the key-value pairs for the user requested. Tag can be used to search + // for and group users for a variety of purposes. Tags []Tag `min:"1" type:"list"` - // This property is the name of the user that was requested to be described. - // User names are used for authentication purposes. This is the string that - // will be used by your user when they log in to your SFTP server. + // The name of the user that was requested to be described. User names are used + // for authentication purposes. This is the string that will be used by your + // user when they log in to your file transfer protocol-enabled server. UserName *string `min:"3" type:"string"` } @@ -151,28 +167,29 @@ func (s DescribedUser) String() string { } // The virtual private cloud (VPC) endpoint settings that are configured for -// your SFTP server. With a VPC endpoint, you can restrict access to your SFTP -// server and resources only within your VPC. To control incoming internet traffic, -// invoke the UpdateServer API and attach an Elastic IP to your server's endpoint. +// your file transfer protocol-enabled server. With a VPC endpoint, you can +// restrict access to your server and resources only within your VPC. To control +// incoming internet traffic, invoke the UpdateServer API and attach an Elastic +// IP to your server's endpoint. 
type EndpointDetails struct { _ struct{} `type:"structure"` // A list of address allocation IDs that are required to attach an Elastic IP - // address to your SFTP server's endpoint. This is only valid in the UpdateServer - // API. + // address to your file transfer protocol-enabled server's endpoint. This is + // only valid in the UpdateServer API. // // This property can only be use when EndpointType is set to VPC. AddressAllocationIds []string `type:"list"` - // A list of subnet IDs that are required to host your SFTP server endpoint - // in your VPC. + // A list of subnet IDs that are required to host your file transfer protocol-enabled + // server endpoint in your VPC. SubnetIds []string `type:"list"` // The ID of the VPC endpoint. VpcEndpointId *string `min:"22" type:"string"` - // The VPC ID of the virtual private cloud in which the SFTP server's endpoint - // will be hosted. + // The VPC ID of the VPC in which a file transfer protocol-enabled server's + // endpoint will be hosted. VpcId *string `type:"string"` } @@ -233,16 +250,15 @@ func (s *HomeDirectoryMapEntry) Validate() error { } // Returns information related to the type of user authentication that is in -// use for a server's users. A server can have only one method of authentication. +// use for a file transfer protocol-enabled server's users. A server can have +// only one method of authentication. type IdentityProviderDetails struct { _ struct{} `type:"structure"` - // The InvocationRole parameter provides the type of InvocationRole used to - // authenticate the user account. + // Provides the type of InvocationRole used to authenticate the user account. InvocationRole *string `min:"20" type:"string"` - // The Url parameter provides contains the location of the service endpoint - // used to authenticate users. + // Contains the location of the service endpoint used to authenticate users. 
Url *string `type:"string"` } @@ -264,46 +280,47 @@ func (s *IdentityProviderDetails) Validate() error { return nil } -// Returns properties of the server that was specified. +// Returns properties of a file transfer protocol-enabled server that was specified. type ListedServer struct { _ struct{} `type:"structure"` - // The unique Amazon Resource Name (ARN) for the server to be listed. + // The unique Amazon Resource Name (ARN) for a file transfer protocol-enabled + // server to be listed. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // The type of VPC endpoint that your SFTP server is connected to. If your SFTP - // server is connected to a VPC endpoint, your server isn't accessible over - // the public internet. + // The type of VPC endpoint that your file transfer protocol-enabled server + // is connected to. If your server is connected to a VPC endpoint, your server + // isn't accessible over the public internet. EndpointType EndpointType `type:"string" enum:"true"` - // The authentication method used to validate a user for the server that was - // specified. This can include Secure Shell (SSH), user name and password combinations, - // or your own custom authentication method. Valid values include SERVICE_MANAGED - // or API_GATEWAY. + // The authentication method used to validate a user for a file transfer protocol-enabled + // server that was specified. This can include Secure Shell (SSH), user name + // and password combinations, or your own custom authentication method. Valid + // values include SERVICE_MANAGED or API_GATEWAY. IdentityProviderType IdentityProviderType `type:"string" enum:"true"` - // The AWS Identity and Access Management entity that allows the server to turn - // on Amazon CloudWatch logging. + // The AWS Identity and Access Management (IAM) entity that allows a file transfer + // protocol-enabled server to turn on Amazon CloudWatch logging. 
LoggingRole *string `min:"20" type:"string"` - // This value is the unique system assigned identifier for the SFTP servers - // that were listed. + // The unique system-assigned identifier for a file transfer protocol-enabled + // server that was listed. ServerId *string `min:"19" type:"string"` - // This property describes the condition of the SFTP server for the server that - // was described. A value of ONLINE> indicates that the server can accept jobs - // and transfer files. A State value of OFFLINE means that the server cannot - // perform file transfer operations. + // Describes the condition of a file transfer protocol-enabled server for the + // server that was described. A value of ONLINE indicates that the server can + // accept jobs and transfer files. A State value of OFFLINE means that the server + // cannot perform file transfer operations. // // The states of STARTING and STOPPING indicate that the server is in an intermediate // state, either not fully able to respond, or not fully offline. The values // of START_FAILED or STOP_FAILED can indicate an error condition. State State `type:"string" enum:"true"` - // This property is a numeric value that indicates the number of users that - // are assigned to the SFTP server you specified with the ServerId. + // A numeric value that indicates the number of users that are assigned to a + // file transfer protocol-enabled server you specified with the ServerId. UserCount *int64 `type:"integer"` } @@ -316,30 +333,31 @@ func (s ListedUser) String() string { type ListedUser struct { _ struct{} `type:"structure"` - // This property is the unique Amazon Resource Name (ARN) for the user that - // you want to learn about. + // The unique Amazon Resource Name (ARN) for the user that you want to learn + // about.
// // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // This value specifies the location that files are written to or read from - // an Amazon S3 bucket for the user you specify by their ARN. + // Specifies the location that files are written to or read from an Amazon S3 + // bucket for the user you specify by their ARN. HomeDirectory *string `type:"string"` // The type of landing directory (folder) you mapped for your users' home directory. // If you set it to PATH, the user will see the absolute Amazon S3 bucket paths - // as is in their SFTP clients. If you set it LOGICAL, you will need to provide - // mappings in the HomeDirectoryMappings for how you want to make S3 paths visible - // to your user. + // as is in their file transfer protocol clients. If you set it LOGICAL, you + // will need to provide mappings in the HomeDirectoryMappings for how you want + // to make Amazon S3 paths visible to your users. HomeDirectoryType HomeDirectoryType `type:"string" enum:"true"` // The role in use by this user. A role is an AWS Identity and Access Management - // (IAM) entity that, in this case, allows the SFTP server to act on a user's - // behalf. It allows the server to inherit the trust relationship that enables - // that user to perform file operations to their Amazon S3 bucket. + // (IAM) entity that, in this case, allows a file transfer protocol-enabled + // server to act on a user's behalf. It allows the server to inherit the trust + // relationship that enables that user to perform file operations to their Amazon + // S3 bucket. Role *string `min:"20" type:"string"` - // This value is the number of SSH public keys stored for the user you specified. + // The number of SSH public keys stored for the user you specified. SshPublicKeyCount *int64 `type:"integer"` // The name of the user whose ARN was specified. 
User names are used for authentication @@ -353,10 +371,11 @@ func (s ListedUser) String() string { } // Provides information about the public Secure Shell (SSH) key that is associated -// with a user account for a specific server (as identified by ServerId). The -// information returned includes the date the key was imported, the public key -// contents, and the public key ID. A user can store more than one SSH public -// key associated with their user name on a specific SFTP server. +// with a user account for the specific file transfer protocol-enabled server +// (as identified by ServerId). The information returned includes the date the +// key was imported, the public key contents, and the public key ID. A user +// can store more than one SSH public key associated with their user name on +// a specific server. type SshPublicKey struct { _ struct{} `type:"structure"` @@ -395,8 +414,7 @@ type Tag struct { // Key is a required field Key *string `type:"string" required:"true"` - // This property contains one or more values that you assigned to the key name - // you create. + // Contains one or more values that you assigned to the key name you create. // // Value is a required field Value *string `type:"string" required:"true"` diff --git a/service/transfer/transferiface/interface.go b/service/transfer/transferiface/interface.go index 053f42ef3f1..22dcc297eea 100644 --- a/service/transfer/transferiface/interface.go +++ b/service/transfer/transferiface/interface.go @@ -1,6 +1,6 @@ // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package transferiface provides an interface to enable mocking the AWS Transfer for SFTP service client +// Package transferiface provides an interface to enable mocking the AWS Transfer Family service client // for testing your code. // // It is important to note that this interface will have breaking changes