From 2a3c84c256d2c04889919ca9f0009489d149ae7d Mon Sep 17 00:00:00 2001 From: AWS SDK for Ruby Date: Mon, 19 Jun 2023 18:05:34 +0000 Subject: [PATCH] Updated API models and rebuilt service gems. --- apis/cloudformation/2010-05-15/api-2.json | 14 +- apis/cloudformation/2010-05-15/docs-2.json | 9 +- apis/ec2/2016-11-15/api-2.json | 17 +- apis/ec2/2016-11-15/docs-2.json | 19 +- apis/ec2/2016-11-15/examples-1.json | 399 +--------------- apis/ecs/2014-11-13/docs-2.json | 6 +- apis/glue/2017-03-31/api-2.json | 6 +- apis/glue/2017-03-31/docs-2.json | 2 + apis/pricing/2017-10-15/api-2.json | 52 +-- apis/pricing/2017-10-15/docs-2.json | 14 +- apis/pricing/2017-10-15/endpoint-tests-1.json | 124 +++-- apis/pricing/2017-10-15/examples-1.json | 63 --- apis/pricing/2017-10-15/smoke.json | 6 + apis/pricing/2017-10-15/waiters-2.json | 5 + apis/route53domains/2014-05-15/api-2.json | 6 +- apis/route53domains/2014-05-15/docs-2.json | 19 +- .../2014-05-15/endpoint-rule-set-1.json | 392 +++++++++------- .../2014-05-15/endpoint-tests-1.json | 239 +++++++++- apis/sagemaker/2017-07-24/api-2.json | 57 ++- apis/sagemaker/2017-07-24/docs-2.json | 138 ++++-- gems/aws-sdk-cloudformation/CHANGELOG.md | 5 + gems/aws-sdk-cloudformation/VERSION | 2 +- .../lib/aws-sdk-cloudformation.rb | 2 +- .../lib/aws-sdk-cloudformation/client.rb | 49 +- .../lib/aws-sdk-cloudformation/client_api.rb | 3 + .../lib/aws-sdk-cloudformation/types.rb | 75 ++- gems/aws-sdk-ec2/CHANGELOG.md | 5 + gems/aws-sdk-ec2/VERSION | 2 +- gems/aws-sdk-ec2/lib/aws-sdk-ec2.rb | 2 +- gems/aws-sdk-ec2/lib/aws-sdk-ec2/client.rb | 373 ++------------- .../aws-sdk-ec2/lib/aws-sdk-ec2/client_api.rb | 9 +- gems/aws-sdk-ec2/lib/aws-sdk-ec2/types.rb | 30 +- gems/aws-sdk-ecs/CHANGELOG.md | 5 + gems/aws-sdk-ecs/VERSION | 2 +- gems/aws-sdk-ecs/lib/aws-sdk-ecs.rb | 2 +- gems/aws-sdk-ecs/lib/aws-sdk-ecs/client.rb | 15 +- gems/aws-sdk-ecs/lib/aws-sdk-ecs/types.rb | 5 + gems/aws-sdk-glue/CHANGELOG.md | 5 + gems/aws-sdk-glue/VERSION | 2 +- gems/aws-sdk-glue/lib/aws-sdk-glue.rb | 2 +- gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb | 14 +- .../lib/aws-sdk-glue/client_api.rb | 2 + gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb | 14 +- gems/aws-sdk-pricing/CHANGELOG.md | 5 + gems/aws-sdk-pricing/VERSION | 2 +- gems/aws-sdk-pricing/features/smoke.feature | 11 + .../features/smoke_step_definitions.rb | 35 ++ gems/aws-sdk-pricing/lib/aws-sdk-pricing.rb | 3 +- .../lib/aws-sdk-pricing/client.rb | 55 +-- .../lib/aws-sdk-pricing/client_api.rb | 30 +- .../lib/aws-sdk-pricing/waiters.rb | 15 + .../spec/endpoint_provider_spec.rb | 105 ++++- gems/aws-sdk-route53domains/CHANGELOG.md | 5 + gems/aws-sdk-route53domains/VERSION | 2 +- .../lib/aws-sdk-route53domains.rb | 2 +- .../lib/aws-sdk-route53domains/client.rb | 51 +-- .../lib/aws-sdk-route53domains/client_api.rb | 3 +- .../endpoint_parameters.rb | 3 - .../endpoint_provider.rb | 51 ++- .../lib/aws-sdk-route53domains/types.rb | 33 +- .../spec/endpoint_provider_spec.rb | 267 ++++++++++- gems/aws-sdk-sagemaker/CHANGELOG.md | 5 + gems/aws-sdk-sagemaker/VERSION | 2 +- .../lib/aws-sdk-sagemaker.rb | 2 +- .../lib/aws-sdk-sagemaker/client.rb | 163 +++++-- .../lib/aws-sdk-sagemaker/client_api.rb | 38 ++ .../lib/aws-sdk-sagemaker/types.rb | 431 +++++++++++++++--- 67 files changed, 2092 insertions(+), 1439 deletions(-) create mode 100644 apis/pricing/2017-10-15/smoke.json create mode 100644 apis/pricing/2017-10-15/waiters-2.json create mode 100644 gems/aws-sdk-pricing/features/smoke.feature create mode 100644 
gems/aws-sdk-pricing/features/smoke_step_definitions.rb create mode 100644 gems/aws-sdk-pricing/lib/aws-sdk-pricing/waiters.rb diff --git a/apis/cloudformation/2010-05-15/api-2.json b/apis/cloudformation/2010-05-15/api-2.json index bb357c01aa6..96f3ed4533a 100644 --- a/apis/cloudformation/2010-05-15/api-2.json +++ b/apis/cloudformation/2010-05-15/api-2.json @@ -1442,7 +1442,8 @@ "Description":{"shape":"Description"}, "ChangeSetType":{"shape":"ChangeSetType"}, "ResourcesToImport":{"shape":"ResourcesToImport"}, - "IncludeNestedStacks":{"shape":"IncludeNestedStacks"} + "IncludeNestedStacks":{"shape":"IncludeNestedStacks"}, + "OnStackFailure":{"shape":"OnStackFailure"} } }, "CreateChangeSetOutput":{ @@ -1731,7 +1732,8 @@ "NextToken":{"shape":"NextToken"}, "IncludeNestedStacks":{"shape":"IncludeNestedStacks"}, "ParentChangeSetId":{"shape":"ChangeSetId"}, - "RootChangeSetId":{"shape":"ChangeSetId"} + "RootChangeSetId":{"shape":"ChangeSetId"}, + "OnStackFailure":{"shape":"OnStackFailure"} } }, "DescribeOrganizationsAccessInput":{ @@ -2677,6 +2679,14 @@ "DELETE" ] }, + "OnStackFailure":{ + "type":"string", + "enum":[ + "DO_NOTHING", + "ROLLBACK", + "DELETE" + ] + }, "OperationIdAlreadyExistsException":{ "type":"structure", "members":{ diff --git a/apis/cloudformation/2010-05-15/docs-2.json b/apis/cloudformation/2010-05-15/docs-2.json index b53dfb239b1..0b46a1b8e0c 100644 --- a/apis/cloudformation/2010-05-15/docs-2.json +++ b/apis/cloudformation/2010-05-15/docs-2.json @@ -877,7 +877,7 @@ "base": null, "refs": { "CreateStackInput$DisableRollback": "

Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure, but not both.

Default: false

", - "ExecuteChangeSetInput$DisableRollback": "

Preserves the state of previously provisioned resources when an operation fails.

Default: True

", + "ExecuteChangeSetInput$DisableRollback": "

Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure parameter to the CreateChangeSet API operation was specified.

Default: True

", "Stack$DisableRollback": "

Boolean to enable or disable rollback on stack creation failures:

", "UpdateStackInput$DisableRollback": "

Preserve the state of previously provisioned resources when an operation fails.

Default: False

" } @@ -1529,6 +1529,13 @@ "CreateStackInput$OnFailure": "

Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback, but not both.

Default: ROLLBACK

" } }, + "OnStackFailure": { + "base": null, + "refs": { + "CreateChangeSetInput$OnStackFailure": "

Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

For nested stacks, when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted.

", + "DescribeChangeSetOutput$OnStackFailure": "

Determines what action will be taken if stack creation fails. When this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

" + } + }, "OperationIdAlreadyExistsException": { "base": "

The specified operation ID already exists.

", "refs": { diff --git a/apis/ec2/2016-11-15/api-2.json b/apis/ec2/2016-11-15/api-2.json index 85aa50d62b4..c128541d072 100644 --- a/apis/ec2/2016-11-15/api-2.json +++ b/apis/ec2/2016-11-15/api-2.json @@ -7592,6 +7592,7 @@ } } }, + "AvailabilityZoneId":{"type":"string"}, "AvailabilityZoneList":{ "type":"list", "member":{ @@ -9786,8 +9787,8 @@ "ClientToken":{"shape":"String"}, "InstanceType":{"shape":"String"}, "InstancePlatform":{"shape":"CapacityReservationInstancePlatform"}, - "AvailabilityZone":{"shape":"String"}, - "AvailabilityZoneId":{"shape":"String"}, + "AvailabilityZone":{"shape":"AvailabilityZoneName"}, + "AvailabilityZoneId":{"shape":"AvailabilityZoneId"}, "Tenancy":{"shape":"CapacityReservationTenancy"}, "InstanceCount":{"shape":"Integer"}, "EbsOptimized":{"shape":"Boolean"}, @@ -43447,7 +43448,9 @@ "members":{ "S3":{"shape":"VerifiedAccessLogS3DestinationOptions"}, "CloudWatchLogs":{"shape":"VerifiedAccessLogCloudWatchLogsDestinationOptions"}, - "KinesisDataFirehose":{"shape":"VerifiedAccessLogKinesisDataFirehoseDestinationOptions"} + "KinesisDataFirehose":{"shape":"VerifiedAccessLogKinesisDataFirehoseDestinationOptions"}, + "LogVersion":{"shape":"String"}, + "IncludeTrustContext":{"shape":"Boolean"} } }, "VerifiedAccessLogS3Destination":{ @@ -43499,6 +43502,14 @@ "KinesisDataFirehose":{ "shape":"VerifiedAccessLogKinesisDataFirehoseDestination", "locationName":"kinesisDataFirehose" + }, + "LogVersion":{ + "shape":"String", + "locationName":"logVersion" + }, + "IncludeTrustContext":{ + "shape":"Boolean", + "locationName":"includeTrustContext" } } }, diff --git a/apis/ec2/2016-11-15/docs-2.json b/apis/ec2/2016-11-15/docs-2.json index cfaa5fae72d..82a1c236932 100644 --- a/apis/ec2/2016-11-15/docs-2.json +++ b/apis/ec2/2016-11-15/docs-2.json @@ -219,7 +219,7 @@ "DeregisterInstanceEventNotificationAttributes": "

Deregisters tag keys to prevent tags that have the specified tag keys from being included in scheduled event notifications for resources in the Region.

", "DeregisterTransitGatewayMulticastGroupMembers": "

Deregisters the specified members (network interfaces) from the transit gateway multicast group.

", "DeregisterTransitGatewayMulticastGroupSources": "

Deregisters the specified sources (network interfaces) from the transit gateway multicast group.

", - "DescribeAccountAttributes": "

Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon EC2 User Guide.

", + "DescribeAccountAttributes": "

Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

", "DescribeAddressTransfers": "

Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted.

", "DescribeAddresses": "

Describes the specified Elastic IP addresses or all of your Elastic IP addresses.

", "DescribeAddressesAttribute": "

Describes the attributes of the specified Elastic IP addresses. For requirements, see Using reverse DNS for email applications.

", @@ -1703,6 +1703,12 @@ "AvailabilityZoneList$member": null } }, + "AvailabilityZoneId": { + "base": null, + "refs": { + "CreateCapacityReservationRequest$AvailabilityZoneId": "

The ID of the Availability Zone in which to create the Capacity Reservation.

" + } + }, "AvailabilityZoneList": { "base": null, "refs": { @@ -1724,6 +1730,7 @@ "AvailabilityZoneName": { "base": null, "refs": { + "CreateCapacityReservationRequest$AvailabilityZone": "

The Availability Zone in which to create the Capacity Reservation.

", "CreateVolumeRequest$AvailabilityZone": "

The Availability Zone in which to create the volume.

" } }, @@ -2737,8 +2744,10 @@ "VerifiedAccessLogCloudWatchLogsDestinationOptions$Enabled": "

Indicates whether logging is enabled.

", "VerifiedAccessLogKinesisDataFirehoseDestination$Enabled": "

Indicates whether logging is enabled.

", "VerifiedAccessLogKinesisDataFirehoseDestinationOptions$Enabled": "

Indicates whether logging is enabled.

", + "VerifiedAccessLogOptions$IncludeTrustContext": "

Include trust data sent by trust providers in the logs.

", "VerifiedAccessLogS3Destination$Enabled": "

Indicates whether logging is enabled.

", "VerifiedAccessLogS3DestinationOptions$Enabled": "

Indicates whether logging is enabled.

", + "VerifiedAccessLogs$IncludeTrustContext": "

Describes the current setting for including trust data in the logs.

", "Volume$Encrypted": "

Indicates whether the volume is encrypted.

", "Volume$FastRestored": "

Indicates whether the volume was created using fast snapshot restore.

", "Volume$MultiAttachEnabled": "

Indicates whether Amazon EBS Multi-Attach is enabled.

", @@ -18487,8 +18496,6 @@ "CreateCapacityReservationFleetResult$AllocationStrategy": "

The allocation strategy used by the Capacity Reservation Fleet.

", "CreateCapacityReservationRequest$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", "CreateCapacityReservationRequest$InstanceType": "

The instance type for which to reserve capacity. For more information, see Instance types in the Amazon EC2 User Guide.

", - "CreateCapacityReservationRequest$AvailabilityZone": "

The Availability Zone in which to create the Capacity Reservation.

", - "CreateCapacityReservationRequest$AvailabilityZoneId": "

The ID of the Availability Zone in which to create the Capacity Reservation.

", "CreateCarrierGatewayRequest$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "CreateClientVpnEndpointRequest$ClientCidrBlock": "

The IPv4 address range, in CIDR notation, from which to assign client IP addresses. The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. Client CIDR range must have a size of at least /22 and must not be greater than /12.

", "CreateClientVpnEndpointRequest$ServerCertificateArn": "

The ARN of the server certificate. For more information, see the Certificate Manager User Guide.

", @@ -20151,12 +20158,14 @@ "VerifiedAccessLogDeliveryStatus$Message": "

The status message.

", "VerifiedAccessLogKinesisDataFirehoseDestination$DeliveryStream": "

The ID of the delivery stream.

", "VerifiedAccessLogKinesisDataFirehoseDestinationOptions$DeliveryStream": "

The ID of the delivery stream.

", + "VerifiedAccessLogOptions$LogVersion": "

The logging version to use.

Valid values: ocsf-0.1 | ocsf-1.0.0-rc.2

", "VerifiedAccessLogS3Destination$BucketName": "

The bucket name.

", "VerifiedAccessLogS3Destination$Prefix": "

The bucket prefix.

", "VerifiedAccessLogS3Destination$BucketOwner": "

The Amazon Web Services account number that owns the bucket.

", "VerifiedAccessLogS3DestinationOptions$BucketName": "

The bucket name.

", "VerifiedAccessLogS3DestinationOptions$Prefix": "

The bucket prefix.

", "VerifiedAccessLogS3DestinationOptions$BucketOwner": "

The ID of the Amazon Web Services account that owns the Amazon S3 bucket.

", + "VerifiedAccessLogs$LogVersion": "

Describes the current setting for the logging version.

", "VerifiedAccessTrustProvider$VerifiedAccessTrustProviderId": "

The ID of the Amazon Web Services Verified Access trust provider.

", "VerifiedAccessTrustProvider$Description": "

A description for the Amazon Web Services Verified Access trust provider.

", "VerifiedAccessTrustProvider$PolicyReferenceName": "

The identifier to be used when working with policy rules.

", @@ -22463,7 +22472,7 @@ } }, "VerifiedAccessLogOptions": { - "base": "

Describes the destinations for Verified Access logs.

", + "base": "

Options for Verified Access logs.

", "refs": { "ModifyVerifiedAccessInstanceLoggingConfigurationRequest$AccessLogs": "

The configuration options for Verified Access instances.

" } @@ -22481,7 +22490,7 @@ } }, "VerifiedAccessLogs": { - "base": "

Describes the destinations for Verified Access logs.

", + "base": "

Describes the options for Verified Access logs.

", "refs": { "VerifiedAccessInstanceLoggingConfiguration$AccessLogs": "

Details about the logging options.

" } diff --git a/apis/ec2/2016-11-15/examples-1.json b/apis/ec2/2016-11-15/examples-1.json index 93b4bf8835c..9b2d9f0b51e 100644 --- a/apis/ec2/2016-11-15/examples-1.json +++ b/apis/ec2/2016-11-15/examples-1.json @@ -3,13 +3,12 @@ "examples": { "AllocateAddress": [ { - "input": { - "Domain": "vpc" - }, "output": { "AllocationId": "eipalloc-64d5890a", "Domain": "vpc", - "PublicIp": "203.0.113.0" + "NetworkBorderGroup": "us-east-1", + "PublicIp": "203.0.113.0", + "PublicIpv4Pool": "amazon" }, "comments": { "input": { @@ -17,24 +16,9 @@ "output": { } }, - "description": "This example allocates an Elastic IP address to use with an instance in a VPC.", + "description": "This example allocates an Elastic IP address.", "id": "ec2-allocate-address-1", - "title": "To allocate an Elastic IP address for EC2-VPC" - }, - { - "output": { - "Domain": "standard", - "PublicIp": "198.51.100.0" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example allocates an Elastic IP address to use with an instance in EC2-Classic.", - "id": "ec2-allocate-address-2", - "title": "To allocate an Elastic IP address for EC2-Classic" + "title": "To allocate an Elastic IP address" } ], "AssignPrivateIpAddresses": [ @@ -86,9 +70,9 @@ "output": { } }, - "description": "This example associates the specified Elastic IP address with the specified instance in a VPC.", + "description": "This example associates the specified Elastic IP address with the specified instance.", "id": "ec2-associate-address-1", - "title": "To associate an Elastic IP address in EC2-VPC" + "title": "To associate an Elastic IP address" }, { "input": { @@ -107,21 +91,6 @@ "description": "This example associates the specified Elastic IP address with the specified network interface.", "id": "ec2-associate-address-2", "title": "To associate an Elastic IP address with a network interface" - }, - { - "input": { - "InstanceId": "i-07ffe74c7330ebf53", - "PublicIp": "198.51.100.0" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example associates an Elastic IP address with an instance in EC2-Classic.", - "id": "ec2-associate-address-3", - "title": "To associate an Elastic IP address in EC2-Classic" } ], "AssociateDhcpOptions": [ @@ -1734,71 +1703,6 @@ "description": "This example describes your Elastic IP addresses.", "id": "ec2-describe-addresses-1", "title": "To describe your Elastic IP addresses" - }, - { - "input": { - "Filters": [ - { - "Name": "domain", - "Values": [ - "vpc" - ] - } - ] - }, - "output": { - "Addresses": [ - { - "AllocationId": "eipalloc-12345678", - "AssociationId": "eipassoc-12345678", - "Domain": "vpc", - "InstanceId": "i-1234567890abcdef0", - "NetworkInterfaceId": "eni-12345678", - "NetworkInterfaceOwnerId": "123456789012", - "PrivateIpAddress": "10.0.1.241", - "PublicIp": "203.0.113.0" - } - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example describes your Elastic IP addresses for use with instances in a VPC.", - "id": "ec2-describe-addresses-2", - "title": "To describe your Elastic IP addresses for EC2-VPC" - }, - { - "input": { - "Filters": [ - { - "Name": "domain", - "Values": [ - "standard" - ] - } - ] - }, - "output": { - "Addresses": [ - { - "Domain": "standard", - "InstanceId": "i-1234567890abcdef0", - "PublicIp": "198.51.100.0" - } - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example describes your Elastic IP addresses for use with instances in EC2-Classic.", - "id": 
"ec2-describe-addresses-3", - "title": "To describe your Elastic IP addresses for EC2-Classic" } ], "DescribeAvailabilityZones": [ @@ -2235,7 +2139,7 @@ { "Attachments": [ { - "State": "available", + "State": "attached", "VpcId": "vpc-a01106c2" } ], @@ -2385,7 +2289,7 @@ "output": { "MovingAddressStatuses": [ { - "MoveStatus": "MovingToVpc", + "MoveStatus": "movingToVpc", "PublicIp": "198.51.100.0" } ] @@ -2771,104 +2675,6 @@ "title": "To describe a route table" } ], - "DescribeScheduledInstanceAvailability": [ - { - "input": { - "FirstSlotStartTimeRange": { - "EarliestTime": "2016-01-31T00:00:00Z", - "LatestTime": "2016-01-31T04:00:00Z" - }, - "Recurrence": { - "Frequency": "Weekly", - "Interval": 1, - "OccurrenceDays": [ - 1 - ] - } - }, - "output": { - "ScheduledInstanceAvailabilitySet": [ - { - "AvailabilityZone": "us-west-2b", - "AvailableInstanceCount": 20, - "FirstSlotStartTime": "2016-01-31T00:00:00Z", - "HourlyPrice": "0.095", - "InstanceType": "c4.large", - "MaxTermDurationInDays": 366, - "MinTermDurationInDays": 366, - "NetworkPlatform": "EC2-VPC", - "Platform": "Linux/UNIX", - "PurchaseToken": "eyJ2IjoiMSIsInMiOjEsImMiOi...", - "Recurrence": { - "Frequency": "Weekly", - "Interval": 1, - "OccurrenceDaySet": [ - 1 - ], - "OccurrenceRelativeToEnd": false - }, - "SlotDurationInHours": 23, - "TotalScheduledInstanceHours": 1219 - } - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example describes a schedule that occurs every week on Sunday, starting on the specified date. Note that the output contains a single schedule as an example.", - "id": "ec2-describe-scheduled-instance-availability-1", - "title": "To describe an available schedule" - } - ], - "DescribeScheduledInstances": [ - { - "input": { - "ScheduledInstanceIds": [ - "sci-1234-1234-1234-1234-123456789012" - ] - }, - "output": { - "ScheduledInstanceSet": [ - { - "AvailabilityZone": "us-west-2b", - "CreateDate": "2016-01-25T21:43:38.612Z", - "HourlyPrice": "0.095", - "InstanceCount": 1, - "InstanceType": "c4.large", - "NetworkPlatform": "EC2-VPC", - "NextSlotStartTime": "2016-01-31T09:00:00Z", - "Platform": "Linux/UNIX", - "Recurrence": { - "Frequency": "Weekly", - "Interval": 1, - "OccurrenceDaySet": [ - 1 - ], - "OccurrenceRelativeToEnd": false, - "OccurrenceUnit": "" - }, - "ScheduledInstanceId": "sci-1234-1234-1234-1234-123456789012", - "SlotDurationInHours": 32, - "TermEndDate": "2017-01-31T09:00:00Z", - "TermStartDate": "2016-01-31T09:00:00Z", - "TotalScheduledInstanceHours": 1696 - } - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example describes the specified Scheduled Instance.", - "id": "ec2-describe-scheduled-instances-1", - "title": "To describe your Scheduled Instances" - } - ], "DescribeSecurityGroupReferences": [ { "input": { @@ -3258,14 +3064,14 @@ "DescribeSpotPriceHistory": [ { "input": { - "EndTime": "2014-01-06T08:09:10", + "EndTime": "2014-01-06T08:09:10.05Z", "InstanceTypes": [ "m1.xlarge" ], "ProductDescriptions": [ "Linux/UNIX (Amazon VPC)" ], - "StartTime": "2014-01-06T07:08:09" + "StartTime": "2014-01-06T07:08:09.05Z" }, "output": { "SpotPriceHistory": [ @@ -3719,23 +3525,9 @@ "output": { } }, - "description": "This example disassociates an Elastic IP address from an instance in a VPC.", + "description": "This example disassociates an Elastic IP address from an instance.", "id": "ec2-disassociate-address-1", - "title": "To disassociate an Elastic IP address in EC2-VPC" - }, - { - "input": { - "PublicIp": 
"198.51.100.0" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example disassociates an Elastic IP address from an instance in EC2-Classic.", - "id": "ec2-disassociate-address-2", - "title": "To disassociate an Elastic IP addresses in EC2-Classic" + "title": "To disassociate an Elastic IP address" } ], "DisassociateIamInstanceProfile": [ @@ -4269,55 +4061,6 @@ "title": "To move an address to EC2-VPC" } ], - "PurchaseScheduledInstances": [ - { - "input": { - "PurchaseRequests": [ - { - "InstanceCount": 1, - "PurchaseToken": "eyJ2IjoiMSIsInMiOjEsImMiOi..." - } - ] - }, - "output": { - "ScheduledInstanceSet": [ - { - "AvailabilityZone": "us-west-2b", - "CreateDate": "2016-01-25T21:43:38.612Z", - "HourlyPrice": "0.095", - "InstanceCount": 1, - "InstanceType": "c4.large", - "NetworkPlatform": "EC2-VPC", - "NextSlotStartTime": "2016-01-31T09:00:00Z", - "Platform": "Linux/UNIX", - "Recurrence": { - "Frequency": "Weekly", - "Interval": 1, - "OccurrenceDaySet": [ - 1 - ], - "OccurrenceRelativeToEnd": false, - "OccurrenceUnit": "" - }, - "ScheduledInstanceId": "sci-1234-1234-1234-1234-123456789012", - "SlotDurationInHours": 32, - "TermEndDate": "2017-01-31T09:00:00Z", - "TermStartDate": "2016-01-31T09:00:00Z", - "TotalScheduledInstanceHours": 1696 - } - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example purchases a Scheduled Instance.", - "id": "ec2-purchase-scheduled-instances-1", - "title": "To purchase a Scheduled Instance" - } - ], "RebootInstances": [ { "input": { @@ -4349,23 +4092,9 @@ "output": { } }, - "description": "This example releases an Elastic IP address for use with instances in a VPC.", + "description": "This example releases the specified Elastic IP address.", "id": "ec2-release-address-1", - "title": "To release an Elastic IP address for EC2-VPC" - }, - { - "input": { - "PublicIp": "198.51.100.0" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example releases an Elastic IP address for use with instances in EC2-Classic.", - "id": "ec2-release-address-2", - "title": "To release an Elastic IP addresses for EC2-Classic" + "title": "To release an Elastic IP address" } ], "ReplaceNetworkAclAssociation": [ @@ -4524,7 +4253,7 @@ "output": { } }, - "description": "This example creates a Spot fleet request with two launch specifications that differ only by Availability Zone. The Spot fleet launches the instances in the specified Availability Zone with the lowest price. If your account supports EC2-VPC only, Amazon EC2 launches the Spot instances in the default subnet of the Availability Zone. If your account supports EC2-Classic, Amazon EC2 launches the instances in EC2-Classic in the Availability Zone.", + "description": "This example creates a Spot fleet request with two launch specifications that differ only by Availability Zone. The Spot fleet launches the instances in the specified Availability Zone with the lowest price. If your account supports EC2-VPC only, Amazon EC2 launches the Spot instances in the default subnet of the Availability Zone.", "id": "ec2-request-spot-fleet-2", "title": "To request a Spot fleet in the Availability Zone with the lowest price" }, @@ -4636,7 +4365,7 @@ "output": { } }, - "description": "This example creates a one-time Spot Instance request for five instances in the specified Availability Zone. If your account supports EC2-VPC only, Amazon EC2 launches the instances in the default subnet of the specified Availability Zone. 
If your account supports EC2-Classic, Amazon EC2 launches the instances in EC2-Classic in the specified Availability Zone.", + "description": "This example creates a one-time Spot Instance request for five instances in the specified Availability Zone. If your account supports EC2-VPC only, Amazon EC2 launches the instances in the default subnet of the specified Availability Zone.", "id": "ec2-request-spot-instances-1", "title": "To create a one-time Spot Instance request" }, @@ -4725,26 +4454,6 @@ "title": "To reset a snapshot attribute" } ], - "RestoreAddressToClassic": [ - { - "input": { - "PublicIp": "198.51.100.0" - }, - "output": { - "PublicIp": "198.51.100.0", - "Status": "MoveInProgress" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example restores the specified Elastic IP address to the EC2-Classic platform.", - "id": "ec2-restore-address-to-classic-1", - "title": "To restore an address to EC2-Classic" - } - ], "RunInstances": [ { "input": { @@ -4790,80 +4499,6 @@ "title": "To launch an instance" } ], - "RunScheduledInstances": [ - { - "input": { - "InstanceCount": 1, - "LaunchSpecification": { - "IamInstanceProfile": { - "Name": "my-iam-role" - }, - "ImageId": "ami-12345678", - "InstanceType": "c4.large", - "KeyName": "my-key-pair", - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeviceIndex": 0, - "Groups": [ - "sg-12345678" - ], - "SubnetId": "subnet-12345678" - } - ] - }, - "ScheduledInstanceId": "sci-1234-1234-1234-1234-123456789012" - }, - "output": { - "InstanceIdSet": [ - "i-1234567890abcdef0" - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example launches the specified Scheduled Instance in a VPC.", - "id": "ec2-run-scheduled-instances-1", - "title": "To launch a Scheduled Instance in a VPC" - }, - { - "input": { - "InstanceCount": 1, - "LaunchSpecification": { - "IamInstanceProfile": { - "Name": "my-iam-role" - }, - "ImageId": "ami-12345678", - "InstanceType": "c4.large", - "KeyName": "my-key-pair", - "Placement": { - "AvailabilityZone": "us-west-2b" - }, - "SecurityGroupIds": [ - "sg-12345678" - ] - }, - "ScheduledInstanceId": "sci-1234-1234-1234-1234-123456789012" - }, - "output": { - "InstanceIdSet": [ - "i-1234567890abcdef0" - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This example launches the specified Scheduled Instance in EC2-Classic.", - "id": "ec2-run-scheduled-instances-2", - "title": "To launch a Scheduled Instance in EC2-Classic" - } - ], "StartInstances": [ { "input": { diff --git a/apis/ecs/2014-11-13/docs-2.json b/apis/ecs/2014-11-13/docs-2.json index ed8819ac8aa..0d0064d8b6f 100644 --- a/apis/ecs/2014-11-13/docs-2.json +++ b/apis/ecs/2014-11-13/docs-2.json @@ -11,7 +11,7 @@ "DeleteCapacityProvider": "

Deletes the specified capacity provider.

The FARGATE and FARGATE_SPOT capacity providers are reserved and can't be deleted. You can disassociate them from a cluster using either the PutClusterCapacityProviders API or by deleting the cluster.

Prior to a capacity provider being deleted, the capacity provider must be removed from the capacity provider strategy from all services. The UpdateService API can be used to remove a capacity provider from a service's capacity provider strategy. When updating a service, the forceNewDeployment option can be used to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity provider are transitioned to use the capacity from the remaining capacity providers. Only capacity providers that aren't associated with a cluster can be deleted. To remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.

", "DeleteCluster": "

Deletes the specified cluster. The cluster transitions to the INACTIVE state. Clusters with an INACTIVE status might remain discoverable in your account for a period of time. However, this behavior is subject to change in the future. We don't recommend that you rely on INACTIVE clusters persisting.

You must deregister all container instances from this cluster before you may delete it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.

", "DeleteService": "

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you can't delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in the ListServices API operation. After all tasks have transitioned to either STOPPING or STOPPED status, the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on those services return a ServiceNotFoundException error.

If you attempt to create a new service with the same name as an existing service in either ACTIVE or DRAINING status, you receive an error.

", - "DeleteTaskDefinitions": "

Deletes one or more task definitions.

You must deregister a task definition revision before you delete it. For more information, see DeregisterTaskDefinition.

When you delete a task definition revision, it is immediately transitions from the INACTIVE to DELETE_IN_PROGRESS. Existing tasks and services that reference a DELETE_IN_PROGRESS task definition revision continue to run without disruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision can still scale up or down by modifying the service's desired count.

You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS task definition revision.

A task definition revision will stay in DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated.

", + "DeleteTaskDefinitions": "

Deletes one or more task definitions.

You must deregister a task definition revision before you delete it. For more information, see DeregisterTaskDefinition.

When you delete a task definition revision, it immediately transitions from the INACTIVE to the DELETE_IN_PROGRESS status. Existing tasks and services that reference a DELETE_IN_PROGRESS task definition revision continue to run without disruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision can still scale up or down by modifying the service's desired count.

You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS task definition revision.

A task definition revision will stay in DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated.

When you delete all INACTIVE task definition revisions, the task definition name is not displayed in the console and not returned in the API. If any task definition revisions are in the DELETE_IN_PROGRESS state, the task definition name is displayed in the console and returned in the API. The task definition name is retained by Amazon ECS and the revision is incremented the next time you create a task definition with that name.

", "DeleteTaskSet": "

Deletes a specified task set within a service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

", "DeregisterContainerInstance": "

Deregisters an Amazon ECS container instance from the specified cluster. This instance is no longer available to run tasks.

If you intend to use the container instance for some other purpose after deregistration, we recommend that you stop all of the tasks running on the container instance before deregistration. That prevents any orphaned tasks from consuming resources.

Deregistering a container instance removes the instance from a cluster, but it doesn't terminate the EC2 instance. If you are finished using the instance, be sure to terminate it in the Amazon EC2 console to stop billing.

If you terminate a running container instance, Amazon ECS automatically deregisters the instance from your cluster (stopped container instances or instances with disconnected agents aren't automatically deregistered when terminated).

", "DeregisterTaskDefinition": "

Deregisters the specified task definition by family and revision. Upon deregistration, the task definition is marked as INACTIVE. Existing tasks and services that reference an INACTIVE task definition continue to run without disruption. Existing services that reference an INACTIVE task definition can still scale up or down by modifying the service's desired count. If you want to delete a task definition revision, you must first deregister the task definition revision.

You can't use an INACTIVE task definition to run new tasks or create new services, and you can't update an existing service to reference an INACTIVE task definition. However, there may be up to a 10-minute window following deregistration where these restrictions have not yet taken effect.

At this time, INACTIVE task definitions remain discoverable in your account indefinitely. However, this behavior is subject to change in the future. We don't recommend that you rely on INACTIVE task definitions persisting beyond the lifecycle of any associated tasks and services.

You must deregister a task definition revision before you delete it. For more information, see DeleteTaskDefinitions.

", @@ -169,7 +169,7 @@ "base": null, "refs": { "ContainerInstance$agentConnected": "

This parameter returns true if the agent is connected to Amazon ECS. An instance with an agent that may be unhealthy or stopped returns false. Only instances connected to an agent can accept task placement requests.

", - "CreateServiceRequest$enableECSManagedTags": "

Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide.

", + "CreateServiceRequest$enableECSManagedTags": "

Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide.

When you use Amazon ECS managed tags, you need to set the propagateTags request parameter.

", "CreateServiceRequest$enableExecuteCommand": "

Determines whether the execute command functionality is turned on for the service. If true, this enables execute command functionality on all containers in the service tasks.

", "DeploymentAlarms$enable": "

Determines whether to use the CloudWatch alarm option in the service deployment process.

", "DeploymentAlarms$rollback": "

Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is used, when a service deployment fails, the service is rolled back to the last deployment that completed successfully.

", @@ -1583,7 +1583,7 @@ "PropagateTags": { "base": null, "refs": { - "CreateServiceRequest$propagateTags": "

Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

", + "CreateServiceRequest$propagateTags": "

Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

The default is NONE.

", "RunTaskRequest$propagateTags": "

Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

An error will be received if you specify the SERVICE option when running a task.

", "Service$propagateTags": "

Determines whether to propagate the tags from the task definition or the service to the task. If no value is specified, the tags aren't propagated.

", "StartTaskRequest$propagateTags": "

Specifies whether to propagate the tags from the task definition or the service to the task. If no value is specified, the tags aren't propagated.

", diff --git a/apis/glue/2017-03-31/api-2.json b/apis/glue/2017-03-31/api-2.json index b4c7472f50b..2069d663d9e 100644 --- a/apis/glue/2017-03-31/api-2.json +++ b/apis/glue/2017-03-31/api-2.json @@ -5745,7 +5745,8 @@ "type":"structure", "members":{ "CatalogId":{"shape":"CatalogIdString"}, - "DatabaseName":{"shape":"NameString"} + "DatabaseName":{"shape":"NameString"}, + "Region":{"shape":"NameString"} } }, "DatabaseInput":{ @@ -11501,7 +11502,8 @@ "members":{ "CatalogId":{"shape":"CatalogIdString"}, "DatabaseName":{"shape":"NameString"}, - "Name":{"shape":"NameString"} + "Name":{"shape":"NameString"}, + "Region":{"shape":"NameString"} } }, "TableInput":{ diff --git a/apis/glue/2017-03-31/docs-2.json b/apis/glue/2017-03-31/docs-2.json index da00b9b1332..e5bea6093b9 100644 --- a/apis/glue/2017-03-31/docs-2.json +++ b/apis/glue/2017-03-31/docs-2.json @@ -5347,6 +5347,7 @@ "DataSourceMap$key": null, "Database$Name": "

The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.

", "DatabaseIdentifier$DatabaseName": "

The name of the catalog database.

", + "DatabaseIdentifier$Region": "

Region of the target database.

", "DatabaseInput$Name": "

The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.

", "DeleteBlueprintRequest$Name": "

The name of the blueprint to delete.

", "DeleteBlueprintResponse$Name": "

Returns the name of the blueprint that was deleted.

", @@ -5518,6 +5519,7 @@ "TableError$TableName": "

The name of the table. For Hive compatibility, this must be entirely lowercase.

", "TableIdentifier$DatabaseName": "

The name of the catalog database that contains the target table.

", "TableIdentifier$Name": "

The name of the target table.

", + "TableIdentifier$Region": "

Region of the target table.

", "TableInput$Name": "

The table name. For Hive compatibility, this is folded to lowercase when it is stored.

", "TableInput$Owner": "

The table owner. Included for Apache Hive compatibility. Not used in the normal course of Glue operations.

", "TableVersionError$TableName": "

The name of the table in question.

", diff --git a/apis/pricing/2017-10-15/api-2.json b/apis/pricing/2017-10-15/api-2.json index 83d8d735c43..3f1032f32d2 100644 --- a/apis/pricing/2017-10-15/api-2.json +++ b/apis/pricing/2017-10-15/api-2.json @@ -23,10 +23,10 @@ "input":{"shape":"DescribeServicesRequest"}, "output":{"shape":"DescribeServicesResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ] }, @@ -39,10 +39,10 @@ "input":{"shape":"GetAttributeValuesRequest"}, "output":{"shape":"GetAttributeValuesResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ] }, @@ -55,10 +55,10 @@ "input":{"shape":"GetPriceListFileUrlRequest"}, "output":{"shape":"GetPriceListFileUrlResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"InternalErrorException"} ] }, "GetProducts":{ @@ -70,10 +70,10 @@ "input":{"shape":"GetProductsRequest"}, "output":{"shape":"GetProductsResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ] }, @@ -86,12 +86,12 @@ "input":{"shape":"ListPriceListsRequest"}, "output":{"shape":"ListPriceListsResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, - {"shape":"ExpiredNextTokenException"}, - {"shape":"AccessDeniedException"} + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalErrorException"}, + {"shape":"ExpiredNextTokenException"} ] } }, @@ -119,12 +119,13 @@ }, "BoxedInteger":{ "type":"integer", + "box":true, "max":100, "min":1 }, "CurrencyCode":{ "type":"string", - "pattern":"^[A-Z]{3}$" + "pattern":"[A-Z]{3}" }, "DescribeServicesRequest":{ "type":"structure", @@ -132,10 +133,7 @@ "ServiceCode":{"shape":"String"}, "FormatVersion":{"shape":"String"}, "NextToken":{"shape":"String"}, - "MaxResults":{ - "shape":"BoxedInteger", - "box":true - } + "MaxResults":{"shape":"BoxedInteger"} } }, "DescribeServicesResponse":{ @@ -194,10 +192,7 @@ "ServiceCode":{"shape":"String"}, "AttributeName":{"shape":"String"}, "NextToken":{"shape":"String"}, - "MaxResults":{ - "shape":"BoxedInteger", - "box":true - } + "MaxResults":{"shape":"BoxedInteger"} } }, "GetAttributeValuesResponse":{ @@ -232,10 +227,7 @@ "Filters":{"shape":"Filters"}, "FormatVersion":{"shape":"String"}, "NextToken":{"shape":"String"}, - "MaxResults":{ - "shape":"BoxedInteger", - "box":true - } + "MaxResults":{"shape":"BoxedInteger"} } }, "GetProductsResponse":{ @@ -251,7 +243,8 @@ "members":{ "Message":{"shape":"errorMessage"} }, - "exception":true + "exception":true, + "fault":true }, "InvalidNextTokenException":{ "type":"structure", @@ -292,6 +285,7 @@ }, "MaxResults":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -315,13 +309,12 @@ "type":"string", "max":2048, "min":18, - 
"pattern":"^arn:.+:pricing::.*:price-list/.{1,255}/.{1,32}/[A-Z]{3}/[0-9]{14}/[^/]*$" + "pattern":"arn:[A-Za-z0-9][-.A-Za-z0-9]{0,62}:pricing:::price-list/[A-Za-z0-9_/.-]{1,1023}" }, - "PriceListJsonItem":{"type":"string"}, "PriceListJsonItems":{ "type":"list", "member":{ - "shape":"PriceListJsonItem", + "shape":"SynthesizedJsonPriceListJsonItem", "jsonvalue":true } }, @@ -352,6 +345,7 @@ "member":{"shape":"Service"} }, "String":{"type":"string"}, + "SynthesizedJsonPriceListJsonItem":{"type":"string"}, "errorMessage":{"type":"string"} } } diff --git a/apis/pricing/2017-10-15/docs-2.json b/apis/pricing/2017-10-15/docs-2.json index ff5c9236871..4b2862d7e46 100644 --- a/apis/pricing/2017-10-15/docs-2.json +++ b/apis/pricing/2017-10-15/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "

Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to build cost control and scenario planning tools, reconcile billing data, forecast future spend for budgeting purposes, and provide cost benefit analysis that compare your internal workloads with Amazon Web Services.

Use GetServices without a service code to retrieve the service codes for all AWS services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

Service Endpoint

Amazon Web Services Price List service API provides the following two endpoints:

", + "service": "

The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following:

Use GetServices without a service code to retrieve the service codes for all Amazon Web Services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

You can use the following endpoints for the Amazon Web Services Price List API:

", "operations": { "DescribeServices": "

Returns the metadata for one service or a list of the metadata for all services. Use this without a service code to get the service codes for all services. Use it with a service code, such as AmazonEC2, to get information specific to that service, such as the attribute names available for that service. For example, some of the attribute names available for EC2 are volumeType, maxIopsVolume, operation, locationType, and instanceCapacity10xlarge.

", "GetAttributeValues": "

Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Billing and Cost Management User Guide.

", @@ -178,12 +178,6 @@ "PriceList$PriceListArn": "

The unique identifier that maps to where your Price List files are located. PriceListArn can be obtained from the ListPriceList response.

" } }, - "PriceListJsonItem": { - "base": null, - "refs": { - "PriceListJsonItems$member": null - } - }, "PriceListJsonItems": { "base": null, "refs": { @@ -248,6 +242,12 @@ "Service$ServiceCode": "

The code for the Amazon Web Services service.

" } }, + "SynthesizedJsonPriceListJsonItem": { + "base": null, + "refs": { + "PriceListJsonItems$member": null + } + }, "errorMessage": { "base": null, "refs": { diff --git a/apis/pricing/2017-10-15/endpoint-tests-1.json b/apis/pricing/2017-10-15/endpoint-tests-1.json index 6d77c402ab9..db676384e56 100644 --- a/apis/pricing/2017-10-15/endpoint-tests-1.json +++ b/apis/pricing/2017-10-15/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -164,9 +164,20 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -177,9 +188,20 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -190,9 +212,20 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -203,9 +236,20 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -216,9 +260,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -229,9 +273,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -254,9 +298,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -266,11 +310,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/apis/pricing/2017-10-15/examples-1.json b/apis/pricing/2017-10-15/examples-1.json index 2a1be6c9153..0ea7e3b0bbe 100644 --- a/apis/pricing/2017-10-15/examples-1.json +++ b/apis/pricing/2017-10-15/examples-1.json @@ -1,68 +1,5 @@ { "version": "1.0", "examples": { - "DescribeServices": [ - { - "input": { - "FormatVersion": "aws_v1", - "MaxResults": 1, - "ServiceCode": "AmazonEC2" - }, - "output": { - "FormatVersion": "aws_v1", - "NextToken": "abcdefg123", - "Services": [ - { - "AttributeNames": [ - "volumeType", - "maxIopsvolume", - "instanceCapacity10xlarge", - "locationType", - "operation" - ], - "ServiceCode": "AmazonEC2" - } - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "Retrieves the service for the given Service Code.", - "id": "to-retrieve-service-metadata", - "title": "To retrieve a list of services and service codes" - } - ], - "GetAttributeValues": [ - { - "input": { - "AttributeName": "volumeType", - "MaxResults": 2, - "ServiceCode": "AmazonEC2" - }, - "output": { - "AttributeValues": [ - { - "Value": "Throughput Optimized HDD" - }, - { - "Value": "Provisioned IOPS" - } - ], - "NextToken": "GpgauEXAMPLEezucl5LV0w==:7GzYJ0nw0DBTJ2J66EoTIIynE6O1uXwQtTRqioJzQadBnDVgHPzI1en4BUQnPCLpzeBk9RQQAWaFieA4+DapFAGLgk+Z/9/cTw9GldnPOHN98+FdmJP7wKU3QQpQ8MQr5KOeBkIsAqvAQYdL0DkL7tHwPtE5iCEByAmg9gcC/yBU1vAOsf7R3VaNN4M5jMDv3woSWqASSIlBVB6tgW78YL22KhssoItM/jWW+aP6Jqtq4mldxp/ct6DWAl+xLFwHU/CbketimPPXyqHF3/UXDw==" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This operation returns a list of values available for the given attribute.", - "id": "to-retreive-attribute-values", - "title": "To retrieve a list of attribute values" - } - ] } } diff --git a/apis/pricing/2017-10-15/smoke.json b/apis/pricing/2017-10-15/smoke.json new file mode 100644 index 00000000000..a9756813e4a --- /dev/null +++ b/apis/pricing/2017-10-15/smoke.json @@ -0,0 +1,6 @@ +{ + "version": 1, + 
"defaultRegion": "us-west-2", + "testCases": [ + ] +} diff --git a/apis/pricing/2017-10-15/waiters-2.json b/apis/pricing/2017-10-15/waiters-2.json new file mode 100644 index 00000000000..13f60ee66be --- /dev/null +++ b/apis/pricing/2017-10-15/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/apis/route53domains/2014-05-15/api-2.json b/apis/route53domains/2014-05-15/api-2.json index 4a45fb97234..abdac264ad4 100644 --- a/apis/route53domains/2014-05-15/api-2.json +++ b/apis/route53domains/2014-05-15/api-2.json @@ -1387,12 +1387,16 @@ "type":"string", "enum":["SubmittedDate"] }, + "ListPricesPageMaxItems":{ + "type":"integer", + "max":1000 + }, "ListPricesRequest":{ "type":"structure", "members":{ "Tld":{"shape":"TldName"}, "Marker":{"shape":"PageMarker"}, - "MaxItems":{"shape":"PageMaxItems"} + "MaxItems":{"shape":"ListPricesPageMaxItems"} } }, "ListPricesResponse":{ diff --git a/apis/route53domains/2014-05-15/docs-2.json b/apis/route53domains/2014-05-15/docs-2.json index aaf62911803..ab7806f0b58 100644 --- a/apis/route53domains/2014-05-15/docs-2.json +++ b/apis/route53domains/2014-05-15/docs-2.json @@ -23,16 +23,16 @@ "ListPrices": "

Lists the following prices for either all the TLDs supported by Route 53, or the specified TLD:

", "ListTagsForDomain": "

This operation returns all of the tags that are associated with the specified domain.

All tag operations are eventually consistent; subsequent operations might not immediately represent all issued operations.

", "PushDomain": "

Moves a domain from Amazon Web Services to another registrar.

Supported actions:

", - "RegisterDomain": "

This operation registers a domain. Domains are registered either by Amazon Registrar (for .com, .net, and .org domains) or by our registrar associate, Gandi (for all other domains). For some top-level domains (TLDs), this operation requires extra parameters.

When you register a domain, Amazon Route 53 does the following:

", + "RegisterDomain": "

This operation registers a domain. For some top-level domains (TLDs), this operation requires extra parameters.

When you register a domain, Amazon Route 53 does the following:

", "RejectDomainTransferFromAnotherAwsAccount": "

Rejects the transfer of a domain from another Amazon Web Services account to the current Amazon Web Services account. You initiate a transfer between Amazon Web Services accounts using TransferDomainToAnotherAwsAccount.

Use either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled.

", "RenewDomain": "

This operation renews a domain for the specified number of years. The cost of renewing your domain is billed to your Amazon Web Services account.

We recommend that you renew your domain several weeks before the expiration date. Some TLD registries delete domains before the expiration date if you haven't renewed far enough in advance. For more information about renewing domain registration, see Renewing Registration for a Domain in the Amazon Route 53 Developer Guide.

", "ResendContactReachabilityEmail": "

For operations that require confirmation that the email address for the registrant contact is valid, such as registering a new domain, this operation resends the confirmation email to the current email address for the registrant contact.

", "ResendOperationAuthorization": "

Resends the form of authorization email for this operation.

", "RetrieveDomainAuthCode": "

This operation returns the authorization code for the domain. To transfer a domain to another registrar, you provide this value to the new registrar.

", - "TransferDomain": "

Transfers a domain from another registrar to Amazon Route 53. When the transfer is complete, the domain is registered either with Amazon Registrar (for .com, .net, and .org domains) or with our registrar associate, Gandi (for all other TLDs).

For more information about transferring domains, see the following topics:

If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

", + "TransferDomain": "

Transfers a domain from another registrar to Amazon Route 53.

For more information about transferring domains, see the following topics:

If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

", "TransferDomainToAnotherAwsAccount": "

Transfers a domain from the current Amazon Web Services account to another Amazon Web Services account. Note the following:

When you transfer a domain from one Amazon Web Services account to another, Route 53 doesn't transfer the hosted zone that is associated with the domain. DNS resolution isn't affected if the domain and the hosted zone are owned by separate accounts, so transferring the hosted zone is optional. For information about transferring the hosted zone to another Amazon Web Services account, see Migrating a Hosted Zone to a Different Amazon Web Services Account in the Amazon Route 53 Developer Guide.

Use either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled.

", "UpdateDomainContact": "

This operation updates the contact information for a particular domain. You must specify information for at least one contact: registrant, administrator, or technical.

If the update is successful, this method returns an operation ID that you can use to track the progress and completion of the operation. If the request is not completed successfully, the domain registrant will be notified by email.

", - "UpdateDomainContactPrivacy": "

This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, contact information such as email address is replaced either with contact information for Amazon Registrar (for .com, .net, and .org domains) or with contact information for our registrar associate, Gandi.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

This operation affects only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.

By disabling the privacy service via API, you consent to the publication of the contact information provided for this domain via the public WHOIS database. You certify that you are the registrant of this domain name and have the authority to make this decision. You may withdraw your consent at any time by enabling privacy protection using either UpdateDomainContactPrivacy or the Route 53 console. Enabling privacy protection removes the contact information provided for this domain from the WHOIS database. For more information on our privacy practices, see https://aws.amazon.com/privacy/.

", + "UpdateDomainContactPrivacy": "

This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, your contact information is replaced with contact information for the registrar, with the phrase \"REDACTED FOR PRIVACY\", or with \"On behalf of <domain name> owner.\"

While some domains may allow different privacy settings per contact, we recommend specifying the same privacy setting for all contacts.

This operation affects only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.

By disabling the privacy service via API, you consent to the publication of the contact information provided for this domain via the public WHOIS database. You certify that you are the registrant of this domain name and have the authority to make this decision. You may withdraw your consent at any time by enabling privacy protection using either UpdateDomainContactPrivacy or the Route 53 console. Enabling privacy protection removes the contact information provided for this domain from the WHOIS database. For more information on our privacy practices, see https://aws.amazon.com/privacy/.

", "UpdateDomainNameservers": "

This operation replaces the current set of name servers for the domain with the specified set of name servers. If you use Amazon Route 53 as your DNS service, specify the four name servers in the delegation set for the hosted zone for the domain.

If successful, this operation returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

", "UpdateTagsForDomain": "

This operation adds or updates tags for a specified domain.

All tag operations are eventually consistent; subsequent operations might not immediately represent all issued operations.

", "ViewBilling": "

Returns all the domain-related billing records for the current Amazon Web Services account for a specified period.

" @@ -99,7 +99,7 @@ "RegisterDomainRequest$PrivacyProtectTechContact": "

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the technical contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

", "ResendContactReachabilityEmailResponse$isAlreadyVerified": "

True if the email address for the registrant contact has already been verified, and false otherwise. If the email address has already been verified, we don't send another confirmation email.

", "TransferDomainRequest$AutoRenew": "

Indicates whether the domain will be automatically renewed (true) or not (false). Auto renewal only takes effect after the account is charged.

Default: true

", - "TransferDomainRequest$PrivacyProtectAdminContact": "

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the admin contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

", + "TransferDomainRequest$PrivacyProtectAdminContact": "

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information for the registrar, the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain name> owner.\"

While some domains may allow different privacy settings per contact, we recommend specifying the same privacy setting for all contacts.

Default: true

", "TransferDomainRequest$PrivacyProtectRegistrantContact": "

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

", "TransferDomainRequest$PrivacyProtectTechContact": "

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the technical contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

", "UpdateDomainContactPrivacyRequest$AdminPrivacy": "

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the admin contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

", @@ -146,7 +146,7 @@ "Consent": { "base": "

Customer's consent for the owner change request.

", "refs": { - "UpdateDomainContactRequest$Consent": "

Customer's consent for the owner change request.

" + "UpdateDomainContactRequest$Consent": "

Customer's consent for the owner change request. Required if the domain is not free (consent price is more than $0.00).

" } }, "ContactDetail": { @@ -483,7 +483,7 @@ "ExtraParamName": { "base": null, "refs": { - "ExtraParam$Name": "

The name of an additional parameter that is required by a top-level domain. Here are the top-level domains that require additional parameters and the names of the parameters that they require:

.com.au and .net.au
  • AU_ID_NUMBER

  • AU_ID_TYPE

    Valid values include the following:

    • ABN (Australian business number)

    • ACN (Australian company number)

    • TM (Trademark number)

.ca
  • BRAND_NUMBER

  • CA_BUSINESS_ENTITY_TYPE

    Valid values include the following:

    • BANK (Bank)

    • COMMERCIAL_COMPANY (Commercial company)

    • COMPANY (Company)

    • COOPERATION (Cooperation)

    • COOPERATIVE (Cooperative)

    • COOPRIX (Cooprix)

    • CORP (Corporation)

    • CREDIT_UNION (Credit union)

    • FOMIA (Federation of mutual insurance associations)

    • INC (Incorporated)

    • LTD (Limited)

    • LTEE (Limitée)

    • LLC (Limited liability corporation)

    • LLP (Limited liability partnership)

    • LTE (Lte.)

    • MBA (Mutual benefit association)

    • MIC (Mutual insurance company)

    • NFP (Not-for-profit corporation)

    • SA (S.A.)

    • SAVINGS_COMPANY (Savings company)

    • SAVINGS_UNION (Savings union)

    • SARL (Société à responsabilité limitée)

    • TRUST (Trust)

    • ULC (Unlimited liability corporation)

  • CA_LEGAL_TYPE

    When ContactType is PERSON, valid values include the following:

    • ABO (Aboriginal Peoples indigenous to Canada)

    • CCT (Canadian citizen)

    • LGR (Legal Representative of a Canadian Citizen or Permanent Resident)

    • RES (Permanent resident of Canada)

    When ContactType is a value other than PERSON, valid values include the following:

    • ASS (Canadian unincorporated association)

    • CCO (Canadian corporation)

    • EDU (Canadian educational institution)

    • GOV (Government or government entity in Canada)

    • HOP (Canadian Hospital)

    • INB (Indian Band recognized by the Indian Act of Canada)

    • LAM (Canadian Library, Archive, or Museum)

    • MAJ (Her/His Majesty the Queen/King)

    • OMK (Official mark registered in Canada)

    • PLT (Canadian Political Party)

    • PRT (Partnership Registered in Canada)

    • TDM (Trademark registered in Canada)

    • TRD (Canadian Trade Union)

    • TRS (Trust established in Canada)

.es
  • ES_IDENTIFICATION

    The value of ES_IDENTIFICATION depends on the following values:

    • The value of ES_LEGAL_FORM

    • The value of ES_IDENTIFICATION_TYPE

    If ES_LEGAL_FORM is any value other than INDIVIDUAL:

    • Specify 1 letter + 8 numbers (CIF [Certificado de Identificación Fiscal])

    • Example: B12345678

    If ES_LEGAL_FORM is INDIVIDUAL, the value that you specify for ES_IDENTIFICATION depends on the value of ES_IDENTIFICATION_TYPE:

    • If ES_IDENTIFICATION_TYPE is DNI_AND_NIF (for Spanish contacts):

      • Specify 8 numbers + 1 letter (DNI [Documento Nacional de Identidad], NIF [Número de Identificación Fiscal])

      • Example: 12345678M

    • If ES_IDENTIFICATION_TYPE is NIE (for foreigners with legal residence):

      • Specify 1 letter + 7 numbers + 1 letter ( NIE [Número de Identidad de Extranjero])

      • Example: Y1234567X

    • If ES_IDENTIFICATION_TYPE is OTHER (for contacts outside of Spain):

      • Specify a passport number, drivers license number, or national identity card number

  • ES_IDENTIFICATION_TYPE

    Valid values include the following:

    • DNI_AND_NIF (For Spanish contacts)

    • NIE (For foreigners with legal residence)

    • OTHER (For contacts outside of Spain)

  • ES_LEGAL_FORM

    Valid values include the following:

    • ASSOCIATION

    • CENTRAL_GOVERNMENT_BODY

    • CIVIL_SOCIETY

    • COMMUNITY_OF_OWNERS

    • COMMUNITY_PROPERTY

    • CONSULATE

    • COOPERATIVE

    • DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL

    • ECONOMIC_INTEREST_GROUP

    • EMBASSY

    • ENTITY_MANAGING_NATURAL_AREAS

    • FARM_PARTNERSHIP

    • FOUNDATION

    • GENERAL_AND_LIMITED_PARTNERSHIP

    • GENERAL_PARTNERSHIP

    • INDIVIDUAL

    • LIMITED_COMPANY

    • LOCAL_AUTHORITY

    • LOCAL_PUBLIC_ENTITY

    • MUTUAL_INSURANCE_COMPANY

    • NATIONAL_PUBLIC_ENTITY

    • ORDER_OR_RELIGIOUS_INSTITUTION

    • OTHERS (Only for contacts outside of Spain)

    • POLITICAL_PARTY

    • PROFESSIONAL_ASSOCIATION

    • PUBLIC_LAW_ASSOCIATION

    • PUBLIC_LIMITED_COMPANY

    • REGIONAL_GOVERNMENT_BODY

    • REGIONAL_PUBLIC_ENTITY

    • SAVINGS_BANK

    • SPANISH_OFFICE

    • SPORTS_ASSOCIATION

    • SPORTS_FEDERATION

    • SPORTS_LIMITED_COMPANY

    • TEMPORARY_ALLIANCE_OF_ENTERPRISES

    • TRADE_UNION

    • WORKER_OWNED_COMPANY

    • WORKER_OWNED_LIMITED_COMPANY

.eu
  • EU_COUNTRY_OF_CITIZENSHIP

.fi
  • BIRTH_DATE_IN_YYYY_MM_DD

  • FI_BUSINESS_NUMBER

  • FI_ID_NUMBER

  • FI_NATIONALITY

    Valid values include the following:

    • FINNISH

    • NOT_FINNISH

  • FI_ORGANIZATION_TYPE

    Valid values include the following:

    • COMPANY

    • CORPORATION

    • GOVERNMENT

    • INSTITUTION

    • POLITICAL_PARTY

    • PUBLIC_COMMUNITY

    • TOWNSHIP

.fr
  • BIRTH_CITY

  • BIRTH_COUNTRY

  • BIRTH_DATE_IN_YYYY_MM_DD

  • BIRTH_DEPARTMENT: Specify the INSEE code that corresponds with the department where the contact was born. If the contact was born somewhere other than France or its overseas departments, specify 99. For more information, including a list of departments and the corresponding INSEE numbers, see the Wikipedia entry Departments of France.

  • BRAND_NUMBER

.it
  • IT_NATIONALITY

  • IT_PIN

  • IT_REGISTRANT_ENTITY_TYPE

    Valid values include the following:

    • FOREIGNERS

    • FREELANCE_WORKERS (Freelance workers and professionals)

    • ITALIAN_COMPANIES (Italian companies and one-person companies)

    • NON_PROFIT_ORGANIZATIONS

    • OTHER_SUBJECTS

    • PUBLIC_ORGANIZATIONS

.ru
  • BIRTH_DATE_IN_YYYY_MM_DD

  • RU_PASSPORT_DATA

.se
  • BIRTH_COUNTRY

  • SE_ID_NUMBER

.sg
  • SG_ID_NUMBER

.uk, .co.uk, .me.uk, and .org.uk
  • UK_CONTACT_TYPE

    Valid values include the following:

    • CRC (UK Corporation by Royal Charter)

    • FCORP (Non-UK Corporation)

    • FIND (Non-UK Individual, representing self)

    • FOTHER (Non-UK Entity that does not fit into any other category)

    • GOV (UK Government Body)

    • IND (UK Individual (representing self))

    • IP (UK Industrial/Provident Registered Company)

    • LLP (UK Limited Liability Partnership)

    • LTD (UK Limited Company)

    • OTHER (UK Entity that does not fit into any other category)

    • PLC (UK Public Limited Company)

    • PTNR (UK Partnership)

    • RCHAR (UK Registered Charity)

    • SCH (UK School)

    • STAT (UK Statutory Body)

    • STRA (UK Sole Trader)

  • UK_COMPANY_NUMBER

In addition, many TLDs require a VAT_NUMBER.

" + "ExtraParam$Name": "

The name of an additional parameter that is required by a top-level domain. Here are the top-level domains that require additional parameters and the names of the parameters that they require:

.com.au and .net.au
  • AU_ID_NUMBER

  • AU_ID_TYPE

    Valid values include the following:

    • ABN (Australian business number)

    • ACN (Australian company number)

    • TM (Trademark number)

.ca
  • BRAND_NUMBER

  • CA_BUSINESS_ENTITY_TYPE

    Valid values include the following:

    • BANK (Bank)

    • COMMERCIAL_COMPANY (Commercial company)

    • COMPANY (Company)

    • COOPERATION (Cooperation)

    • COOPERATIVE (Cooperative)

    • COOPRIX (Cooprix)

    • CORP (Corporation)

    • CREDIT_UNION (Credit union)

    • FOMIA (Federation of mutual insurance associations)

    • INC (Incorporated)

    • LTD (Limited)

    • LTEE (Limitée)

    • LLC (Limited liability corporation)

    • LLP (Limited liability partnership)

    • LTE (Lte.)

    • MBA (Mutual benefit association)

    • MIC (Mutual insurance company)

    • NFP (Not-for-profit corporation)

    • SA (S.A.)

    • SAVINGS_COMPANY (Savings company)

    • SAVINGS_UNION (Savings union)

    • SARL (Société à responsabilité limitée)

    • TRUST (Trust)

    • ULC (Unlimited liability corporation)

  • CA_LEGAL_TYPE

    When ContactType is PERSON, valid values include the following:

    • ABO (Aboriginal Peoples indigenous to Canada)

    • CCT (Canadian citizen)

    • LGR (Legal Representative of a Canadian Citizen or Permanent Resident)

    • RES (Permanent resident of Canada)

    When ContactType is a value other than PERSON, valid values include the following:

    • ASS (Canadian unincorporated association)

    • CCO (Canadian corporation)

    • EDU (Canadian educational institution)

    • GOV (Government or government entity in Canada)

    • HOP (Canadian Hospital)

    • INB (Indian Band recognized by the Indian Act of Canada)

    • LAM (Canadian Library, Archive, or Museum)

    • MAJ (Her/His Majesty the Queen/King)

    • OMK (Official mark registered in Canada)

    • PLT (Canadian Political Party)

    • PRT (Partnership Registered in Canada)

    • TDM (Trademark registered in Canada)

    • TRD (Canadian Trade Union)

    • TRS (Trust established in Canada)

.es
  • ES_IDENTIFICATION

    The value of ES_IDENTIFICATION depends on the following values:

    • The value of ES_LEGAL_FORM

    • The value of ES_IDENTIFICATION_TYPE

    If ES_LEGAL_FORM is any value other than INDIVIDUAL:

    • Specify 1 letter + 8 numbers (CIF [Certificado de Identificación Fiscal])

    • Example: B12345678

    If ES_LEGAL_FORM is INDIVIDUAL, the value that you specify for ES_IDENTIFICATION depends on the value of ES_IDENTIFICATION_TYPE:

    • If ES_IDENTIFICATION_TYPE is DNI_AND_NIF (for Spanish contacts):

      • Specify 8 numbers + 1 letter (DNI [Documento Nacional de Identidad], NIF [Número de Identificación Fiscal])

      • Example: 12345678M

    • If ES_IDENTIFICATION_TYPE is NIE (for foreigners with legal residence):

      • Specify 1 letter + 7 numbers + 1 letter ( NIE [Número de Identidad de Extranjero])

      • Example: Y1234567X

    • If ES_IDENTIFICATION_TYPE is OTHER (for contacts outside of Spain):

      • Specify a passport number, driver's license number, or national identity card number

  • ES_IDENTIFICATION_TYPE

    Valid values include the following:

    • DNI_AND_NIF (For Spanish contacts)

    • NIE (For foreigners with legal residence)

    • OTHER (For contacts outside of Spain)

  • ES_LEGAL_FORM

    Valid values include the following:

    • ASSOCIATION

    • CENTRAL_GOVERNMENT_BODY

    • CIVIL_SOCIETY

    • COMMUNITY_OF_OWNERS

    • COMMUNITY_PROPERTY

    • CONSULATE

    • COOPERATIVE

    • DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL

    • ECONOMIC_INTEREST_GROUP

    • EMBASSY

    • ENTITY_MANAGING_NATURAL_AREAS

    • FARM_PARTNERSHIP

    • FOUNDATION

    • GENERAL_AND_LIMITED_PARTNERSHIP

    • GENERAL_PARTNERSHIP

    • INDIVIDUAL

    • LIMITED_COMPANY

    • LOCAL_AUTHORITY

    • LOCAL_PUBLIC_ENTITY

    • MUTUAL_INSURANCE_COMPANY

    • NATIONAL_PUBLIC_ENTITY

    • ORDER_OR_RELIGIOUS_INSTITUTION

    • OTHERS (Only for contacts outside of Spain)

    • POLITICAL_PARTY

    • PROFESSIONAL_ASSOCIATION

    • PUBLIC_LAW_ASSOCIATION

    • PUBLIC_LIMITED_COMPANY

    • REGIONAL_GOVERNMENT_BODY

    • REGIONAL_PUBLIC_ENTITY

    • SAVINGS_BANK

    • SPANISH_OFFICE

    • SPORTS_ASSOCIATION

    • SPORTS_FEDERATION

    • SPORTS_LIMITED_COMPANY

    • TEMPORARY_ALLIANCE_OF_ENTERPRISES

    • TRADE_UNION

    • WORKER_OWNED_COMPANY

    • WORKER_OWNED_LIMITED_COMPANY

.eu
  • EU_COUNTRY_OF_CITIZENSHIP

.fi
  • BIRTH_DATE_IN_YYYY_MM_DD

  • FI_BUSINESS_NUMBER

  • FI_ID_NUMBER

  • FI_NATIONALITY

    Valid values include the following:

    • FINNISH

    • NOT_FINNISH

  • FI_ORGANIZATION_TYPE

    Valid values include the following:

    • COMPANY

    • CORPORATION

    • GOVERNMENT

    • INSTITUTION

    • POLITICAL_PARTY

    • PUBLIC_COMMUNITY

    • TOWNSHIP

.it
  • IT_NATIONALITY

  • IT_PIN

  • IT_REGISTRANT_ENTITY_TYPE

    Valid values include the following:

    • FOREIGNERS

    • FREELANCE_WORKERS (Freelance workers and professionals)

    • ITALIAN_COMPANIES (Italian companies and one-person companies)

    • NON_PROFIT_ORGANIZATIONS

    • OTHER_SUBJECTS

    • PUBLIC_ORGANIZATIONS

.ru
  • BIRTH_DATE_IN_YYYY_MM_DD

  • RU_PASSPORT_DATA

.se
  • BIRTH_COUNTRY

  • SE_ID_NUMBER

.sg
  • SG_ID_NUMBER

.uk, .co.uk, .me.uk, and .org.uk
  • UK_CONTACT_TYPE

    Valid values include the following:

    • CRC (UK Corporation by Royal Charter)

    • FCORP (Non-UK Corporation)

    • FIND (Non-UK Individual, representing self)

    • FOTHER (Non-UK Entity that does not fit into any other category)

    • GOV (UK Government Body)

    • IND (UK Individual (representing self))

    • IP (UK Industrial/Provident Registered Company)

    • LLP (UK Limited Liability Partnership)

    • LTD (UK Limited Company)

    • OTHER (UK Entity that does not fit into any other category)

    • PLC (UK Public Limited Company)

    • PTNR (UK Partnership)

    • RCHAR (UK Registered Charity)

    • SCH (UK School)

    • STAT (UK Statutory Body)

    • STRA (UK Sole Trader)

  • UK_COMPANY_NUMBER

In addition, many TLDs require a VAT_NUMBER.

" } }, "ExtraParamValue": { @@ -632,6 +632,12 @@ "ListOperationsRequest$SortBy": "

The sort type for returned values.

" } }, + "ListPricesPageMaxItems": { + "base": null, + "refs": { + "ListPricesRequest$MaxItems": "

Number of Prices to be returned.

Used only for all TLDs. If you specify a TLD, don't specify MaxItems.

" + } + }, "ListPricesRequest": { "base": null, "refs": { @@ -771,7 +777,6 @@ "refs": { "ListDomainsRequest$MaxItems": "

Number of domains to be returned.

Default: 20

", "ListOperationsRequest$MaxItems": "

Number of operations to be returned.

Default: 20

", - "ListPricesRequest$MaxItems": "

Number of Prices to be returned.

Used only for all TLDs. If you specify a TLD, don't specify a MaxItems.

", "ViewBillingRequest$MaxItems": "

The number of billing records to be returned.

Default: 20

" } }, diff --git a/apis/route53domains/2014-05-15/endpoint-rule-set-1.json b/apis/route53domains/2014-05-15/endpoint-rule-set-1.json index 4f994c0ade7..3ccf51cbf5b 100644 --- a/apis/route53domains/2014-05-15/endpoint-rule-set-1.json +++ b/apis/route53domains/2014-05-15/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + 
"ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/apis/route53domains/2014-05-15/endpoint-tests-1.json 
b/apis/route53domains/2014-05-15/endpoint-tests-1.json index b2039e36464..f837786f8dd 100644 --- a/apis/route53domains/2014-05-15/endpoint-tests-1.json +++ b/apis/route53domains/2014-05-15/endpoint-tests-1.json @@ -1,5 +1,18 @@ { "testCases": [ + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -8,8 +21,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -21,8 +34,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -34,34 +47,234 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53domains.us-east-1.amazonaws.com" + "url": "https://route53domains-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53domains.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://example.com" + "url": "https://route53domains-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + 
}, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -72,8 +285,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -84,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git 
a/apis/sagemaker/2017-07-24/api-2.json b/apis/sagemaker/2017-07-24/api-2.json index 6ead2d3a9ab..a7966a9a87c 100644 --- a/apis/sagemaker/2017-07-24/api-2.json +++ b/apis/sagemaker/2017-07-24/api-2.json @@ -4425,7 +4425,23 @@ "type":"structure", "members":{ "ImageClassificationJobConfig":{"shape":"ImageClassificationJobConfig"}, - "TextClassificationJobConfig":{"shape":"TextClassificationJobConfig"} + "TextClassificationJobConfig":{"shape":"TextClassificationJobConfig"}, + "TabularJobConfig":{"shape":"TabularJobConfig"} + }, + "union":true + }, + "AutoMLProblemTypeConfigName":{ + "type":"string", + "enum":[ + "ImageClassification", + "TextClassification", + "Tabular" + ] + }, + "AutoMLProblemTypeResolvedAttributes":{ + "type":"structure", + "members":{ + "TabularResolvedAttributes":{"shape":"TabularResolvedAttributes"} }, "union":true }, @@ -4436,6 +4452,14 @@ "GPU" ] }, + "AutoMLResolvedAttributes":{ + "type":"structure", + "members":{ + "AutoMLJobObjective":{"shape":"AutoMLJobObjective"}, + "CompletionCriteria":{"shape":"AutoMLJobCompletionCriteria"}, + "AutoMLProblemTypeResolvedAttributes":{"shape":"AutoMLProblemTypeResolvedAttributes"} + } + }, "AutoMLS3DataSource":{ "type":"structure", "required":[ @@ -4689,6 +4713,12 @@ "type":"string", "min":1 }, + "CandidateGenerationConfig":{ + "type":"structure", + "members":{ + "AlgorithmsConfig":{"shape":"AutoMLAlgorithmsConfig"} + } + }, "CandidateName":{ "type":"string", "max":64, @@ -7844,7 +7874,10 @@ "ModelDeployConfig":{"shape":"ModelDeployConfig"}, "ModelDeployResult":{"shape":"ModelDeployResult"}, "DataSplitConfig":{"shape":"AutoMLDataSplitConfig"}, - "SecurityConfig":{"shape":"AutoMLSecurityConfig"} + "SecurityConfig":{"shape":"AutoMLSecurityConfig"}, + "AutoMLJobArtifacts":{"shape":"AutoMLJobArtifacts"}, + "ResolvedAttributes":{"shape":"AutoMLResolvedAttributes"}, + "AutoMLProblemTypeConfigName":{"shape":"AutoMLProblemTypeConfigName"} } }, "DescribeCodeRepositoryInput":{ @@ -18996,6 +19029,26 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "TabularJobConfig":{ + "type":"structure", + "required":["TargetAttributeName"], + "members":{ + "CandidateGenerationConfig":{"shape":"CandidateGenerationConfig"}, + "CompletionCriteria":{"shape":"AutoMLJobCompletionCriteria"}, + "FeatureSpecificationS3Uri":{"shape":"S3Uri"}, + "Mode":{"shape":"AutoMLMode"}, + "GenerateCandidateDefinitionsOnly":{"shape":"GenerateCandidateDefinitionsOnly"}, + "ProblemType":{"shape":"ProblemType"}, + "TargetAttributeName":{"shape":"TargetAttributeName"}, + "SampleWeightAttributeName":{"shape":"SampleWeightAttributeName"} + } + }, + "TabularResolvedAttributes":{ + "type":"structure", + "members":{ + "ProblemType":{"shape":"ProblemType"} + } + }, "Tag":{ "type":"structure", "required":[ diff --git a/apis/sagemaker/2017-07-24/docs-2.json b/apis/sagemaker/2017-07-24/docs-2.json index b513bf97430..b8c54efc213 100644 --- a/apis/sagemaker/2017-07-24/docs-2.json +++ b/apis/sagemaker/2017-07-24/docs-2.json @@ -11,8 +11,8 @@ "CreateApp": "

Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.

", "CreateAppImageConfig": "

Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System (EFS) storage volume on the image, and a list of the kernels in the image.

", "CreateArtifact": "

Creates an artifact. An artifact is a lineage tracking entity that represents a URI addressable object or data. Some examples are the S3 URI of a dataset and the ECR registry path of an image. For more information, see Amazon SageMaker ML Lineage Tracking.

", - "CreateAutoMLJob": "

Creates an Autopilot job.

Find the best-performing model after you run an Autopilot job by calling DescribeAutoMLJob.

For information about how to use Autopilot, see Automate Model Development with Amazon SageMaker Autopilot.

", - "CreateAutoMLJobV2": "

Creates an Amazon SageMaker AutoML job that uses non-tabular data such as images or text for Computer Vision or Natural Language Processing problems.

Find the resulting model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

To create an AutoMLJob using tabular data, see CreateAutoMLJob.

This API action is callable through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an error.

", + "CreateAutoMLJob": "

Creates an Autopilot job, also referred to as an Autopilot experiment or an AutoML job.

Find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

CreateAutoMLJob only accepts tabular input data. We recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for problem types such as image or text classification.

Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

", + "CreateAutoMLJobV2": "

Creates an Autopilot job, also referred to as an Autopilot experiment or an AutoML job V2.

We recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for problem types such as image or text classification.

Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.

Find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. Calling DescribeAutoMLJob on an AutoML job V2 results in an error.

", "CreateCodeRepository": "

Creates a Git repository as a resource in your SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository.

", "CreateCompilationJob": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", "CreateContext": "

Creates a context. A context is a lineage tracking entity that represents a logical grouping of other tracking or experiment entities. Some examples are an endpoint and a model package. For more information, see Amazon SageMaker ML Lineage Tracking.

", @@ -111,8 +111,8 @@ "DescribeApp": "

Describes the app.

", "DescribeAppImageConfig": "

Describes an AppImageConfig.

", "DescribeArtifact": "

Describes an artifact.

", - "DescribeAutoMLJob": "

Returns information about an Amazon SageMaker AutoML job.

", - "DescribeAutoMLJobV2": "

Returns information about an Amazon SageMaker AutoML V2 job.

This API action is callable through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an error.

", + "DescribeAutoMLJob": "

Returns information about an AutoML job created by calling CreateAutoMLJob.

", + "DescribeAutoMLJobV2": "

Returns information about an AutoML job V2 created by calling CreateAutoMLJobV2.

", "DescribeCodeRepository": "

Gets details about the specified Git repository.

", "DescribeCompilationJob": "

Returns information about a model compilation job.

To create a model compilation job, use CreateCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", "DescribeContext": "

Describes a context.

", @@ -916,7 +916,8 @@ "AutoMLAlgorithmsConfig": { "base": null, "refs": { - "AutoMLCandidateGenerationConfig$AlgorithmsConfig": "

Stores the configuration information for the selection of algorithms used to train the model candidates.

The list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode .

For the list of all algorithms per training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" + "AutoMLCandidateGenerationConfig$AlgorithmsConfig": "

Stores the configuration information for the selection of algorithms used to train the model candidates.

The list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode.

For the list of all algorithms per training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.

", + "CandidateGenerationConfig$AlgorithmsConfig": "

Stores the configuration information for the selection of algorithms used to train model candidates on tabular data.

The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode.

For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.

" } }, "AutoMLCandidate": { @@ -975,14 +976,14 @@ "base": "

The data source for the Autopilot job.

", "refs": { "AutoMLChannel$DataSource": "

The data source for an AutoML channel.

", - "AutoMLJobChannel$DataSource": "

The data source for an AutoML channel.

" + "AutoMLJobChannel$DataSource": "

The data source for an AutoML channel (Required).

" } }, "AutoMLDataSplitConfig": { - "base": "

This structure specifies how to split the data into train and validation datasets.

If you are using the V1 API (for example CreateAutoMLJob) or the V2 API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a TextClassificationJobConfig problem type), the validation and training datasets must contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2 GB in size.

", + "base": "

This structure specifies how to split the data into train and validation datasets.

The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size.

", "refs": { "AutoMLJobConfig$DataSplitConfig": "

The configuration for splitting the input training dataset.

Type: AutoMLDataSplitConfig

", - "CreateAutoMLJobV2Request$DataSplitConfig": "

This structure specifies how to split the data into train and validation datasets.

If you are using the V1 API (for example CreateAutoMLJob) or the V2 API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a TextClassificationJobConfig problem type), the validation and training datasets must contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2 GB in size.

", + "CreateAutoMLJobV2Request$DataSplitConfig": "

This structure specifies how to split the data into train and validation datasets.

The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size.

", "DescribeAutoMLJobV2Response$DataSplitConfig": "

Returns the configuration settings of how the data are split into train and validation datasets.

" } }, @@ -993,13 +994,13 @@ "AutoMLJobSummary$FailureReason": "

The failure reason of an AutoML job.

", "AutoMLPartialFailureReason$PartialFailureMessage": "

The message containing the reason for a partial failure of an AutoML job.

", "DescribeAutoMLJobResponse$FailureReason": "

Returns the failure reason for an AutoML job, when applicable.

", - "DescribeAutoMLJobV2Response$FailureReason": "

Returns the reason for the failure of the AutoML V2 job, when applicable.

" + "DescribeAutoMLJobV2Response$FailureReason": "

Returns the reason for the failure of the AutoML job V2, when applicable.

" } }, "AutoMLInferenceContainerDefinitions": { "base": "

The mapping of all supported processing unit (CPU, GPU, etc...) to inference container definitions for the candidate. This field is populated for the V2 API only (for example, for jobs created by calling CreateAutoMLJobV2).

", "refs": { - "AutoMLCandidate$InferenceContainerDefinitions": "

The mapping of all supported processing unit (CPU, GPU, etc...) to inference container definitions for the candidate. This field is populated for the V2 API only (for example, for jobs created by calling CreateAutoMLJobV2).

" + "AutoMLCandidate$InferenceContainerDefinitions": "

The mapping of all supported processing units (CPU, GPU, etc.) to inference container definitions for the candidate. This field is populated for AutoML jobs V2 (for example, jobs created by calling CreateAutoMLJobV2) related to image or text classification problem types only.

" } }, "AutoMLInputDataConfig": { @@ -1017,7 +1018,7 @@ "CreateAutoMLJobResponse$AutoMLJobArn": "

The unique ARN assigned to the AutoML job when it is created.

", "CreateAutoMLJobV2Response$AutoMLJobArn": "

The unique ARN assigned to the AutoMLJob when it is created.

", "DescribeAutoMLJobResponse$AutoMLJobArn": "

Returns the ARN of the AutoML job.

", - "DescribeAutoMLJobV2Response$AutoMLJobArn": "

Returns the Amazon Resource Name (ARN) of the AutoML V2 job.

", + "DescribeAutoMLJobV2Response$AutoMLJobArn": "

Returns the Amazon Resource Name (ARN) of the AutoML job V2.

", "DescribeProcessingJobResponse$AutoMLJobArn": "

The ARN of an AutoML job associated with this processing job.

", "DescribeTrainingJobResponse$AutoMLJobArn": "

The Amazon Resource Name (ARN) of an AutoML job.

", "DescribeTransformJobResponse$AutoMLJobArn": "

The Amazon Resource Name (ARN) of the AutoML transform job.

", @@ -1029,11 +1030,12 @@ "AutoMLJobArtifacts": { "base": "

The artifacts that are generated during an AutoML job.

", "refs": { - "DescribeAutoMLJobResponse$AutoMLJobArtifacts": "

Returns information on the job's artifacts found in AutoMLJobArtifacts.

" + "DescribeAutoMLJobResponse$AutoMLJobArtifacts": "

Returns information on the job's artifacts found in AutoMLJobArtifacts.

", + "DescribeAutoMLJobV2Response$AutoMLJobArtifacts": null } }, "AutoMLJobChannel": { - "base": "

A channel is a named input source that training algorithms can consume. This channel is used for the non tabular training data of an AutoML job using the V2 API. For tabular training data, see AutoMLChannel. For more information, see Channel.

", + "base": "

A channel is a named input source that training algorithms can consume. This channel is used for AutoML jobs V2 (jobs created by calling CreateAutoMLJobV2).

", "refs": { "AutoMLJobInputDataConfig$member": null } @@ -1042,8 +1044,10 @@ "base": "

How long a job is allowed to run, or how many candidates a job is allowed to generate.

", "refs": { "AutoMLJobConfig$CompletionCriteria": "

How long an AutoML job is allowed to run, or how many candidates a job is allowed to generate.

", + "AutoMLResolvedAttributes$CompletionCriteria": null, "ImageClassificationJobConfig$CompletionCriteria": "

How long a job is allowed to run, or how many candidates a job is allowed to generate.

", "ResolvedAttributes$CompletionCriteria": null, + "TabularJobConfig$CompletionCriteria": null, "TextClassificationJobConfig$CompletionCriteria": "

How long a job is allowed to run, or how many candidates a job is allowed to generate.

" } }, @@ -1057,7 +1061,7 @@ "AutoMLJobInputDataConfig": { "base": null, "refs": { - "CreateAutoMLJobV2Request$AutoMLJobInputDataConfig": "

An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to InputDataConfig supported by CreateAutoMLJob. The supported formats depend on the problem type:

", + "CreateAutoMLJobV2Request$AutoMLJobInputDataConfig": "

An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to InputDataConfig supported by CreateAutoMLJob. The supported formats depend on the problem type:

", "DescribeAutoMLJobV2Response$AutoMLJobInputDataConfig": "

Returns an array of channel objects describing the input data and their location.

" } }, @@ -1069,17 +1073,18 @@ "CreateAutoMLJobV2Request$AutoMLJobName": "

Identifies an Autopilot job. The name must be unique to your account and is case insensitive.

", "DescribeAutoMLJobRequest$AutoMLJobName": "

Requests information about an AutoML job using its unique name.

", "DescribeAutoMLJobResponse$AutoMLJobName": "

Returns the name of the AutoML job.

", - "DescribeAutoMLJobV2Request$AutoMLJobName": "

Requests information about an AutoML V2 job using its unique name.

", - "DescribeAutoMLJobV2Response$AutoMLJobName": "

Returns the name of the AutoML V2 job.

", + "DescribeAutoMLJobV2Request$AutoMLJobName": "

Requests information about an AutoML job V2 using its unique name.

", + "DescribeAutoMLJobV2Response$AutoMLJobName": "

Returns the name of the AutoML job V2.

", "ListCandidatesForAutoMLJobRequest$AutoMLJobName": "

List the candidates created for the job by providing the job's name.

", "StopAutoMLJobRequest$AutoMLJobName": "

The name of the object you are requesting.

" } }, "AutoMLJobObjective": { - "base": "

Specifies a metric to minimize or maximize as the objective of a job. V2 API jobs (for example jobs created by calling CreateAutoMLJobV2), support Accuracy only.

", + "base": "

Specifies a metric to minimize or maximize as the objective of a job.

", "refs": { - "CreateAutoMLJobRequest$AutoMLJobObjective": "

Defines the objective metric used to measure the predictive quality of an AutoML job. You provide an AutoMLJobObjective$MetricName and Autopilot infers whether to minimize or maximize it. For CreateAutoMLJobV2, only Accuracy is supported.

", - "CreateAutoMLJobV2Request$AutoMLJobObjective": "

Specifies a metric to minimize or maximize as the objective of a job. For CreateAutoMLJobV2, only Accuracy is supported.

", + "AutoMLResolvedAttributes$AutoMLJobObjective": null, + "CreateAutoMLJobRequest$AutoMLJobObjective": "

Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. See AutoMLJobObjective for the default values.

", + "CreateAutoMLJobV2Request$AutoMLJobObjective": "

Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see AutoMLJobObjective.

For tabular problem types, you must either provide the AutoMLJobObjective and indicate the type of supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType), or none.

", "DescribeAutoMLJobResponse$AutoMLJobObjective": "

Returns the job's objective.

", "DescribeAutoMLJobV2Response$AutoMLJobObjective": "

Returns the job's objective.

", "ResolvedAttributes$AutoMLJobObjective": null @@ -1096,7 +1101,7 @@ "refs": { "AutoMLJobSummary$AutoMLJobSecondaryStatus": "

The secondary status of the AutoML job.

", "DescribeAutoMLJobResponse$AutoMLJobSecondaryStatus": "

Returns the secondary status of the AutoML job.

", - "DescribeAutoMLJobV2Response$AutoMLJobSecondaryStatus": "

Returns the secondary status of the AutoML V2 job.

" + "DescribeAutoMLJobV2Response$AutoMLJobSecondaryStatus": "

Returns the secondary status of the AutoML job V2.

" } }, "AutoMLJobStatus": { @@ -1104,7 +1109,7 @@ "refs": { "AutoMLJobSummary$AutoMLJobStatus": "

The status of the AutoML job.

", "DescribeAutoMLJobResponse$AutoMLJobStatus": "

Returns the status of the AutoML job.

", - "DescribeAutoMLJobV2Response$AutoMLJobStatus": "

Returns the status of the AutoML V2 job.

", + "DescribeAutoMLJobV2Response$AutoMLJobStatus": "

Returns the status of the AutoML job V2.

", "ListAutoMLJobsRequest$StatusEquals": "

Request a list of jobs, using a filter for status.

" } }, @@ -1136,7 +1141,7 @@ "AutoMLMetricEnum": { "base": null, "refs": { - "AutoMLJobObjective$MetricName": "

The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset.

For the list of all available metrics supported by Autopilot, see Autopilot metrics.

If you do not specify a metric explicitly, the default behavior is to automatically use:

", + "AutoMLJobObjective$MetricName": "

The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset.

For the list of all available metrics supported by Autopilot, see Autopilot metrics.

If you do not specify a metric explicitly, the default behavior is to automatically use:

", "FinalAutoMLJobObjectiveMetric$MetricName": "

The name of the metric with the best result. For a description of the possible objective metrics, see AutoMLJobObjective$MetricName.

", "FinalAutoMLJobObjectiveMetric$StandardMetricName": "

The name of the standard metric. For a description of the standard metrics, see Autopilot candidate metrics.

", "MetricDatum$MetricName": "

The name of the metric.

" @@ -1151,7 +1156,8 @@ "AutoMLMode": { "base": null, "refs": { - "AutoMLJobConfig$Mode": "

The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

" + "AutoMLJobConfig$Mode": "

The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

", + "TabularJobConfig$Mode": "

The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

" } }, "AutoMLNameContains": { @@ -1180,14 +1186,26 @@ "refs": { "AutoMLJobSummary$PartialFailureReasons": "

The list of reasons for partial failures within an AutoML job.

", "DescribeAutoMLJobResponse$PartialFailureReasons": "

Returns a list of reasons for partial failures within an AutoML job.

", - "DescribeAutoMLJobV2Response$PartialFailureReasons": "

Returns a list of reasons for partial failures within an AutoML V2 job.

" + "DescribeAutoMLJobV2Response$PartialFailureReasons": "

Returns a list of reasons for partial failures within an AutoML job V2.

" } }, "AutoMLProblemTypeConfig": { - "base": "

A collection of settings specific to the problem type used to configure an AutoML job using the V2 API. There must be one and only one config of the following type.

", + "base": "

A collection of settings specific to the problem type used to configure an AutoML job V2. There must be one and only one config of the following type.

", "refs": { - "CreateAutoMLJobV2Request$AutoMLProblemTypeConfig": "

Defines the configuration settings of one of the supported problem types.

", - "DescribeAutoMLJobV2Response$AutoMLProblemTypeConfig": "

Returns the configuration settings of the problem type set for the AutoML V2 job.

" + "CreateAutoMLJobV2Request$AutoMLProblemTypeConfig": "

Defines the configuration settings of one of the supported problem types.

For tabular problem types, you must either specify the type of supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType) and provide the AutoMLJobObjective, or none at all.
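As a rough illustration of that rule (a sketch only: the job name, role ARN, S3 paths, and target column below are placeholders, and optional fields are omitted), a tabular CreateAutoMLJobV2 call in the Ruby SDK could supply both pieces together:

    require "aws-sdk-sagemaker"

    sagemaker = Aws::SageMaker::Client.new

    sagemaker.create_auto_ml_job_v2(
      auto_ml_job_name: "my-tabular-job",                            # placeholder
      role_arn: "arn:aws:iam::123456789012:role/SageMakerAutopilot", # placeholder
      auto_ml_job_input_data_config: [
        {
          channel_type: "training",
          content_type: "text/csv;header=present",
          data_source: { s3_data_source: { s3_data_type: "S3Prefix", s3_uri: "s3://my-bucket/train/" } },
        },
      ],
      output_data_config: { s3_output_path: "s3://my-bucket/output/" },
      # Provide both the objective and the tabular problem type, or neither.
      auto_ml_job_objective: { metric_name: "MSE" },
      auto_ml_problem_type_config: {
        tabular_job_config: {
          problem_type: "Regression",
          target_attribute_name: "price", # placeholder target column
        },
      },
    )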

", + "DescribeAutoMLJobV2Response$AutoMLProblemTypeConfig": "

Returns the configuration settings of the problem type set for the AutoML job V2.

" + } + }, + "AutoMLProblemTypeConfigName": { + "base": null, + "refs": { + "DescribeAutoMLJobV2Response$AutoMLProblemTypeConfigName": "

Returns the name of the problem type configuration set for the AutoML job V2.

" + } + }, + "AutoMLProblemTypeResolvedAttributes": { + "base": "

The resolved attributes specific to the problem type of an AutoML job V2.

", + "refs": { + "AutoMLResolvedAttributes$AutoMLProblemTypeResolvedAttributes": "

Defines the resolved attributes specific to a problem type.

" } }, "AutoMLProcessingUnit": { @@ -1196,6 +1214,12 @@ "AutoMLInferenceContainerDefinitions$key": "

Processing unit for an inference container. Currently Autopilot only supports CPU or GPU.

" } }, + "AutoMLResolvedAttributes": { + "base": "

The resolved attributes used to configure an AutoML job V2.

", + "refs": { + "DescribeAutoMLJobV2Response$ResolvedAttributes": "

Returns the resolved attributes used by the AutoML job V2.

" + } + }, "AutoMLS3DataSource": { "base": "

Describes the Amazon S3 data source.

", "refs": { @@ -1445,6 +1469,12 @@ "AutoMLJobArtifacts$CandidateDefinitionNotebookLocation": "

The URL of the notebook location.

" } }, + "CandidateGenerationConfig": { + "base": "

Stores the configuration information for how model candidates are generated using an AutoML job V2.

", + "refs": { + "TabularJobConfig$CandidateGenerationConfig": "

The configuration information of how model candidates are generated.

" + } + }, "CandidateName": { "base": null, "refs": { @@ -2002,7 +2032,7 @@ "base": null, "refs": { "AutoMLChannel$CompressionType": "

You can use Gzip or None. The default value is None.

", - "AutoMLJobChannel$CompressionType": "

The allowed compression types depend on the input format. We allow the compression type Gzip for S3Prefix inputs only. For all other inputs, the compression type should be None. If no compression type is provided, we default to None.

", + "AutoMLJobChannel$CompressionType": "

The allowed compression types depend on the input format and problem type. We allow the compression type Gzip for S3Prefix inputs on tabular data only. For all other inputs, the compression type should be None. If no compression type is provided, we default to None.
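For example (a sketch; the bucket paths are placeholders), a tabular S3Prefix training channel may be gzipped while a validation channel left at the default stays uncompressed; the array below is what a caller would pass as AutoMLJobInputDataConfig:

    input_channels = [
      {
        channel_type: "training",
        content_type: "text/csv;header=present",
        compression_type: "Gzip", # allowed for S3Prefix inputs on tabular data
        data_source: { s3_data_source: { s3_data_type: "S3Prefix", s3_uri: "s3://my-bucket/train/" } },
      },
      {
        channel_type: "validation",
        content_type: "text/csv;header=present",
        compression_type: "None", # the default when no compression type is given
        data_source: { s3_data_source: { s3_data_type: "S3Prefix", s3_uri: "s3://my-bucket/validation/" } },
      },
    ]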

", "Channel$CompressionType": "

If training data is compressed, the compression type. The default value is None. CompressionType is used only in Pipe input mode. In File mode, leave this field unset or set it to None.

", "CompressionTypes$member": null, "TransformInput$CompressionType": "

If your transform data is compressed, specify the compression type. Amazon SageMaker automatically decompresses the data for the transform job accordingly. The default value is None.

" @@ -2133,7 +2163,7 @@ "ContentColumn": { "base": null, "refs": { - "TextClassificationJobConfig$ContentColumn": "

The name of the column used to provide the sentences to be classified. It should not be the same as the target column.

" + "TextClassificationJobConfig$ContentColumn": "

The name of the column used to provide the sentences to be classified. It should not be the same as the target column (Required).

" } }, "ContentDigest": { @@ -2147,7 +2177,7 @@ "base": null, "refs": { "AutoMLChannel$ContentType": "

The content type of the data from the input source. You can use text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.

", - "AutoMLJobChannel$ContentType": "

The content type of the data from the input source. The following are the allowed content types for different problems:

", + "AutoMLJobChannel$ContentType": "

The content type of the data from the input source. The following are the allowed content types for different problems:

", "Channel$ContentType": "

The MIME type of the data.

", "ContentTypes$member": null, "FileSource$ContentType": "

The type of content stored in the file source.

", @@ -5562,7 +5592,8 @@ "base": null, "refs": { "CreateAutoMLJobRequest$GenerateCandidateDefinitionsOnly": "

Generates possible candidates without training the models. A candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.

", - "DescribeAutoMLJobResponse$GenerateCandidateDefinitionsOnly": "

Indicates whether the output for an AutoML job generates candidate definitions only.

" + "DescribeAutoMLJobResponse$GenerateCandidateDefinitionsOnly": "

Indicates whether the output for an AutoML job generates candidate definitions only.

", + "TabularJobConfig$GenerateCandidateDefinitionsOnly": "

Generates possible candidates without training the models. A model candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.

" } }, "GetDeviceFleetReportRequest": { @@ -6281,9 +6312,9 @@ } }, "ImageClassificationJobConfig": { - "base": "

Stores the configuration information for the image classification problem of an AutoML job using the V2 API.

", + "base": "

Stores the configuration information for the image classification problem of an AutoML job V2.

", "refs": { - "AutoMLProblemTypeConfig$ImageClassificationJobConfig": "

Settings used to configure an AutoML job using the V2 API for the image classification problem type.

" + "AutoMLProblemTypeConfig$ImageClassificationJobConfig": "

Settings used to configure an AutoML job V2 for the image classification problem type.

" } }, "ImageConfig": { @@ -8066,7 +8097,7 @@ "MaxCandidates": { "base": null, "refs": { - "AutoMLJobCompletionCriteria$MaxCandidates": "

The maximum number of times a training job is allowed to run.

For V2 jobs (jobs created by calling CreateAutoMLJobV2), the supported value is 1.

" + "AutoMLJobCompletionCriteria$MaxCandidates": "

The maximum number of times a training job is allowed to run.

For AutoML jobs V2 (jobs created by calling CreateAutoMLJobV2), the supported value is 1.
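For illustration (the runtime bounds are arbitrary example values), a completion criteria block that honors this limit might look like:

    completion_criteria = {
      max_candidates: 1,                             # jobs V2 support only the value 1 here
      max_runtime_per_training_job_in_seconds: 3600, # example bound per candidate
      max_auto_ml_job_runtime_in_seconds: 7200,      # example bound for the whole job
    }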

" } }, "MaxConcurrentInvocationsPerInstance": { @@ -8224,7 +8255,7 @@ "MaxRuntimePerTrainingJobInSeconds": { "base": null, "refs": { - "AutoMLJobCompletionCriteria$MaxRuntimePerTrainingJobInSeconds": "

The maximum time, in seconds, that each training job executed inside hyperparameter tuning is allowed to run as part of a hyperparameter tuning job. For more information, see the StoppingCondition used by the CreateHyperParameterTuningJob action.

For V2 jobs (jobs created by calling CreateAutoMLJobV2), this field controls the runtime of the job candidate.

" + "AutoMLJobCompletionCriteria$MaxRuntimePerTrainingJobInSeconds": "

The maximum time, in seconds, that each training job executed inside hyperparameter tuning is allowed to run as part of a hyperparameter tuning job. For more information, see the StoppingCondition used by the CreateHyperParameterTuningJob action.

For AutoML jobs V2 (jobs created by calling CreateAutoMLJobV2), this field controls the runtime of the job candidate.

" } }, "MaxWaitTimeInSeconds": { @@ -10544,7 +10575,9 @@ "refs": { "CreateAutoMLJobRequest$ProblemType": "

Defines the type of supervised learning problem available for the candidates. For more information, see Amazon SageMaker Autopilot problem types.

", "DescribeAutoMLJobResponse$ProblemType": "

Returns the job's problem type.

", - "ResolvedAttributes$ProblemType": "

The problem type.

" + "ResolvedAttributes$ProblemType": "

The problem type.

", + "TabularJobConfig$ProblemType": "

The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see Amazon SageMaker Autopilot problem types.

", + "TabularResolvedAttributes$ProblemType": "

The type of supervised learning problem available for the model candidates of the AutoML job V2 (Binary Classification, Multiclass Classification, Regression). For more information, see Amazon SageMaker Autopilot problem types.

" } }, "ProcessingClusterConfig": { @@ -11540,7 +11573,7 @@ "ResolvedAttributes": { "base": "

The resolved attributes.

", "refs": { - "DescribeAutoMLJobResponse$ResolvedAttributes": "

Contains ProblemType, AutoMLJobObjective, and CompletionCriteria. If you do not provide these values, they are auto-inferred. If you do provide them, the values used are the ones you provide.

" + "DescribeAutoMLJobResponse$ResolvedAttributes": "

Contains ProblemType, AutoMLJobObjective, and CompletionCriteria. If you do not provide these values, they are inferred.

" } }, "ResourceArn": { @@ -11861,6 +11894,7 @@ "S3StorageConfig$S3Uri": "

The S3 URI, or location in Amazon S3, of OfflineStore.

S3 URIs have a format similar to the following: s3://example-bucket/prefix/.

", "S3StorageConfig$ResolvedOutputS3Uri": "

The S3 path where offline records are written.

", "SharingSettings$S3OutputPath": "

When NotebookOutputOption is Allowed, the Amazon S3 bucket used to store the shared notebook snapshots.

", + "TabularJobConfig$FeatureSpecificationS3Uri": "

A URL to the Amazon S3 data source containing selected features from the input data source to run an Autopilot job V2. You can input FeatureAttributeNames (optional) in JSON format as shown below:

{ \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

You can also specify the data type of the feature (optional) in the format shown below:

{ \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }

These column keys may not include the target column.

In ensembling mode, Autopilot only supports the following data types: numeric, categorical, text, and datetime. In HPO mode, Autopilot can support numeric, categorical, text, datetime, and sequence.

If only FeatureDataTypes is provided, the column keys (col1, col2,..) should be a subset of the column names in the input data.

If both FeatureDataTypes and FeatureAttributeNames are provided, then the column keys should be a subset of the column names provided in FeatureAttributeNames.

The key name FeatureAttributeNames is fixed. The values listed in [\"col1\", \"col2\", ...] are case sensitive and should be a list of strings containing unique values that are a subset of the column names in the input data. The list of columns provided must not include the target column.
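Putting those rules together, a hypothetical feature specification file (the column names are examples and the target column is omitted) could carry both keys at once; built and serialized from Ruby, it might look like:

    require "json"

    feature_spec = {
      "FeatureAttributeNames" => ["col1", "col2", "col3"],
      "FeatureDataTypes"      => {
        "col1" => "numeric",
        "col2" => "categorical",
        "col3" => "text",
      },
    }

    File.write("feature-spec.json", JSON.pretty_generate(feature_spec))
    # Upload feature-spec.json to S3 and pass its URI as FeatureSpecificationS3Uri.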

", "TensorBoardOutputConfig$S3OutputPath": "

Path to Amazon S3 storage location for TensorBoard output.

", "TransformOutput$S3OutputPath": "

The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix.

For every S3 object used as input for the transform job, batch transform stores the transformed data with an .out suffix in a corresponding subfolder in the location in the output prefix. For example, for the input data stored at s3://bucket-name/input-name-prefix/dataset01/data.csv, batch transform stores the transformed data at s3://bucket-name/output-name-prefix/input-name-prefix/data.csv.out. Batch transform doesn't upload partially processed objects. For an input S3 object that contains multiple records, it creates an .out file only if the transform job succeeds on the entire file. When the input contains multiple S3 objects, the batch transform job processes the listed S3 objects and uploads only the output for successfully processed objects. If any object fails in the transform job batch transform marks the job as failed to prompt investigation.

", "TransformS3DataSource$S3Uri": "

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

", @@ -11895,7 +11929,8 @@ "SampleWeightAttributeName": { "base": null, "refs": { - "AutoMLChannel$SampleWeightAttributeName": "

If specified, this column name indicates which column of the dataset should be treated as sample weights for use by the objective metric during the training, evaluation, and the selection of the best model. This column is not considered as a predictive feature. For more information on Autopilot metrics, see Metrics and validation.

Sample weights should be numeric, non-negative, with larger values indicating which rows are more important than others. Data points that have invalid or no weight value are excluded.

Support for sample weights is available in Ensembling mode only.

" + "AutoMLChannel$SampleWeightAttributeName": "

If specified, this column name indicates which column of the dataset should be treated as sample weights for use by the objective metric during the training, evaluation, and the selection of the best model. This column is not considered as a predictive feature. For more information on Autopilot metrics, see Metrics and validation.

Sample weights should be numeric, non-negative, with larger values indicating which rows are more important than others. Data points that have invalid or no weight value are excluded.

Support for sample weights is available in Ensembling mode only.

", + "TabularJobConfig$SampleWeightAttributeName": "

If specified, this column name indicates which column of the dataset should be treated as sample weights for use by the objective metric during the training, evaluation, and the selection of the best model. This column is not considered as a predictive feature. For more information on Autopilot metrics, see Metrics and validation.

Sample weights should be numeric, non-negative, with larger values indicating which rows are more important than others. Data points that have invalid or no weight value are excluded.

Support for sample weights is available in Ensembling mode only.

" } }, "SamplingPercentage": { @@ -12897,6 +12932,18 @@ "DataCatalogConfig$TableName": "

The name of the Glue table.

" } }, + "TabularJobConfig": { + "base": "

The collection of settings used by an AutoML job V2 for the TABULAR problem type.

", + "refs": { + "AutoMLProblemTypeConfig$TabularJobConfig": "

Settings used to configure an AutoML job V2 for a tabular problem type (regression, classification).
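A fuller sketch of that structure (all values are illustrative, not defaults) assembling the fields described in this update:

    tabular_job_config = {
      target_attribute_name: "price",            # the 'y' column
      problem_type: "Regression",                # may be omitted together with the job objective
      mode: "ENSEMBLING",                        # AUTO, ENSEMBLING, or HYPERPARAMETER_TUNING
      feature_specification_s3_uri: "s3://my-bucket/feature-spec.json",
      sample_weight_attribute_name: "weight",    # optional; Ensembling mode only
      generate_candidate_definitions_only: false,
      completion_criteria: { max_runtime_per_training_job_in_seconds: 3600 },
    }

    # Passed to CreateAutoMLJobV2 as auto_ml_problem_type_config: { tabular_job_config: tabular_job_config }.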

" + } + }, + "TabularResolvedAttributes": { + "base": "

The resolved attributes specific to the TABULAR problem type.

", + "refs": { + "AutoMLProblemTypeResolvedAttributes$TabularResolvedAttributes": "

Defines the resolved attributes for the TABULAR problem type.

" + } + }, "Tag": { "base": "

A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources.

You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags.

For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources. For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy.

", "refs": { @@ -13002,20 +13049,21 @@ "TargetAttributeName": { "base": null, "refs": { - "AutoMLChannel$TargetAttributeName": "

The name of the target variable in supervised learning, usually represented by 'y'.

" + "AutoMLChannel$TargetAttributeName": "

The name of the target variable in supervised learning, usually represented by 'y'.

", + "TabularJobConfig$TargetAttributeName": "

The name of the target variable in supervised learning, usually represented by 'y'.

" } }, "TargetDevice": { "base": null, "refs": { "CompilationJobSummary$CompilationTargetDevice": "

The type of device that the model will run on after the compilation job has completed.

", - "OutputConfig$TargetDevice": "

Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify OS, architecture, and accelerator using TargetPlatform fields. It can be used instead of TargetPlatform.

" + "OutputConfig$TargetDevice": "

Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify OS, architecture, and accelerator using TargetPlatform fields. It can be used instead of TargetPlatform.

Currently ml_trn1 is available only in US East (N. Virginia) Region, and ml_inf2 is available only in US East (Ohio) Region.
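For instance (a sketch only: the job name, role ARN, S3 paths, framework, and input shape are placeholder assumptions), a compilation job targeting ml_inf2 would be created in US East (Ohio):

    require "aws-sdk-sagemaker"

    neo = Aws::SageMaker::Client.new(region: "us-east-2") # ml_inf2 is limited to US East (Ohio)

    neo.create_compilation_job(
      compilation_job_name: "my-inf2-compilation",
      role_arn: "arn:aws:iam::123456789012:role/SageMakerNeoRole",
      input_config: {
        s3_uri: "s3://my-bucket/model/model.tar.gz",
        data_input_config: '{"input0": [1, 3, 224, 224]}',
        framework: "PYTORCH",
      },
      output_config: {
        s3_output_location: "s3://my-bucket/compiled/",
        target_device: "ml_inf2", # use either target_device or target_platform, not both
      },
      stopping_condition: { max_runtime_in_seconds: 900 },
    )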

" } }, "TargetLabelColumn": { "base": null, "refs": { - "TextClassificationJobConfig$TargetLabelColumn": "

The name of the column used to provide the class labels. It should not be same as the content column.

" + "TextClassificationJobConfig$TargetLabelColumn": "

The name of the column used to provide the class labels. It should not be the same as the content column (Required).

" } }, "TargetObjectiveMetricValue": { @@ -13149,9 +13197,9 @@ } }, "TextClassificationJobConfig": { - "base": "

Stores the configuration information for the text classification problem of an AutoML job using the V2 API.

", + "base": "

Stores the configuration information for the text classification problem of an AutoML job V2.

", "refs": { - "AutoMLProblemTypeConfig$TextClassificationJobConfig": "

Settings used to configure an AutoML job using the V2 API for the text classification problem type.

" + "AutoMLProblemTypeConfig$TextClassificationJobConfig": "

Settings used to configure an AutoML job V2 for the text classification problem type.

" } }, "ThingName": { @@ -13203,8 +13251,8 @@ "DescribeAutoMLJobResponse$CreationTime": "

Returns the creation time of the AutoML job.

", "DescribeAutoMLJobResponse$EndTime": "

Returns the end time of the AutoML job.

", "DescribeAutoMLJobResponse$LastModifiedTime": "

Returns the job's last modified time.

", - "DescribeAutoMLJobV2Response$CreationTime": "

Returns the creation time of the AutoML V2 job.

", - "DescribeAutoMLJobV2Response$EndTime": "

Returns the end time of the AutoML V2 job.

", + "DescribeAutoMLJobV2Response$CreationTime": "

Returns the creation time of the AutoML job V2.

", + "DescribeAutoMLJobV2Response$EndTime": "

Returns the end time of the AutoML job V2.

", "DescribeAutoMLJobV2Response$LastModifiedTime": "

Returns the job's last modified time.

", "DescribeCompilationJobResponse$CompilationStartTime": "

The time when the model compilation job started the CompilationJob instances.

You are billed for the time between this timestamp and the timestamp in the CompilationEndTime field. In Amazon CloudWatch Logs, the start time might be later than this time. That's because it takes time to download the compilation job, which depends on the size of the compilation job container.

", "DescribeCompilationJobResponse$CompilationEndTime": "

The time when the model compilation job on a compilation job instance ended. For a successful or stopped job, this is when the job's model artifacts have finished uploading. For a failed job, this is when Amazon SageMaker detected that the job failed.

", diff --git a/gems/aws-sdk-cloudformation/CHANGELOG.md b/gems/aws-sdk-cloudformation/CHANGELOG.md index cf15e016182..de1fb837e23 100644 --- a/gems/aws-sdk-cloudformation/CHANGELOG.md +++ b/gems/aws-sdk-cloudformation/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.82.0 (2023-06-19) +------------------ + +* Feature - Specify desired CloudFormation behavior in the event of ChangeSet execution failure using the CreateChangeSet OnStackFailure parameter + 1.81.0 (2023-06-15) ------------------ diff --git a/gems/aws-sdk-cloudformation/VERSION b/gems/aws-sdk-cloudformation/VERSION index dbd41264aa9..71fae54fb27 100644 --- a/gems/aws-sdk-cloudformation/VERSION +++ b/gems/aws-sdk-cloudformation/VERSION @@ -1 +1 @@ -1.81.0 +1.82.0 diff --git a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation.rb b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation.rb index 079c8c4f5f9..2dbd9976758 100644 --- a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation.rb +++ b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation.rb @@ -57,6 +57,6 @@ # @!group service module Aws::CloudFormation - GEM_VERSION = '1.81.0' + GEM_VERSION = '1.82.0' end diff --git a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client.rb b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client.rb index ec9ea85c622..058f4ad3346 100644 --- a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client.rb +++ b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client.rb @@ -977,6 +977,34 @@ def continue_update_rollback(params = {}, options = {}) # template. The default behavior of this action is set to `False`. To # include nested sets in a change set, specify `True`. # + # @option params [String] :on_stack_failure + # Determines what action will be taken if stack creation fails. If this + # parameter is specified, the `DisableRollback` parameter to the + # [ExecuteChangeSet][1] API operation must not be specified. This must + # be one of these values: + # + # * `DELETE` - Deletes the change set if the stack creation fails. This + # is only valid when the `ChangeSetType` parameter is set to `CREATE`. + # If the deletion of the stack fails, the status of the stack is + # `DELETE_FAILED`. + # + # * `DO_NOTHING` - if the stack creation fails, do nothing. This is + # equivalent to specifying `true` for the `DisableRollback` parameter + # to the [ExecuteChangeSet][1] API operation. + # + # * `ROLLBACK` - if the stack creation fails, roll back the stack. This + # is equivalent to specifying `false` for the `DisableRollback` + # parameter to the [ExecuteChangeSet][1] API operation. + # + # For nested stacks, when the `OnStackFailure` parameter is set to + # `DELETE` for the change set for the parent stack, any failure in a + # child stack will cause the parent stack creation to fail and all + # stacks to be deleted. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ExecuteChangeSet.html + # # @return [Types::CreateChangeSetOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateChangeSetOutput#id #id} => String @@ -1030,6 +1058,7 @@ def continue_update_rollback(params = {}, options = {}) # }, # ], # include_nested_stacks: false, + # on_stack_failure: "DO_NOTHING", # accepts DO_NOTHING, ROLLBACK, DELETE # }) # # @example Response structure @@ -2280,6 +2309,7 @@ def describe_account_limits(params = {}, options = {}) # * {Types::DescribeChangeSetOutput#include_nested_stacks #include_nested_stacks} => Boolean # * {Types::DescribeChangeSetOutput#parent_change_set_id #parent_change_set_id} => String # * {Types::DescribeChangeSetOutput#root_change_set_id #root_change_set_id} => String + # * {Types::DescribeChangeSetOutput#on_stack_failure #on_stack_failure} => String # # @example Request syntax with placeholder values # @@ -2340,6 +2370,7 @@ def describe_account_limits(params = {}, options = {}) # resp.include_nested_stacks #=> Boolean # resp.parent_change_set_id #=> String # resp.root_change_set_id #=> String + # resp.on_stack_failure #=> String, one of "DO_NOTHING", "ROLLBACK", "DELETE" # # # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage): @@ -3807,10 +3838,24 @@ def estimate_template_cost(params = {}, options = {}) # # @option params [Boolean] :disable_rollback # Preserves the state of previously provisioned resources when an - # operation fails. + # operation fails. This parameter can't be specified when the + # `OnStackFailure` parameter to the [CreateChangeSet][1] API operation + # was specified. + # + # * `True` - if the stack creation fails, do nothing. This is equivalent + # to specifying `DO_NOTHING` for the `OnStackFailure` parameter to the + # [CreateChangeSet][1] API operation. + # + # * `False` - if the stack creation fails, roll back the stack. This is + # equivalent to specifying `ROLLBACK` for the `OnStackFailure` + # parameter to the [CreateChangeSet][1] API operation. # # Default: `True` # + # + # + # [1]: https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateChangeSet.html + # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
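    # As an illustrative sketch only (the stack, change set, and template names are
    # placeholders): a change set created with `on_stack_failure: "DELETE"` has to be
    # executed without the `disable_rollback` option, because the two settings are
    # mutually exclusive.
    #
    #     cfn = Aws::CloudFormation::Client.new
    #
    #     cfn.create_change_set({
    #       stack_name: "my-stack",
    #       change_set_name: "my-change-set",
    #       change_set_type: "CREATE",
    #       template_url: "https://s3.amazonaws.com/my-bucket/template.yaml",
    #       on_stack_failure: "DELETE", # delete the stack if creation fails
    #     })
    #
    #     # Wait for the change set to finish creating, then execute it. Do not pass
    #     # disable_rollback here, because on_stack_failure was specified above.
    #     cfn.execute_change_set({
    #       change_set_name: "my-change-set",
    #       stack_name: "my-stack",
    #     })
    #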
# # @example Request syntax with placeholder values @@ -6936,7 +6981,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-cloudformation' - context[:gem_version] = '1.81.0' + context[:gem_version] = '1.82.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client_api.rb b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client_api.rb index 1d1028297c0..aab593993e7 100644 --- a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client_api.rb +++ b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client_api.rb @@ -240,6 +240,7 @@ module ClientApi NotificationARN = Shapes::StringShape.new(name: 'NotificationARN') NotificationARNs = Shapes::ListShape.new(name: 'NotificationARNs') OnFailure = Shapes::StringShape.new(name: 'OnFailure') + OnStackFailure = Shapes::StringShape.new(name: 'OnStackFailure') OperationIdAlreadyExistsException = Shapes::StructureShape.new(name: 'OperationIdAlreadyExistsException') OperationInProgressException = Shapes::StructureShape.new(name: 'OperationInProgressException') OperationNotFoundException = Shapes::StructureShape.new(name: 'OperationNotFoundException') @@ -618,6 +619,7 @@ module ClientApi CreateChangeSetInput.add_member(:change_set_type, Shapes::ShapeRef.new(shape: ChangeSetType, location_name: "ChangeSetType")) CreateChangeSetInput.add_member(:resources_to_import, Shapes::ShapeRef.new(shape: ResourcesToImport, location_name: "ResourcesToImport")) CreateChangeSetInput.add_member(:include_nested_stacks, Shapes::ShapeRef.new(shape: IncludeNestedStacks, location_name: "IncludeNestedStacks")) + CreateChangeSetInput.add_member(:on_stack_failure, Shapes::ShapeRef.new(shape: OnStackFailure, location_name: "OnStackFailure")) CreateChangeSetInput.struct_class = Types::CreateChangeSetInput CreateChangeSetOutput.add_member(:id, Shapes::ShapeRef.new(shape: ChangeSetId, location_name: "Id")) @@ -783,6 +785,7 @@ module ClientApi DescribeChangeSetOutput.add_member(:include_nested_stacks, Shapes::ShapeRef.new(shape: IncludeNestedStacks, location_name: "IncludeNestedStacks")) DescribeChangeSetOutput.add_member(:parent_change_set_id, Shapes::ShapeRef.new(shape: ChangeSetId, location_name: "ParentChangeSetId")) DescribeChangeSetOutput.add_member(:root_change_set_id, Shapes::ShapeRef.new(shape: ChangeSetId, location_name: "RootChangeSetId")) + DescribeChangeSetOutput.add_member(:on_stack_failure, Shapes::ShapeRef.new(shape: OnStackFailure, location_name: "OnStackFailure")) DescribeChangeSetOutput.struct_class = Types::DescribeChangeSetOutput DescribeOrganizationsAccessInput.add_member(:call_as, Shapes::ShapeRef.new(shape: CallAs, location_name: "CallAs")) diff --git a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/types.rb b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/types.rb index 2873f154176..f49803ea317 100644 --- a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/types.rb +++ b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/types.rb @@ -928,6 +928,35 @@ class ContinueUpdateRollbackOutput < Aws::EmptyStructure; end # include nested sets in a change set, specify `True`. # @return [Boolean] # + # @!attribute [rw] on_stack_failure + # Determines what action will be taken if stack creation fails. If + # this parameter is specified, the `DisableRollback` parameter to the + # [ExecuteChangeSet][1] API operation must not be specified. 
This must + # be one of these values: + # + # * `DELETE` - Deletes the change set if the stack creation fails. + # This is only valid when the `ChangeSetType` parameter is set to + # `CREATE`. If the deletion of the stack fails, the status of the + # stack is `DELETE_FAILED`. + # + # * `DO_NOTHING` - if the stack creation fails, do nothing. This is + # equivalent to specifying `true` for the `DisableRollback` + # parameter to the [ExecuteChangeSet][1] API operation. + # + # * `ROLLBACK` - if the stack creation fails, roll back the stack. + # This is equivalent to specifying `false` for the `DisableRollback` + # parameter to the [ExecuteChangeSet][1] API operation. + # + # For nested stacks, when the `OnStackFailure` parameter is set to + # `DELETE` for the change set for the parent stack, any failure in a + # child stack will cause the parent stack creation to fail and all + # stacks to be deleted. + # + # + # + # [1]: https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ExecuteChangeSet.html + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateChangeSetInput AWS API Documentation # class CreateChangeSetInput < Struct.new( @@ -947,7 +976,8 @@ class CreateChangeSetInput < Struct.new( :description, :change_set_type, :resources_to_import, - :include_nested_stacks) + :include_nested_stacks, + :on_stack_failure) SENSITIVE = [] include Aws::Structure end @@ -2337,6 +2367,30 @@ class DescribeChangeSetInput < Struct.new( # nested change set hierarchy. # @return [String] # + # @!attribute [rw] on_stack_failure + # Determines what action will be taken if stack creation fails. When + # this parameter is specified, the `DisableRollback` parameter to the + # [ExecuteChangeSet][1] API operation must not be specified. This must + # be one of these values: + # + # * `DELETE` - Deletes the change set if the stack creation fails. + # This is only valid when the `ChangeSetType` parameter is set to + # `CREATE`. If the deletion of the stack fails, the status of the + # stack is `DELETE_FAILED`. + # + # * `DO_NOTHING` - if the stack creation fails, do nothing. This is + # equivalent to specifying `true` for the `DisableRollback` + # parameter to the [ExecuteChangeSet][1] API operation. + # + # * `ROLLBACK` - if the stack creation fails, roll back the stack. + # This is equivalent to specifying `false` for the `DisableRollback` + # parameter to the [ExecuteChangeSet][1] API operation. + # + # + # + # [1]: https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ExecuteChangeSet.html + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeChangeSetOutput AWS API Documentation # class DescribeChangeSetOutput < Struct.new( @@ -2358,7 +2412,8 @@ class DescribeChangeSetOutput < Struct.new( :next_token, :include_nested_stacks, :parent_change_set_id, - :root_change_set_id) + :root_change_set_id, + :on_stack_failure) SENSITIVE = [] include Aws::Structure end @@ -3672,9 +3727,23 @@ class EstimateTemplateCostOutput < Struct.new( # # @!attribute [rw] disable_rollback # Preserves the state of previously provisioned resources when an - # operation fails. + # operation fails. This parameter can't be specified when the + # `OnStackFailure` parameter to the [CreateChangeSet][1] API operation + # was specified. + # + # * `True` - if the stack creation fails, do nothing. This is + # equivalent to specifying `DO_NOTHING` for the `OnStackFailure` + # parameter to the [CreateChangeSet][1] API operation. 
+ # + # * `False` - if the stack creation fails, roll back the stack. This + # is equivalent to specifying `ROLLBACK` for the `OnStackFailure` + # parameter to the [CreateChangeSet][1] API operation. # # Default: `True` + # + # + # + # [1]: https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateChangeSet.html # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ExecuteChangeSetInput AWS API Documentation diff --git a/gems/aws-sdk-ec2/CHANGELOG.md b/gems/aws-sdk-ec2/CHANGELOG.md index 119ac6decf6..a31308f8b65 100644 --- a/gems/aws-sdk-ec2/CHANGELOG.md +++ b/gems/aws-sdk-ec2/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.385.0 (2023-06-19) +------------------ + +* Feature - API changes to AWS Verified Access to include data from trust providers in logs + 1.384.0 (2023-06-15) ------------------ diff --git a/gems/aws-sdk-ec2/VERSION b/gems/aws-sdk-ec2/VERSION index 18b364a1a84..98c24f68585 100644 --- a/gems/aws-sdk-ec2/VERSION +++ b/gems/aws-sdk-ec2/VERSION @@ -1 +1 @@ -1.384.0 +1.385.0 diff --git a/gems/aws-sdk-ec2/lib/aws-sdk-ec2.rb b/gems/aws-sdk-ec2/lib/aws-sdk-ec2.rb index e20f177329f..4d89e7e955f 100644 --- a/gems/aws-sdk-ec2/lib/aws-sdk-ec2.rb +++ b/gems/aws-sdk-ec2/lib/aws-sdk-ec2.rb @@ -76,6 +76,6 @@ # @!group service module Aws::EC2 - GEM_VERSION = '1.384.0' + GEM_VERSION = '1.385.0' end diff --git a/gems/aws-sdk-ec2/lib/aws-sdk-ec2/client.rb b/gems/aws-sdk-ec2/lib/aws-sdk-ec2/client.rb index 8cce12e04f5..c026f1c2dfc 100644 --- a/gems/aws-sdk-ec2/lib/aws-sdk-ec2/client.rb +++ b/gems/aws-sdk-ec2/lib/aws-sdk-ec2/client.rb @@ -903,32 +903,20 @@ def advertise_byoip_cidr(params = {}, options = {}) # * {Types::AllocateAddressResult#carrier_ip #carrier_ip} => String # # - # @example Example: To allocate an Elastic IP address for EC2-VPC + # @example Example: To allocate an Elastic IP address # - # # This example allocates an Elastic IP address to use with an instance in a VPC. + # # This example allocates an Elastic IP address. # # resp = client.allocate_address({ - # domain: "vpc", # }) # # resp.to_h outputs the following: # { # allocation_id: "eipalloc-64d5890a", # domain: "vpc", + # network_border_group: "us-east-1", # public_ip: "203.0.113.0", - # } - # - # @example Example: To allocate an Elastic IP address for EC2-Classic - # - # # This example allocates an Elastic IP address to use with an instance in EC2-Classic. - # - # resp = client.allocate_address({ - # }) - # - # resp.to_h outputs the following: - # { - # domain: "standard", - # public_ip: "198.51.100.0", + # public_ipv_4_pool: "amazon", # } # # @example Request syntax with placeholder values @@ -1589,9 +1577,9 @@ def assign_private_nat_gateway_address(params = {}, options = {}) # * {Types::AssociateAddressResult#association_id #association_id} => String # # - # @example Example: To associate an Elastic IP address in EC2-VPC + # @example Example: To associate an Elastic IP address # - # # This example associates the specified Elastic IP address with the specified instance in a VPC. + # # This example associates the specified Elastic IP address with the specified instance. # # resp = client.associate_address({ # allocation_id: "eipalloc-64d5890a", @@ -1617,15 +1605,6 @@ def assign_private_nat_gateway_address(params = {}, options = {}) # association_id: "eipassoc-2bebb745", # } # - # @example Example: To associate an Elastic IP address in EC2-Classic - # - # # This example associates an Elastic IP address with an instance in EC2-Classic. 
- # - # resp = client.associate_address({ - # instance_id: "i-07ffe74c7330ebf53", - # public_ip: "198.51.100.0", - # }) - # # @example Request syntax with placeholder values # # resp = client.associate_address({ @@ -4898,8 +4877,8 @@ def copy_snapshot(params = {}, options = {}) # client_token: "String", # instance_type: "String", # required # instance_platform: "Linux/UNIX", # required, accepts Linux/UNIX, Red Hat Enterprise Linux, SUSE Linux, Windows, Windows with SQL Server, Windows with SQL Server Enterprise, Windows with SQL Server Standard, Windows with SQL Server Web, Linux with SQL Server Standard, Linux with SQL Server Web, Linux with SQL Server Enterprise, RHEL with SQL Server Standard, RHEL with SQL Server Enterprise, RHEL with SQL Server Web, RHEL with HA, RHEL with HA and SQL Server Standard, RHEL with HA and SQL Server Enterprise - # availability_zone: "String", - # availability_zone_id: "String", + # availability_zone: "AvailabilityZoneName", + # availability_zone_id: "AvailabilityZoneId", # tenancy: "default", # accepts default, dedicated # instance_count: 1, # required # ebs_optimized: false, @@ -18944,9 +18923,6 @@ def deregister_transit_gateway_multicast_group_sources(params = {}, options = {} # Describes attributes of your Amazon Web Services account. The # following are the supported account attributes: # - # * `supported-platforms`: Indicates whether your account can launch - # instances into EC2-Classic and EC2-VPC, or only into EC2-VPC. - # # * `default-vpc`: The ID of the default VPC for your account, or # `none`. # @@ -18955,25 +18931,20 @@ def deregister_transit_gateway_multicast_group_sources(params = {}, options = {} # Instances. For more information, see [On-Demand Instance Limits][1] # in the *Amazon Elastic Compute Cloud User Guide*. # - # * `vpc-max-security-groups-per-interface`: The maximum number of - # security groups that you can assign to a network interface. - # # * `max-elastic-ips`: The maximum number of Elastic IP addresses that - # you can allocate for use with EC2-Classic. + # you can allocate. # - # * `vpc-max-elastic-ips`: The maximum number of Elastic IP addresses - # that you can allocate for use with EC2-VPC. + # * `supported-platforms`: This attribute is deprecated. # - # We are retiring EC2-Classic on August 15, 2022. We recommend that you - # migrate from EC2-Classic to a VPC. For more information, see [Migrate - # from EC2-Classic to a VPC][2] in the *Amazon EC2 User Guide*. + # * `vpc-max-elastic-ips`: The maximum number of Elastic IP addresses + # that you can allocate. # - # + # * `vpc-max-security-groups-per-interface`: The maximum number of + # security groups that you can assign to a network interface. # # # # [1]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html#ec2-on-demand-instances-limits - # [2]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html # # @option params [Array] :attribute_names # The account attribute names. @@ -19259,63 +19230,6 @@ def describe_address_transfers(params = {}, options = {}) # ], # } # - # @example Example: To describe your Elastic IP addresses for EC2-VPC - # - # # This example describes your Elastic IP addresses for use with instances in a VPC. 
- # - # resp = client.describe_addresses({ - # filters: [ - # { - # name: "domain", - # values: [ - # "vpc", - # ], - # }, - # ], - # }) - # - # resp.to_h outputs the following: - # { - # addresses: [ - # { - # allocation_id: "eipalloc-12345678", - # association_id: "eipassoc-12345678", - # domain: "vpc", - # instance_id: "i-1234567890abcdef0", - # network_interface_id: "eni-12345678", - # network_interface_owner_id: "123456789012", - # private_ip_address: "10.0.1.241", - # public_ip: "203.0.113.0", - # }, - # ], - # } - # - # @example Example: To describe your Elastic IP addresses for EC2-Classic - # - # # This example describes your Elastic IP addresses for use with instances in EC2-Classic. - # - # resp = client.describe_addresses({ - # filters: [ - # { - # name: "domain", - # values: [ - # "standard", - # ], - # }, - # ], - # }) - # - # resp.to_h outputs the following: - # { - # addresses: [ - # { - # domain: "standard", - # instance_id: "i-1234567890abcdef0", - # public_ip: "198.51.100.0", - # }, - # ], - # } - # # @example Request syntax with placeholder values # # resp = client.describe_addresses({ @@ -25208,7 +25122,7 @@ def describe_instances(params = {}, options = {}) # { # attachments: [ # { - # state: "available", + # state: "attached", # vpc_id: "vpc-a01106c2", # }, # ], @@ -26953,7 +26867,7 @@ def describe_managed_prefix_lists(params = {}, options = {}) # { # moving_address_statuses: [ # { - # move_status: "MovingToVpc", + # move_status: "movingToVpc", # public_ip: "198.51.100.0", # }, # ], @@ -30385,54 +30299,6 @@ def describe_route_tables(params = {}, options = {}) # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. # - # - # @example Example: To describe an available schedule - # - # # This example describes a schedule that occurs every week on Sunday, starting on the specified date. Note that the output - # # contains a single schedule as an example. - # - # resp = client.describe_scheduled_instance_availability({ - # first_slot_start_time_range: { - # earliest_time: Time.parse("2016-01-31T00:00:00Z"), - # latest_time: Time.parse("2016-01-31T04:00:00Z"), - # }, - # recurrence: { - # frequency: "Weekly", - # interval: 1, - # occurrence_days: [ - # 1, - # ], - # }, - # }) - # - # resp.to_h outputs the following: - # { - # scheduled_instance_availability_set: [ - # { - # availability_zone: "us-west-2b", - # available_instance_count: 20, - # first_slot_start_time: Time.parse("2016-01-31T00:00:00Z"), - # hourly_price: "0.095", - # instance_type: "c4.large", - # max_term_duration_in_days: 366, - # min_term_duration_in_days: 366, - # network_platform: "EC2-VPC", - # platform: "Linux/UNIX", - # purchase_token: "eyJ2IjoiMSIsInMiOjEsImMiOi...", - # recurrence: { - # frequency: "Weekly", - # interval: 1, - # occurrence_day_set: [ - # 1, - # ], - # occurrence_relative_to_end: false, - # }, - # slot_duration_in_hours: 23, - # total_scheduled_instance_hours: 1219, - # }, - # ], - # } - # # @example Request syntax with placeholder values # # resp = client.describe_scheduled_instance_availability({ @@ -30533,47 +30399,6 @@ def describe_scheduled_instance_availability(params = {}, options = {}) # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. 
# - # - # @example Example: To describe your Scheduled Instances - # - # # This example describes the specified Scheduled Instance. - # - # resp = client.describe_scheduled_instances({ - # scheduled_instance_ids: [ - # "sci-1234-1234-1234-1234-123456789012", - # ], - # }) - # - # resp.to_h outputs the following: - # { - # scheduled_instance_set: [ - # { - # availability_zone: "us-west-2b", - # create_date: Time.parse("2016-01-25T21:43:38.612Z"), - # hourly_price: "0.095", - # instance_count: 1, - # instance_type: "c4.large", - # network_platform: "EC2-VPC", - # next_slot_start_time: Time.parse("2016-01-31T09:00:00Z"), - # platform: "Linux/UNIX", - # recurrence: { - # frequency: "Weekly", - # interval: 1, - # occurrence_day_set: [ - # 1, - # ], - # occurrence_relative_to_end: false, - # occurrence_unit: "", - # }, - # scheduled_instance_id: "sci-1234-1234-1234-1234-123456789012", - # slot_duration_in_hours: 32, - # term_end_date: Time.parse("2017-01-31T09:00:00Z"), - # term_start_date: Time.parse("2016-01-31T09:00:00Z"), - # total_scheduled_instance_hours: 1696, - # }, - # ], - # } - # # @example Request syntax with placeholder values # # resp = client.describe_scheduled_instances({ @@ -32480,14 +32305,14 @@ def describe_spot_instance_requests(params = {}, options = {}) # # January. # # resp = client.describe_spot_price_history({ - # end_time: Time.parse("2014-01-06T08:09:10"), + # end_time: Time.parse("2014-01-06T08:09:10.05Z"), # instance_types: [ # "m1.xlarge", # ], # product_descriptions: [ # "Linux/UNIX (Amazon VPC)", # ], - # start_time: Time.parse("2014-01-06T07:08:09"), + # start_time: Time.parse("2014-01-06T07:08:09.05Z"), # }) # # resp.to_h outputs the following: @@ -34533,6 +34358,8 @@ def describe_verified_access_groups(params = {}, options = {}) # resp.logging_configurations[0].access_logs.kinesis_data_firehose.delivery_status.code #=> String, one of "success", "failed" # resp.logging_configurations[0].access_logs.kinesis_data_firehose.delivery_status.message #=> String # resp.logging_configurations[0].access_logs.kinesis_data_firehose.delivery_stream #=> String + # resp.logging_configurations[0].access_logs.log_version #=> String + # resp.logging_configurations[0].access_logs.include_trust_context #=> Boolean # resp.next_token #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessInstanceLoggingConfigurations AWS API Documentation @@ -37709,22 +37536,14 @@ def disable_vpc_classic_link_dns_support(params = {}, options = {}) # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # - # @example Example: To disassociate an Elastic IP address in EC2-VPC + # @example Example: To disassociate an Elastic IP address # - # # This example disassociates an Elastic IP address from an instance in a VPC. + # # This example disassociates an Elastic IP address from an instance. # # resp = client.disassociate_address({ # association_id: "eipassoc-2bebb745", # }) # - # @example Example: To disassociate an Elastic IP addresses in EC2-Classic - # - # # This example disassociates an Elastic IP address from an instance in EC2-Classic. 
- # - # resp = client.disassociate_address({ - # public_ip: "198.51.100.0", - # }) - # # @example Request syntax with placeholder values # # resp = client.disassociate_address({ @@ -48078,6 +47897,8 @@ def modify_verified_access_instance(params = {}, options = {}) # enabled: false, # required # delivery_stream: "String", # }, + # log_version: "String", + # include_trust_context: false, # }, # dry_run: false, # client_token: "String", @@ -48100,6 +47921,8 @@ def modify_verified_access_instance(params = {}, options = {}) # resp.logging_configuration.access_logs.kinesis_data_firehose.delivery_status.code #=> String, one of "success", "failed" # resp.logging_configuration.access_logs.kinesis_data_firehose.delivery_status.message #=> String # resp.logging_configuration.access_logs.kinesis_data_firehose.delivery_stream #=> String + # resp.logging_configuration.access_logs.log_version #=> String + # resp.logging_configuration.access_logs.include_trust_context #=> Boolean # # @see http://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessInstanceLoggingConfiguration AWS API Documentation # @@ -50154,50 +49977,6 @@ def purchase_reserved_instances_offering(params = {}, options = {}) # # * {Types::PurchaseScheduledInstancesResult#scheduled_instance_set #scheduled_instance_set} => Array<Types::ScheduledInstance> # - # - # @example Example: To purchase a Scheduled Instance - # - # # This example purchases a Scheduled Instance. - # - # resp = client.purchase_scheduled_instances({ - # purchase_requests: [ - # { - # instance_count: 1, - # purchase_token: "eyJ2IjoiMSIsInMiOjEsImMiOi...", - # }, - # ], - # }) - # - # resp.to_h outputs the following: - # { - # scheduled_instance_set: [ - # { - # availability_zone: "us-west-2b", - # create_date: Time.parse("2016-01-25T21:43:38.612Z"), - # hourly_price: "0.095", - # instance_count: 1, - # instance_type: "c4.large", - # network_platform: "EC2-VPC", - # next_slot_start_time: Time.parse("2016-01-31T09:00:00Z"), - # platform: "Linux/UNIX", - # recurrence: { - # frequency: "Weekly", - # interval: 1, - # occurrence_day_set: [ - # 1, - # ], - # occurrence_relative_to_end: false, - # occurrence_unit: "", - # }, - # scheduled_instance_id: "sci-1234-1234-1234-1234-123456789012", - # slot_duration_in_hours: 32, - # term_end_date: Time.parse("2017-01-31T09:00:00Z"), - # term_start_date: Time.parse("2016-01-31T09:00:00Z"), - # total_scheduled_instance_hours: 1696, - # }, - # ], - # } - # # @example Request syntax with placeholder values # # resp = client.purchase_scheduled_instances({ @@ -51026,22 +50805,14 @@ def reject_vpc_peering_connection(params = {}, options = {}) # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # - # @example Example: To release an Elastic IP address for EC2-VPC + # @example Example: To release an Elastic IP address # - # # This example releases an Elastic IP address for use with instances in a VPC. + # # This example releases the specified Elastic IP address. # # resp = client.release_address({ # allocation_id: "eipalloc-64d5890a", # }) # - # @example Example: To release an Elastic IP addresses for EC2-Classic - # - # # This example releases an Elastic IP address for use with instances in EC2-Classic. 
- # - # resp = client.release_address({ - # public_ip: "198.51.100.0", - # }) - # # @example Request syntax with placeholder values # # resp = client.release_address({ @@ -51844,8 +51615,7 @@ def report_instance_status(params = {}, options = {}) # # # This example creates a Spot fleet request with two launch specifications that differ only by Availability Zone. The Spot # # fleet launches the instances in the specified Availability Zone with the lowest price. If your account supports EC2-VPC - # # only, Amazon EC2 launches the Spot instances in the default subnet of the Availability Zone. If your account supports - # # EC2-Classic, Amazon EC2 launches the instances in EC2-Classic in the Availability Zone. + # # only, Amazon EC2 launches the Spot instances in the default subnet of the Availability Zone. # # resp = client.request_spot_fleet({ # spot_fleet_request_config: { @@ -52384,8 +52154,7 @@ def request_spot_fleet(params = {}, options = {}) # # # This example creates a one-time Spot Instance request for five instances in the specified Availability Zone. If your # # account supports EC2-VPC only, Amazon EC2 launches the instances in the default subnet of the specified Availability - # # Zone. If your account supports EC2-Classic, Amazon EC2 launches the instances in EC2-Classic in the specified - # # Availability Zone. + # # Zone. # # resp = client.request_spot_instances({ # instance_count: 5, @@ -52990,21 +52759,6 @@ def reset_snapshot_attribute(params = {}, options = {}) # * {Types::RestoreAddressToClassicResult#public_ip #public_ip} => String # * {Types::RestoreAddressToClassicResult#status #status} => String # - # - # @example Example: To restore an address to EC2-Classic - # - # # This example restores the specified Elastic IP address to the EC2-Classic platform. - # - # resp = client.restore_address_to_classic({ - # public_ip: "198.51.100.0", - # }) - # - # resp.to_h outputs the following: - # { - # public_ip: "198.51.100.0", - # status: "MoveInProgress", - # } - # # @example Request syntax with placeholder values # # resp = client.restore_address_to_classic({ @@ -54511,71 +54265,6 @@ def run_instances(params = {}, options = {}) # # * {Types::RunScheduledInstancesResult#instance_id_set #instance_id_set} => Array<String> # - # - # @example Example: To launch a Scheduled Instance in a VPC - # - # # This example launches the specified Scheduled Instance in a VPC. - # - # resp = client.run_scheduled_instances({ - # instance_count: 1, - # launch_specification: { - # iam_instance_profile: { - # name: "my-iam-role", - # }, - # image_id: "ami-12345678", - # instance_type: "c4.large", - # key_name: "my-key-pair", - # network_interfaces: [ - # { - # associate_public_ip_address: true, - # device_index: 0, - # groups: [ - # "sg-12345678", - # ], - # subnet_id: "subnet-12345678", - # }, - # ], - # }, - # scheduled_instance_id: "sci-1234-1234-1234-1234-123456789012", - # }) - # - # resp.to_h outputs the following: - # { - # instance_id_set: [ - # "i-1234567890abcdef0", - # ], - # } - # - # @example Example: To launch a Scheduled Instance in EC2-Classic - # - # # This example launches the specified Scheduled Instance in EC2-Classic. 
- # - # resp = client.run_scheduled_instances({ - # instance_count: 1, - # launch_specification: { - # iam_instance_profile: { - # name: "my-iam-role", - # }, - # image_id: "ami-12345678", - # instance_type: "c4.large", - # key_name: "my-key-pair", - # placement: { - # availability_zone: "us-west-2b", - # }, - # security_group_ids: [ - # "sg-12345678", - # ], - # }, - # scheduled_instance_id: "sci-1234-1234-1234-1234-123456789012", - # }) - # - # resp.to_h outputs the following: - # { - # instance_id_set: [ - # "i-1234567890abcdef0", - # ], - # } - # # @example Request syntax with placeholder values # # resp = client.run_scheduled_instances({ @@ -56965,7 +56654,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-ec2' - context[:gem_version] = '1.384.0' + context[:gem_version] = '1.385.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-ec2/lib/aws-sdk-ec2/client_api.rb b/gems/aws-sdk-ec2/lib/aws-sdk-ec2/client_api.rb index 6829240cb47..50cbb7ea0d7 100644 --- a/gems/aws-sdk-ec2/lib/aws-sdk-ec2/client_api.rb +++ b/gems/aws-sdk-ec2/lib/aws-sdk-ec2/client_api.rb @@ -185,6 +185,7 @@ module ClientApi AutoPlacement = Shapes::StringShape.new(name: 'AutoPlacement') AutoRecoveryFlag = Shapes::BooleanShape.new(name: 'AutoRecoveryFlag') AvailabilityZone = Shapes::StructureShape.new(name: 'AvailabilityZone') + AvailabilityZoneId = Shapes::StringShape.new(name: 'AvailabilityZoneId') AvailabilityZoneList = Shapes::ListShape.new(name: 'AvailabilityZoneList') AvailabilityZoneMessage = Shapes::StructureShape.new(name: 'AvailabilityZoneMessage') AvailabilityZoneMessageList = Shapes::ListShape.new(name: 'AvailabilityZoneMessageList') @@ -4284,8 +4285,8 @@ module ClientApi CreateCapacityReservationRequest.add_member(:client_token, Shapes::ShapeRef.new(shape: String, location_name: "ClientToken")) CreateCapacityReservationRequest.add_member(:instance_type, Shapes::ShapeRef.new(shape: String, required: true, location_name: "InstanceType")) CreateCapacityReservationRequest.add_member(:instance_platform, Shapes::ShapeRef.new(shape: CapacityReservationInstancePlatform, required: true, location_name: "InstancePlatform")) - CreateCapacityReservationRequest.add_member(:availability_zone, Shapes::ShapeRef.new(shape: String, location_name: "AvailabilityZone")) - CreateCapacityReservationRequest.add_member(:availability_zone_id, Shapes::ShapeRef.new(shape: String, location_name: "AvailabilityZoneId")) + CreateCapacityReservationRequest.add_member(:availability_zone, Shapes::ShapeRef.new(shape: AvailabilityZoneName, location_name: "AvailabilityZone")) + CreateCapacityReservationRequest.add_member(:availability_zone_id, Shapes::ShapeRef.new(shape: AvailabilityZoneId, location_name: "AvailabilityZoneId")) CreateCapacityReservationRequest.add_member(:tenancy, Shapes::ShapeRef.new(shape: CapacityReservationTenancy, location_name: "Tenancy")) CreateCapacityReservationRequest.add_member(:instance_count, Shapes::ShapeRef.new(shape: Integer, required: true, location_name: "InstanceCount")) CreateCapacityReservationRequest.add_member(:ebs_optimized, Shapes::ShapeRef.new(shape: Boolean, location_name: "EbsOptimized")) @@ -14530,6 +14531,8 @@ module ClientApi VerifiedAccessLogOptions.add_member(:s3, Shapes::ShapeRef.new(shape: VerifiedAccessLogS3DestinationOptions, location_name: "S3")) VerifiedAccessLogOptions.add_member(:cloud_watch_logs, Shapes::ShapeRef.new(shape: VerifiedAccessLogCloudWatchLogsDestinationOptions, location_name: 
"CloudWatchLogs")) VerifiedAccessLogOptions.add_member(:kinesis_data_firehose, Shapes::ShapeRef.new(shape: VerifiedAccessLogKinesisDataFirehoseDestinationOptions, location_name: "KinesisDataFirehose")) + VerifiedAccessLogOptions.add_member(:log_version, Shapes::ShapeRef.new(shape: String, location_name: "LogVersion")) + VerifiedAccessLogOptions.add_member(:include_trust_context, Shapes::ShapeRef.new(shape: Boolean, location_name: "IncludeTrustContext")) VerifiedAccessLogOptions.struct_class = Types::VerifiedAccessLogOptions VerifiedAccessLogS3Destination.add_member(:enabled, Shapes::ShapeRef.new(shape: Boolean, location_name: "enabled")) @@ -14548,6 +14551,8 @@ module ClientApi VerifiedAccessLogs.add_member(:s3, Shapes::ShapeRef.new(shape: VerifiedAccessLogS3Destination, location_name: "s3")) VerifiedAccessLogs.add_member(:cloud_watch_logs, Shapes::ShapeRef.new(shape: VerifiedAccessLogCloudWatchLogsDestination, location_name: "cloudWatchLogs")) VerifiedAccessLogs.add_member(:kinesis_data_firehose, Shapes::ShapeRef.new(shape: VerifiedAccessLogKinesisDataFirehoseDestination, location_name: "kinesisDataFirehose")) + VerifiedAccessLogs.add_member(:log_version, Shapes::ShapeRef.new(shape: String, location_name: "logVersion")) + VerifiedAccessLogs.add_member(:include_trust_context, Shapes::ShapeRef.new(shape: Boolean, location_name: "includeTrustContext")) VerifiedAccessLogs.struct_class = Types::VerifiedAccessLogs VerifiedAccessTrustProvider.add_member(:verified_access_trust_provider_id, Shapes::ShapeRef.new(shape: String, location_name: "verifiedAccessTrustProviderId")) diff --git a/gems/aws-sdk-ec2/lib/aws-sdk-ec2/types.rb b/gems/aws-sdk-ec2/lib/aws-sdk-ec2/types.rb index 4883f0a3a4e..2a0526f1308 100644 --- a/gems/aws-sdk-ec2/lib/aws-sdk-ec2/types.rb +++ b/gems/aws-sdk-ec2/lib/aws-sdk-ec2/types.rb @@ -63149,7 +63149,7 @@ class VerifiedAccessLogKinesisDataFirehoseDestinationOptions < Struct.new( include Aws::Structure end - # Describes the destinations for Verified Access logs. + # Options for Verified Access logs. # # @!attribute [rw] s3 # Sends Verified Access logs to Amazon S3. @@ -63163,12 +63163,24 @@ class VerifiedAccessLogKinesisDataFirehoseDestinationOptions < Struct.new( # Sends Verified Access logs to Kinesis. # @return [Types::VerifiedAccessLogKinesisDataFirehoseDestinationOptions] # + # @!attribute [rw] log_version + # The logging version to use. + # + # Valid values: `ocsf-0.1` \| `ocsf-1.0.0-rc.2` + # @return [String] + # + # @!attribute [rw] include_trust_context + # Include trust data sent by trust providers into the logs. + # @return [Boolean] + # # @see http://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VerifiedAccessLogOptions AWS API Documentation # class VerifiedAccessLogOptions < Struct.new( :s3, :cloud_watch_logs, - :kinesis_data_firehose) + :kinesis_data_firehose, + :log_version, + :include_trust_context) SENSITIVE = [] include Aws::Structure end @@ -63237,7 +63249,7 @@ class VerifiedAccessLogS3DestinationOptions < Struct.new( include Aws::Structure end - # Describes the destinations for Verified Access logs. + # Describes the options for Verified Access logs. # # @!attribute [rw] s3 # Amazon S3 logging options. @@ -63251,12 +63263,22 @@ class VerifiedAccessLogS3DestinationOptions < Struct.new( # Kinesis logging destination. # @return [Types::VerifiedAccessLogKinesisDataFirehoseDestination] # + # @!attribute [rw] log_version + # Describes current setting for the logging version. 
+ # @return [String] + # + # @!attribute [rw] include_trust_context + # Describes current setting for including trust data into the logs. + # @return [Boolean] + # # @see http://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VerifiedAccessLogs AWS API Documentation # class VerifiedAccessLogs < Struct.new( :s3, :cloud_watch_logs, - :kinesis_data_firehose) + :kinesis_data_firehose, + :log_version, + :include_trust_context) SENSITIVE = [] include Aws::Structure end diff --git a/gems/aws-sdk-ecs/CHANGELOG.md b/gems/aws-sdk-ecs/CHANGELOG.md index 170ffe0d25c..06e039ca229 100644 --- a/gems/aws-sdk-ecs/CHANGELOG.md +++ b/gems/aws-sdk-ecs/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.122.0 (2023-06-19) +------------------ + +* Feature - Documentation only update to address various tickets. + 1.121.0 (2023-06-15) ------------------ diff --git a/gems/aws-sdk-ecs/VERSION b/gems/aws-sdk-ecs/VERSION index 83bd3452721..2265fa2d9b7 100644 --- a/gems/aws-sdk-ecs/VERSION +++ b/gems/aws-sdk-ecs/VERSION @@ -1 +1 @@ -1.121.0 +1.122.0 diff --git a/gems/aws-sdk-ecs/lib/aws-sdk-ecs.rb b/gems/aws-sdk-ecs/lib/aws-sdk-ecs.rb index ca96874e1f0..9277f8262ff 100644 --- a/gems/aws-sdk-ecs/lib/aws-sdk-ecs.rb +++ b/gems/aws-sdk-ecs/lib/aws-sdk-ecs.rb @@ -53,6 +53,6 @@ # @!group service module Aws::ECS - GEM_VERSION = '1.121.0' + GEM_VERSION = '1.122.0' end diff --git a/gems/aws-sdk-ecs/lib/aws-sdk-ecs/client.rb b/gems/aws-sdk-ecs/lib/aws-sdk-ecs/client.rb index fcf1b840c3b..f8d1db96ba3 100644 --- a/gems/aws-sdk-ecs/lib/aws-sdk-ecs/client.rb +++ b/gems/aws-sdk-ecs/lib/aws-sdk-ecs/client.rb @@ -1164,6 +1164,9 @@ def create_cluster(params = {}, options = {}) # resources][1] in the *Amazon Elastic Container Service Developer # Guide*. # + # When you use Amazon ECS managed tags, you need to set the + # `propagateTags` request parameter. + # # # # [1]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html @@ -1174,6 +1177,8 @@ def create_cluster(params = {}, options = {}) # can only be propagated to the task during task creation. To add tags # to a task after task creation, use the [TagResource][1] API action. # + # The default is `NONE`. + # # # # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html @@ -2356,6 +2361,14 @@ def delete_service(params = {}, options = {}) # A task definition revision will stay in `DELETE_IN_PROGRESS` status # until all the associated tasks and services have been terminated. # + # When you delete all `INACTIVE` task definition revisions, the task + # definition name is not displayed in the console and not returned in + # the API. If a task definition revisions are in the + # `DELETE_IN_PROGRESS` state, the task definition name is displayed in + # the console and returned in the API. The task definition name is + # retained by Amazon ECS and the revision is incremented the next time + # you create a task definition with that name. 
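The managed-tags note added above for ECS says that when Amazon ECS managed tags are in use, `propagateTags` must also be set explicitly, since it defaults to `NONE` and nothing is propagated otherwise. A minimal sketch of a task launch with both set; parameter names follow the RunTask API and are not all shown in this hunk:

    require 'aws-sdk-ecs'

    ecs = Aws::ECS::Client.new

    # Tags can only be propagated at task creation time; use TagResource afterwards.
    ecs.run_task(
      cluster: 'my-cluster',
      task_definition: 'my-task:1',
      enable_ecs_managed_tags: true,
      propagate_tags: 'TASK_DEFINITION'
    )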
+ # # # # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeregisterTaskDefinition.html @@ -9709,7 +9722,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-ecs' - context[:gem_version] = '1.121.0' + context[:gem_version] = '1.122.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-ecs/lib/aws-sdk-ecs/types.rb b/gems/aws-sdk-ecs/lib/aws-sdk-ecs/types.rb index ce0f8fe6683..ccd58566964 100644 --- a/gems/aws-sdk-ecs/lib/aws-sdk-ecs/types.rb +++ b/gems/aws-sdk-ecs/lib/aws-sdk-ecs/types.rb @@ -2828,6 +2828,9 @@ class CreateClusterResponse < Struct.new( # ECS resources][1] in the *Amazon Elastic Container Service Developer # Guide*. # + # When you use Amazon ECS managed tags, you need to set the + # `propagateTags` request parameter. + # # # # [1]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html @@ -2840,6 +2843,8 @@ class CreateClusterResponse < Struct.new( # tags to a task after task creation, use the [TagResource][1] API # action. # + # The default is `NONE`. + # # # # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html diff --git a/gems/aws-sdk-glue/CHANGELOG.md b/gems/aws-sdk-glue/CHANGELOG.md index 0f53a7cb96d..50351dc512a 100644 --- a/gems/aws-sdk-glue/CHANGELOG.md +++ b/gems/aws-sdk-glue/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.142.0 (2023-06-19) +------------------ + +* Feature - This release adds support for creating cross region table/database resource links + 1.141.0 (2023-06-15) ------------------ diff --git a/gems/aws-sdk-glue/VERSION b/gems/aws-sdk-glue/VERSION index 642c6da52ce..246c5532a6a 100644 --- a/gems/aws-sdk-glue/VERSION +++ b/gems/aws-sdk-glue/VERSION @@ -1 +1 @@ -1.141.0 +1.142.0 diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue.rb index 74b4efd12cd..21385f7cf9a 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue.rb @@ -52,6 +52,6 @@ # @!group service module Aws::Glue - GEM_VERSION = '1.141.0' + GEM_VERSION = '1.142.0' end diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb index 83771f49516..169d1cd3880 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb @@ -2946,6 +2946,7 @@ def create_data_quality_ruleset(params = {}, options = {}) # target_database: { # catalog_id: "CatalogIdString", # database_name: "NameString", + # region: "NameString", # }, # federated_database: { # identifier: "FederationIdentifier", @@ -4280,6 +4281,7 @@ def create_session(params = {}, options = {}) # catalog_id: "CatalogIdString", # database_name: "NameString", # name: "NameString", + # region: "NameString", # }, # }, # partition_indexes: [ @@ -6503,6 +6505,7 @@ def get_data_quality_ruleset_evaluation_run(params = {}, options = {}) # resp.database.create_table_default_permissions[0].permissions[0] #=> String, one of "ALL", "SELECT", "ALTER", "DROP", "DELETE", "INSERT", "CREATE_DATABASE", "CREATE_TABLE", "DATA_LOCATION_ACCESS" # resp.database.target_database.catalog_id #=> String # resp.database.target_database.database_name #=> String + # resp.database.target_database.region #=> String # resp.database.catalog_id #=> String # resp.database.federated_database.identifier #=> String # resp.database.federated_database.connection_name #=> String @@ -6573,6 +6576,7 @@ def get_database(params = {}, options = {}) # 
resp.database_list[0].create_table_default_permissions[0].permissions[0] #=> String, one of "ALL", "SELECT", "ALTER", "DROP", "DELETE", "INSERT", "CREATE_DATABASE", "CREATE_TABLE", "DATA_LOCATION_ACCESS" # resp.database_list[0].target_database.catalog_id #=> String # resp.database_list[0].target_database.database_name #=> String + # resp.database_list[0].target_database.region #=> String # resp.database_list[0].catalog_id #=> String # resp.database_list[0].federated_database.identifier #=> String # resp.database_list[0].federated_database.connection_name #=> String @@ -10108,6 +10112,7 @@ def get_statement(params = {}, options = {}) # resp.table.target_table.catalog_id #=> String # resp.table.target_table.database_name #=> String # resp.table.target_table.name #=> String + # resp.table.target_table.region #=> String # resp.table.catalog_id #=> String # resp.table.version_id #=> String # resp.table.federated_table.identifier #=> String @@ -10217,6 +10222,7 @@ def get_table(params = {}, options = {}) # resp.table_version.table.target_table.catalog_id #=> String # resp.table_version.table.target_table.database_name #=> String # resp.table_version.table.target_table.name #=> String + # resp.table_version.table.target_table.region #=> String # resp.table_version.table.catalog_id #=> String # resp.table_version.table.version_id #=> String # resp.table_version.table.federated_table.identifier #=> String @@ -10335,6 +10341,7 @@ def get_table_version(params = {}, options = {}) # resp.table_versions[0].table.target_table.catalog_id #=> String # resp.table_versions[0].table.target_table.database_name #=> String # resp.table_versions[0].table.target_table.name #=> String + # resp.table_versions[0].table.target_table.region #=> String # resp.table_versions[0].table.catalog_id #=> String # resp.table_versions[0].table.version_id #=> String # resp.table_versions[0].table.federated_table.identifier #=> String @@ -10464,6 +10471,7 @@ def get_table_versions(params = {}, options = {}) # resp.table_list[0].target_table.catalog_id #=> String # resp.table_list[0].target_table.database_name #=> String # resp.table_list[0].target_table.name #=> String + # resp.table_list[0].target_table.region #=> String # resp.table_list[0].catalog_id #=> String # resp.table_list[0].version_id #=> String # resp.table_list[0].federated_table.identifier #=> String @@ -11050,6 +11058,7 @@ def get_unfiltered_partitions_metadata(params = {}, options = {}) # resp.table.target_table.catalog_id #=> String # resp.table.target_table.database_name #=> String # resp.table.target_table.name #=> String + # resp.table.target_table.region #=> String # resp.table.catalog_id #=> String # resp.table.version_id #=> String # resp.table.federated_table.identifier #=> String @@ -13391,6 +13400,7 @@ def run_statement(params = {}, options = {}) # resp.table_list[0].target_table.catalog_id #=> String # resp.table_list[0].target_table.database_name #=> String # resp.table_list[0].target_table.name #=> String + # resp.table_list[0].target_table.region #=> String # resp.table_list[0].catalog_id #=> String # resp.table_list[0].version_id #=> String # resp.table_list[0].federated_table.identifier #=> String @@ -14959,6 +14969,7 @@ def update_data_quality_ruleset(params = {}, options = {}) # target_database: { # catalog_id: "CatalogIdString", # database_name: "NameString", + # region: "NameString", # }, # federated_database: { # identifier: "FederationIdentifier", @@ -15654,6 +15665,7 @@ def update_source_control_from_job(params = {}, options = {}) # 
catalog_id: "CatalogIdString", # database_name: "NameString", # name: "NameString", + # region: "NameString", # }, # }, # skip_archive: false, @@ -15868,7 +15880,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-glue' - context[:gem_version] = '1.141.0' + context[:gem_version] = '1.142.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb index 73ccb3130a3..f261d6bb916 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb @@ -2361,6 +2361,7 @@ module ClientApi DatabaseIdentifier.add_member(:catalog_id, Shapes::ShapeRef.new(shape: CatalogIdString, location_name: "CatalogId")) DatabaseIdentifier.add_member(:database_name, Shapes::ShapeRef.new(shape: NameString, location_name: "DatabaseName")) + DatabaseIdentifier.add_member(:region, Shapes::ShapeRef.new(shape: NameString, location_name: "Region")) DatabaseIdentifier.struct_class = Types::DatabaseIdentifier DatabaseInput.add_member(:name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "Name")) @@ -4998,6 +4999,7 @@ module ClientApi TableIdentifier.add_member(:catalog_id, Shapes::ShapeRef.new(shape: CatalogIdString, location_name: "CatalogId")) TableIdentifier.add_member(:database_name, Shapes::ShapeRef.new(shape: NameString, location_name: "DatabaseName")) TableIdentifier.add_member(:name, Shapes::ShapeRef.new(shape: NameString, location_name: "Name")) + TableIdentifier.add_member(:region, Shapes::ShapeRef.new(shape: NameString, location_name: "Region")) TableIdentifier.struct_class = Types::TableIdentifier TableInput.add_member(:name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "Name")) diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb index e20ccf71e77..a7a29e4ed5c 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb @@ -6142,11 +6142,16 @@ class Database < Struct.new( # The name of the catalog database. # @return [String] # + # @!attribute [rw] region + # Region of the target database. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DatabaseIdentifier AWS API Documentation # class DatabaseIdentifier < Struct.new( :catalog_id, - :database_name) + :database_name, + :region) SENSITIVE = [] include Aws::Structure end @@ -19274,12 +19279,17 @@ class TableError < Struct.new( # The name of the target table. # @return [String] # + # @!attribute [rw] region + # Region of the target table. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TableIdentifier AWS API Documentation # class TableIdentifier < Struct.new( :catalog_id, :database_name, - :name) + :name, + :region) SENSITIVE = [] include Aws::Structure end diff --git a/gems/aws-sdk-pricing/CHANGELOG.md b/gems/aws-sdk-pricing/CHANGELOG.md index 1b7ea7c5942..65a39901338 100644 --- a/gems/aws-sdk-pricing/CHANGELOG.md +++ b/gems/aws-sdk-pricing/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.46.0 (2023-06-19) +------------------ + +* Feature - This release updates the PriceListArn regex pattern. 
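The `region` member added above to Glue's DatabaseIdentifier and TableIdentifier is what backs the cross-Region resource links called out in the Glue changelog. A rough sketch of creating a database resource link that targets another Region (names, account ID, and Regions are placeholders):

    require 'aws-sdk-glue'

    glue = Aws::Glue::Client.new(region: 'us-east-1')

    glue.create_database(
      database_input: {
        name: 'sales_link',                 # the local resource link
        target_database: {
          catalog_id: '123456789012',       # catalog (account) that owns the target
          database_name: 'sales',           # database being linked to
          region: 'eu-west-1'               # new: the target database's Region
        }
      }
    )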
+ 1.45.0 (2023-06-15) ------------------ diff --git a/gems/aws-sdk-pricing/VERSION b/gems/aws-sdk-pricing/VERSION index 50aceaa7b71..0a3db35b241 100644 --- a/gems/aws-sdk-pricing/VERSION +++ b/gems/aws-sdk-pricing/VERSION @@ -1 +1 @@ -1.45.0 +1.46.0 diff --git a/gems/aws-sdk-pricing/features/smoke.feature b/gems/aws-sdk-pricing/features/smoke.feature new file mode 100644 index 00000000000..0aabdc07124 --- /dev/null +++ b/gems/aws-sdk-pricing/features/smoke.feature @@ -0,0 +1,11 @@ +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +Feature: Smoke tests for Pricing + +Background: + Given I create a client in region 'us-west-2' diff --git a/gems/aws-sdk-pricing/features/smoke_step_definitions.rb b/gems/aws-sdk-pricing/features/smoke_step_definitions.rb new file mode 100644 index 00000000000..e633c46b91d --- /dev/null +++ b/gems/aws-sdk-pricing/features/smoke_step_definitions.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +# Shared Smoke Test Definitions +Given(/I create a client in region '(.*?)'/) do |region| + @regional_client = Aws::Pricing::Client.new(region: region) +end + +Given(/I create a client with endpoint '(.*?)'/) do |endpoint| + @regional_client = Aws::Pricing::Client.new(endpoint: endpoint) +end + +When(/I call the operation '(.*?)' with params:/) do |operation, params| + opts = JSON.parse(params, symbolize_names: true) + begin + @regional_client.send(operation.to_sym, opts) + @operation_raised_error = false + rescue StandardError + @operation_raised_error = true + end +end + +Then(/I expect an error was raised/) do + expect(@operation_raised_error).to be_truthy +end + +Then(/I expect an error was not raised/) do + expect(@operation_raised_error).not_to be_truthy +end diff --git a/gems/aws-sdk-pricing/lib/aws-sdk-pricing.rb b/gems/aws-sdk-pricing/lib/aws-sdk-pricing.rb index e78529b75a6..5c110f4fe2e 100644 --- a/gems/aws-sdk-pricing/lib/aws-sdk-pricing.rb +++ b/gems/aws-sdk-pricing/lib/aws-sdk-pricing.rb @@ -16,6 +16,7 @@ require_relative 'aws-sdk-pricing/plugins/endpoints.rb' require_relative 'aws-sdk-pricing/client' require_relative 'aws-sdk-pricing/errors' +require_relative 'aws-sdk-pricing/waiters' require_relative 'aws-sdk-pricing/resource' require_relative 'aws-sdk-pricing/endpoint_parameters' require_relative 'aws-sdk-pricing/endpoint_provider' @@ -52,6 +53,6 @@ # @!group service module Aws::Pricing - GEM_VERSION = '1.45.0' + GEM_VERSION = '1.46.0' end diff --git a/gems/aws-sdk-pricing/lib/aws-sdk-pricing/client.rb b/gems/aws-sdk-pricing/lib/aws-sdk-pricing/client.rb index 34cd875c254..bcc92381d5d 100644 --- a/gems/aws-sdk-pricing/lib/aws-sdk-pricing/client.rb +++ b/gems/aws-sdk-pricing/lib/aws-sdk-pricing/client.rb @@ -417,35 +417,6 @@ def initialize(*args) # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. # - # - # @example Example: To retrieve a list of services and service codes - # - # # Retrieves the service for the given Service Code. 
- # - # resp = client.describe_services({ - # format_version: "aws_v1", - # max_results: 1, - # service_code: "AmazonEC2", - # }) - # - # resp.to_h outputs the following: - # { - # format_version: "aws_v1", - # next_token: "abcdefg123", - # services: [ - # { - # attribute_names: [ - # "volumeType", - # "maxIopsvolume", - # "instanceCapacity10xlarge", - # "locationType", - # "operation", - # ], - # service_code: "AmazonEC2", - # }, - # ], - # } - # # @example Request syntax with placeholder values # # resp = client.describe_services({ @@ -506,30 +477,6 @@ def describe_services(params = {}, options = {}) # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. # - # - # @example Example: To retrieve a list of attribute values - # - # # This operation returns a list of values available for the given attribute. - # - # resp = client.get_attribute_values({ - # attribute_name: "volumeType", - # max_results: 2, - # service_code: "AmazonEC2", - # }) - # - # resp.to_h outputs the following: - # { - # attribute_values: [ - # { - # value: "Throughput Optimized HDD", - # }, - # { - # value: "Provisioned IOPS", - # }, - # ], - # next_token: "GpgauEXAMPLEezucl5LV0w==:7GzYJ0nw0DBTJ2J66EoTIIynE6O1uXwQtTRqioJzQadBnDVgHPzI1en4BUQnPCLpzeBk9RQQAWaFieA4+DapFAGLgk+Z/9/cTw9GldnPOHN98+FdmJP7wKU3QQpQ8MQr5KOeBkIsAqvAQYdL0DkL7tHwPtE5iCEByAmg9gcC/yBU1vAOsf7R3VaNN4M5jMDv3woSWqASSIlBVB6tgW78YL22KhssoItM/jWW+aP6Jqtq4mldxp/ct6DWAl+xLFwHU/CbketimPPXyqHF3/UXDw==", - # } - # # @example Request syntax with placeholder values # # resp = client.get_attribute_values({ @@ -781,7 +728,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-pricing' - context[:gem_version] = '1.45.0' + context[:gem_version] = '1.46.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-pricing/lib/aws-sdk-pricing/client_api.rb b/gems/aws-sdk-pricing/lib/aws-sdk-pricing/client_api.rb index 7e9d53b34a1..a65300b7905 100644 --- a/gems/aws-sdk-pricing/lib/aws-sdk-pricing/client_api.rb +++ b/gems/aws-sdk-pricing/lib/aws-sdk-pricing/client_api.rb @@ -43,7 +43,6 @@ module ClientApi NotFoundException = Shapes::StructureShape.new(name: 'NotFoundException') PriceList = Shapes::StructureShape.new(name: 'PriceList') PriceListArn = Shapes::StringShape.new(name: 'PriceListArn') - PriceListJsonItem = Shapes::StringShape.new(name: 'PriceListJsonItem') PriceListJsonItems = Shapes::ListShape.new(name: 'PriceListJsonItems') PriceLists = Shapes::ListShape.new(name: 'PriceLists') RegionCode = Shapes::StringShape.new(name: 'RegionCode') @@ -51,6 +50,7 @@ module ClientApi ServiceCode = Shapes::StringShape.new(name: 'ServiceCode') ServiceList = Shapes::ListShape.new(name: 'ServiceList') String = Shapes::StringShape.new(name: 'String') + SynthesizedJsonPriceListJsonItem = Shapes::StringShape.new(name: 'SynthesizedJsonPriceListJsonItem') errorMessage = Shapes::StringShape.new(name: 'errorMessage') AccessDeniedException.add_member(:message, Shapes::ShapeRef.new(shape: errorMessage, location_name: "Message")) @@ -66,7 +66,7 @@ module ClientApi DescribeServicesRequest.add_member(:service_code, Shapes::ShapeRef.new(shape: String, location_name: "ServiceCode")) DescribeServicesRequest.add_member(:format_version, Shapes::ShapeRef.new(shape: String, location_name: "FormatVersion")) DescribeServicesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "NextToken")) 
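Since the generated inline examples for DescribeServices and GetAttributeValues are removed above, here is a bare-bones sketch of the same two calls, restricted to parameters that appear in the removed examples. Per the endpoint tests later in this patch, the Pricing API is served from Regions such as us-east-1 and ap-south-1:

    require 'aws-sdk-pricing'

    pricing = Aws::Pricing::Client.new(region: 'us-east-1')

    # List one service and its filterable attribute names.
    services = pricing.describe_services(
      service_code: 'AmazonEC2',
      format_version: 'aws_v1',
      max_results: 1
    )

    # List a couple of values for one of those attributes.
    volume_types = pricing.get_attribute_values(
      service_code: 'AmazonEC2',
      attribute_name: 'volumeType',
      max_results: 2
    )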
- DescribeServicesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "MaxResults", metadata: {"box"=>true})) + DescribeServicesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "MaxResults")) DescribeServicesRequest.struct_class = Types::DescribeServicesRequest DescribeServicesResponse.add_member(:services, Shapes::ShapeRef.new(shape: ServiceList, location_name: "Services")) @@ -89,7 +89,7 @@ module ClientApi GetAttributeValuesRequest.add_member(:service_code, Shapes::ShapeRef.new(shape: String, required: true, location_name: "ServiceCode")) GetAttributeValuesRequest.add_member(:attribute_name, Shapes::ShapeRef.new(shape: String, required: true, location_name: "AttributeName")) GetAttributeValuesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "NextToken")) - GetAttributeValuesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "MaxResults", metadata: {"box"=>true})) + GetAttributeValuesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "MaxResults")) GetAttributeValuesRequest.struct_class = Types::GetAttributeValuesRequest GetAttributeValuesResponse.add_member(:attribute_values, Shapes::ShapeRef.new(shape: AttributeValueList, location_name: "AttributeValues")) @@ -107,7 +107,7 @@ module ClientApi GetProductsRequest.add_member(:filters, Shapes::ShapeRef.new(shape: Filters, location_name: "Filters")) GetProductsRequest.add_member(:format_version, Shapes::ShapeRef.new(shape: String, location_name: "FormatVersion")) GetProductsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "NextToken")) - GetProductsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "MaxResults", metadata: {"box"=>true})) + GetProductsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "MaxResults")) GetProductsRequest.struct_class = Types::GetProductsRequest GetProductsResponse.add_member(:format_version, Shapes::ShapeRef.new(shape: String, location_name: "FormatVersion")) @@ -145,7 +145,7 @@ module ClientApi PriceList.add_member(:file_formats, Shapes::ShapeRef.new(shape: FileFormats, location_name: "FileFormats")) PriceList.struct_class = Types::PriceList - PriceListJsonItems.member = Shapes::ShapeRef.new(shape: PriceListJsonItem, metadata: {"jsonvalue"=>true}) + PriceListJsonItems.member = Shapes::ShapeRef.new(shape: SynthesizedJsonPriceListJsonItem, metadata: {"jsonvalue"=>true}) PriceLists.member = Shapes::ShapeRef.new(shape: PriceList) @@ -181,10 +181,10 @@ module ClientApi o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: DescribeServicesRequest) o.output = Shapes::ShapeRef.new(shape: DescribeServicesResponse) - o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) - o.errors << Shapes::ShapeRef.new(shape: NotFoundException) o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) o.errors << Shapes::ShapeRef.new(shape: ExpiredNextTokenException) o[:pager] = Aws::Pager.new( limit_key: "max_results", @@ -200,10 +200,10 @@ module ClientApi o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: GetAttributeValuesRequest) o.output = Shapes::ShapeRef.new(shape: GetAttributeValuesResponse) - 
o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) - o.errors << Shapes::ShapeRef.new(shape: NotFoundException) o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) o.errors << Shapes::ShapeRef.new(shape: ExpiredNextTokenException) o[:pager] = Aws::Pager.new( limit_key: "max_results", @@ -219,10 +219,10 @@ module ClientApi o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: GetPriceListFileUrlRequest) o.output = Shapes::ShapeRef.new(shape: GetPriceListFileUrlResponse) - o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) o.errors << Shapes::ShapeRef.new(shape: NotFoundException) o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException) + o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) end) api.add_operation(:get_products, Seahorse::Model::Operation.new.tap do |o| @@ -231,10 +231,10 @@ module ClientApi o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: GetProductsRequest) o.output = Shapes::ShapeRef.new(shape: GetProductsResponse) - o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) - o.errors << Shapes::ShapeRef.new(shape: NotFoundException) o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) o.errors << Shapes::ShapeRef.new(shape: ExpiredNextTokenException) o[:pager] = Aws::Pager.new( limit_key: "max_results", @@ -250,12 +250,12 @@ module ClientApi o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: ListPriceListsRequest) o.output = Shapes::ShapeRef.new(shape: ListPriceListsResponse) - o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) - o.errors << Shapes::ShapeRef.new(shape: NotFoundException) o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException) - o.errors << Shapes::ShapeRef.new(shape: ExpiredNextTokenException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException) + o.errors << Shapes::ShapeRef.new(shape: InternalErrorException) + o.errors << Shapes::ShapeRef.new(shape: ExpiredNextTokenException) o[:pager] = Aws::Pager.new( limit_key: "max_results", tokens: { diff --git a/gems/aws-sdk-pricing/lib/aws-sdk-pricing/waiters.rb b/gems/aws-sdk-pricing/lib/aws-sdk-pricing/waiters.rb new file mode 100644 index 00000000000..3976f95f626 --- /dev/null +++ b/gems/aws-sdk-pricing/lib/aws-sdk-pricing/waiters.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +require 'aws-sdk-core/waiters' + +module Aws::Pricing + module Waiters + end +end diff --git a/gems/aws-sdk-pricing/spec/endpoint_provider_spec.rb b/gems/aws-sdk-pricing/spec/endpoint_provider_spec.rb index ec7b3501eb8..9ca32bc48f9 100644 --- a/gems/aws-sdk-pricing/spec/endpoint_provider_spec.rb +++ b/gems/aws-sdk-pricing/spec/endpoint_provider_spec.rb @@ -20,7 +20,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>false, :region=>"ap-south-1"}) + params = EndpointParameters.new(**{:region=>"ap-south-1", :use_fips=>false, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -34,7 +34,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>false, :region=>"us-east-1"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>false, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -48,7 +48,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :use_dual_stack=>true, :region=>"us-east-1"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>true, :use_dual_stack=>true}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -62,7 +62,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :use_dual_stack=>false, :region=>"us-east-1"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>true, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -76,7 +76,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>true, :region=>"us-east-1"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>false, :use_dual_stack=>true}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -90,7 +90,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :use_dual_stack=>true, :region=>"cn-north-1"}) + params = EndpointParameters.new(**{:region=>"cn-north-1", :use_fips=>true, :use_dual_stack=>true}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -104,7 +104,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, 
:use_dual_stack=>false, :region=>"cn-north-1"}) + params = EndpointParameters.new(**{:region=>"cn-north-1", :use_fips=>true, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -118,7 +118,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>true, :region=>"cn-north-1"}) + params = EndpointParameters.new(**{:region=>"cn-north-1", :use_fips=>false, :use_dual_stack=>true}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -132,7 +132,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>false, :region=>"cn-north-1"}) + params = EndpointParameters.new(**{:region=>"cn-north-1", :use_fips=>false, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -146,7 +146,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :use_dual_stack=>true, :region=>"us-gov-east-1"}) + params = EndpointParameters.new(**{:region=>"us-gov-east-1", :use_fips=>true, :use_dual_stack=>true}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -160,7 +160,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :use_dual_stack=>false, :region=>"us-gov-east-1"}) + params = EndpointParameters.new(**{:region=>"us-gov-east-1", :use_fips=>true, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -174,7 +174,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>true, :region=>"us-gov-east-1"}) + params = EndpointParameters.new(**{:region=>"us-gov-east-1", :use_fips=>false, :use_dual_stack=>true}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -188,7 +188,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>false, :region=>"us-gov-east-1"}) + params = EndpointParameters.new(**{:region=>"us-gov-east-1", :use_fips=>false, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -196,13 +196,26 @@ module Aws::Pricing end end + context 'For region us-iso-east-1 with FIPS enabled and DualStack enabled' do + let(:expected) do + {"error"=>"FIPS and DualStack are enabled, but this partition does not support one or both"} + end + + it 'produces the expected output from the EndpointProvider' do + 
params = EndpointParameters.new(**{:region=>"us-iso-east-1", :use_fips=>true, :use_dual_stack=>true}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + context 'For region us-iso-east-1 with FIPS enabled and DualStack disabled' do let(:expected) do {"endpoint"=>{"url"=>"https://api.pricing-fips.us-iso-east-1.c2s.ic.gov"}} end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :use_dual_stack=>false, :region=>"us-iso-east-1"}) + params = EndpointParameters.new(**{:region=>"us-iso-east-1", :use_fips=>true, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -210,13 +223,26 @@ module Aws::Pricing end end + context 'For region us-iso-east-1 with FIPS disabled and DualStack enabled' do + let(:expected) do + {"error"=>"DualStack is enabled but this partition does not support DualStack"} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-iso-east-1", :use_fips=>false, :use_dual_stack=>true}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + context 'For region us-iso-east-1 with FIPS disabled and DualStack disabled' do let(:expected) do {"endpoint"=>{"url"=>"https://api.pricing.us-iso-east-1.c2s.ic.gov"}} end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>false, :region=>"us-iso-east-1"}) + params = EndpointParameters.new(**{:region=>"us-iso-east-1", :use_fips=>false, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -224,13 +250,26 @@ module Aws::Pricing end end + context 'For region us-isob-east-1 with FIPS enabled and DualStack enabled' do + let(:expected) do + {"error"=>"FIPS and DualStack are enabled, but this partition does not support one or both"} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>true, :use_dual_stack=>true}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + context 'For region us-isob-east-1 with FIPS enabled and DualStack disabled' do let(:expected) do {"endpoint"=>{"url"=>"https://api.pricing-fips.us-isob-east-1.sc2s.sgov.gov"}} end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :use_dual_stack=>false, :region=>"us-isob-east-1"}) + params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>true, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -238,13 +277,26 @@ module Aws::Pricing end end + context 'For region us-isob-east-1 with FIPS disabled and DualStack enabled' do + let(:expected) do + {"error"=>"DualStack is enabled but this partition does not support DualStack"} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>false, 
:use_dual_stack=>true}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + context 'For region us-isob-east-1 with FIPS disabled and DualStack disabled' do let(:expected) do {"endpoint"=>{"url"=>"https://api.pricing.us-isob-east-1.sc2s.sgov.gov"}} end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>false, :region=>"us-isob-east-1"}) + params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>false, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -258,7 +310,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>false, :region=>"us-east-1", :endpoint=>"https://example.com"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>false, :use_dual_stack=>false, :endpoint=>"https://example.com"}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -286,7 +338,7 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :use_dual_stack=>false, :region=>"us-east-1", :endpoint=>"https://example.com"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>true, :use_dual_stack=>false, :endpoint=>"https://example.com"}) expect do subject.resolve_endpoint(params) end.to raise_error(ArgumentError, expected['error']) @@ -299,7 +351,20 @@ module Aws::Pricing end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>true, :region=>"us-east-1", :endpoint=>"https://example.com"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>false, :use_dual_stack=>true, :endpoint=>"https://example.com"}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + + context 'Missing region' do + let(:expected) do + {"error"=>"Invalid Configuration: Missing Region"} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{}) expect do subject.resolve_endpoint(params) end.to raise_error(ArgumentError, expected['error']) diff --git a/gems/aws-sdk-route53domains/CHANGELOG.md b/gems/aws-sdk-route53domains/CHANGELOG.md index b025416d13a..f2181f8294f 100644 --- a/gems/aws-sdk-route53domains/CHANGELOG.md +++ b/gems/aws-sdk-route53domains/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.46.0 (2023-06-19) +------------------ + +* Feature - Update MaxItems upper bound to 1000 for ListPricesRequest + 1.45.0 (2023-06-15) ------------------ diff --git a/gems/aws-sdk-route53domains/VERSION b/gems/aws-sdk-route53domains/VERSION index 50aceaa7b71..0a3db35b241 100644 --- a/gems/aws-sdk-route53domains/VERSION +++ b/gems/aws-sdk-route53domains/VERSION @@ -1 +1 @@ -1.45.0 +1.46.0 diff --git a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains.rb b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains.rb index 44f1d3b6af2..1d08122f5b8 100644 --- a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains.rb +++ 
b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains.rb @@ -52,6 +52,6 @@ # @!group service module Aws::Route53Domains - GEM_VERSION = '1.45.0' + GEM_VERSION = '1.46.0' end diff --git a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client.rb b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client.rb index d7fc950425f..4ec31c59296 100644 --- a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client.rb +++ b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client.rb @@ -1477,10 +1477,8 @@ def push_domain(params = {}, options = {}) req.send_request(options) end - # This operation registers a domain. Domains are registered either by - # Amazon Registrar (for .com, .net, and .org domains) or by our - # registrar associate, Gandi (for all other domains). For some top-level - # domains (TLDs), this operation requires extra parameters. + # This operation registers a domain. For some top-level domains (TLDs), + # this operation requires extra parameters. # # When you register a domain, Amazon Route 53 does the following: # @@ -1494,14 +1492,13 @@ def push_domain(params = {}, options = {}) # date so you can choose whether to renew the registration. # # * Optionally enables privacy protection, so WHOIS queries return - # contact information either for Amazon Registrar (for .com, .net, and - # .org domains) or for our registrar associate, Gandi (for all other - # TLDs). If you don't enable privacy protection, WHOIS queries return - # the information that you entered for the administrative, registrant, - # and technical contacts. + # contact for the registrar or the phrase "REDACTED FOR PRIVACY", or + # "On behalf of <domain name> owner." If you don't enable + # privacy protection, WHOIS queries return the information that you + # entered for the administrative, registrant, and technical contacts. # - # You must specify the same privacy setting for the administrative, - # registrant, and technical contacts. + # While some domains may allow different privacy settings per contact, + # we recommend specifying the same privacy setting for all contacts. # # # @@ -1926,10 +1923,7 @@ def retrieve_domain_auth_code(params = {}, options = {}) req.send_request(options) end - # Transfers a domain from another registrar to Amazon Route 53. When the - # transfer is complete, the domain is registered either with Amazon - # Registrar (for .com, .net, and .org domains) or with our registrar - # associate, Gandi (for all other TLDs). + # Transfers a domain from another registrar to Amazon Route 53. # # For more information about transferring domains, see the following # topics: @@ -2030,13 +2024,11 @@ def retrieve_domain_auth_code(params = {}, options = {}) # @option params [Boolean] :privacy_protect_admin_contact # Whether you want to conceal contact information from WHOIS queries. If # you specify `true`, WHOIS ("who is") queries return contact - # information either for Amazon Registrar (for .com, .net, and .org - # domains) or for our registrar associate, Gandi (for all other TLDs). - # If you specify `false`, WHOIS queries return the information that you - # entered for the admin contact. + # information for the registrar, the phrase "REDACTED FOR PRIVACY", or + # "On behalf of <domain name> owner.". # - # You must specify the same privacy setting for the administrative, - # registrant, and technical contacts. + # While some domains may allow different privacy settings per contact, + # we recommend specifying the same privacy setting for all contacts. 
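The reworded privacy guidance above (registrar contact details, "REDACTED FOR PRIVACY", or "On behalf of <domain name> owner.") applies to RegisterDomain, TransferDomain, and UpdateDomainContactPrivacy alike. A hedged sketch of enabling protection for all three contacts with the same value, as recommended; the parameter names follow the UpdateDomainContactPrivacy API and, apart from the admin flag, do not appear in this hunk:

    require 'aws-sdk-route53domains'

    # Route 53 Domains is a global service fronted by us-east-1.
    r53d = Aws::Route53Domains::Client.new(region: 'us-east-1')

    r53d.update_domain_contact_privacy(
      domain_name: 'example.com',
      admin_privacy: true,
      registrant_privacy: true,
      tech_privacy: true
    )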
# # # @@ -2265,7 +2257,8 @@ def transfer_domain_to_another_aws_account(params = {}, options = {}) # Provides detailed contact information. # # @option params [Types::Consent] :consent - # Customer's consent for the owner change request. + # Customer's consent for the owner change request. Required if the + # domain is not free (consent price is more than $0.00). # # @return [Types::UpdateDomainContactResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # @@ -2358,13 +2351,13 @@ def update_domain_contact(params = {}, options = {}) end # This operation updates the specified domain contact's privacy - # setting. When privacy protection is enabled, contact information such - # as email address is replaced either with contact information for - # Amazon Registrar (for .com, .net, and .org domains) or with contact - # information for our registrar associate, Gandi. + # setting. When privacy protection is enabled, your contact information + # is replaced with contact information for the registrar or with the + # phrase "REDACTED FOR PRIVACY", or "On behalf of <domain name> + # owner." # - # You must specify the same privacy setting for the administrative, - # registrant, and technical contacts. + # While some domains may allow different privacy settings per contact, + # we recommend specifying the same privacy setting for all contacts. # # # @@ -2623,7 +2616,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-route53domains' - context[:gem_version] = '1.45.0' + context[:gem_version] = '1.46.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client_api.rb b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client_api.rb index ba47a6e663e..d8cbdc8f9f9 100644 --- a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client_api.rb +++ b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client_api.rb @@ -104,6 +104,7 @@ module ClientApi ListOperationsRequest = Shapes::StructureShape.new(name: 'ListOperationsRequest') ListOperationsResponse = Shapes::StructureShape.new(name: 'ListOperationsResponse') ListOperationsSortAttributeName = Shapes::StringShape.new(name: 'ListOperationsSortAttributeName') + ListPricesPageMaxItems = Shapes::IntegerShape.new(name: 'ListPricesPageMaxItems') ListPricesRequest = Shapes::StructureShape.new(name: 'ListPricesRequest') ListPricesResponse = Shapes::StructureShape.new(name: 'ListPricesResponse') ListTagsForDomainRequest = Shapes::StructureShape.new(name: 'ListTagsForDomainRequest') @@ -431,7 +432,7 @@ module ClientApi ListPricesRequest.add_member(:tld, Shapes::ShapeRef.new(shape: TldName, location_name: "Tld")) ListPricesRequest.add_member(:marker, Shapes::ShapeRef.new(shape: PageMarker, location_name: "Marker")) - ListPricesRequest.add_member(:max_items, Shapes::ShapeRef.new(shape: PageMaxItems, location_name: "MaxItems")) + ListPricesRequest.add_member(:max_items, Shapes::ShapeRef.new(shape: ListPricesPageMaxItems, location_name: "MaxItems")) ListPricesRequest.struct_class = Types::ListPricesRequest ListPricesResponse.add_member(:prices, Shapes::ShapeRef.new(shape: DomainPriceList, location_name: "Prices")) diff --git a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/endpoint_parameters.rb b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/endpoint_parameters.rb index aa8453d5f89..d30b0829c8f 100644 --- 
a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/endpoint_parameters.rb +++ b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/endpoint_parameters.rb @@ -50,9 +50,6 @@ class << self def initialize(options = {}) self[:region] = options[:region] - if self[:region].nil? - raise ArgumentError, "Missing required EndpointParameter: :region" - end self[:use_dual_stack] = options[:use_dual_stack] self[:use_dual_stack] = false if self[:use_dual_stack].nil? if self[:use_dual_stack].nil? diff --git a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/endpoint_provider.rb b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/endpoint_provider.rb index 1f22a67e50e..0d915305eab 100644 --- a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/endpoint_provider.rb +++ b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/endpoint_provider.rb @@ -14,36 +14,39 @@ def resolve_endpoint(parameters) use_dual_stack = parameters.use_dual_stack use_fips = parameters.use_fips endpoint = parameters.endpoint - if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) - if Aws::Endpoints::Matchers.set?(endpoint) - if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) - raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported" - end - if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) - raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported" - end - return Aws::Endpoints::Endpoint.new(url: endpoint, headers: {}, properties: {}) - end - if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) - return Aws::Endpoints::Endpoint.new(url: "https://route53domains-fips.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) - end - raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" - end + if Aws::Endpoints::Matchers.set?(endpoint) if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) - return Aws::Endpoints::Endpoint.new(url: "https://route53domains-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) - end - raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" + raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported" end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) - return Aws::Endpoints::Endpoint.new(url: "https://route53domains.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported" + end + return Aws::Endpoints::Endpoint.new(url: endpoint, headers: {}, properties: {}) + end + if Aws::Endpoints::Matchers.set?(region) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if 
Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://route53domains-fips.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + return Aws::Endpoints::Endpoint.new(url: "https://route53domains-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://route53domains.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "DualStack is enabled but this partition does not support DualStack" end - raise ArgumentError, "DualStack is enabled but this partition does not support DualStack" + return Aws::Endpoints::Endpoint.new(url: "https://route53domains.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) end - return Aws::Endpoints::Endpoint.new(url: "https://route53domains.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) end + raise ArgumentError, "Invalid Configuration: Missing Region" raise ArgumentError, 'No endpoint could be resolved' end diff --git a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/types.rb b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/types.rb index f1d679589d8..cb2634d6ce2 100644 --- a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/types.rb +++ b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/types.rb @@ -1305,22 +1305,6 @@ class EnableDomainTransferLockResponse < Struct.new( # # * `TOWNSHIP` # - # .fr - # : * `BIRTH_CITY` - # - # * `BIRTH_COUNTRY` - # - # * `BIRTH_DATE_IN_YYYY_MM_DD` - # - # * `BIRTH_DEPARTMENT`: Specify the INSEE code that corresponds with - # the department where the contact was born. If the contact was - # born somewhere other than France or its overseas departments, - # specify `99`. For more information, including a list of - # departments and the corresponding INSEE numbers, see the - # Wikipedia entry [Departments of France][1]. - # - # * `BRAND_NUMBER` - # # .it # : * `IT_NATIONALITY` # @@ -1399,10 +1383,6 @@ class EnableDomainTransferLockResponse < Struct.new( # * `UK_COMPANY_NUMBER` # # In addition, many TLDs require a `VAT_NUMBER`. - # - # - # - # [1]: https://en.wikipedia.org/wiki/Departments_of_France # @return [String] # # @!attribute [rw] value @@ -2722,13 +2702,11 @@ class Tag < Struct.new( # @!attribute [rw] privacy_protect_admin_contact # Whether you want to conceal contact information from WHOIS queries. # If you specify `true`, WHOIS ("who is") queries return contact - # information either for Amazon Registrar (for .com, .net, and .org - # domains) or for our registrar associate, Gandi (for all other TLDs). 
- # If you specify `false`, WHOIS queries return the information that - # you entered for the admin contact. + # information for the registrar, the phrase "REDACTED FOR PRIVACY", + # or "On behalf of <domain name> owner.". # - # You must specify the same privacy setting for the administrative, - # registrant, and technical contacts. + # While some domains may allow different privacy settings per contact, + # we recommend specifying the same privacy setting for all contacts. # # # @@ -2971,7 +2949,8 @@ class UpdateDomainContactPrivacyResponse < Struct.new( # @return [Types::ContactDetail] # # @!attribute [rw] consent - # Customer's consent for the owner change request. + # Customer's consent for the owner change request. Required if the + # domain is not free (consent price is more than $0.00). # @return [Types::Consent] # # @see http://docs.aws.amazon.com/goto/WebAPI/route53domains-2014-05-15/UpdateDomainContactRequest AWS API Documentation diff --git a/gems/aws-sdk-route53domains/spec/endpoint_provider_spec.rb b/gems/aws-sdk-route53domains/spec/endpoint_provider_spec.rb index d4e0ca93904..68cab5a38c8 100644 --- a/gems/aws-sdk-route53domains/spec/endpoint_provider_spec.rb +++ b/gems/aws-sdk-route53domains/spec/endpoint_provider_spec.rb @@ -14,13 +14,27 @@ module Aws::Route53Domains describe EndpointProvider do subject { Aws::Route53Domains::EndpointProvider.new } + context 'For region us-east-1 with FIPS disabled and DualStack disabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains.us-east-1.amazonaws.com"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>false, :use_dual_stack=>false}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + context 'For region us-east-1 with FIPS enabled and DualStack enabled' do let(:expected) do {"endpoint"=>{"url"=>"https://route53domains-fips.us-east-1.api.aws"}} end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :region=>"us-east-1", :use_dual_stack=>true}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>true, :use_dual_stack=>true}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -34,7 +48,7 @@ module Aws::Route53Domains end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :region=>"us-east-1", :use_dual_stack=>false}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>true, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -48,7 +62,7 @@ module Aws::Route53Domains end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :region=>"us-east-1", :use_dual_stack=>true}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>false, :use_dual_stack=>true}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to 
eq(expected['endpoint']['headers'] || {}) @@ -56,13 +70,165 @@ module Aws::Route53Domains end end - context 'For region us-east-1 with FIPS disabled and DualStack disabled' do + context 'For region cn-north-1 with FIPS enabled and DualStack enabled' do let(:expected) do - {"endpoint"=>{"url"=>"https://route53domains.us-east-1.amazonaws.com"}} + {"endpoint"=>{"url"=>"https://route53domains-fips.cn-north-1.api.amazonwebservices.com.cn"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"cn-north-1", :use_fips=>true, :use_dual_stack=>true}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region cn-north-1 with FIPS enabled and DualStack disabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains-fips.cn-north-1.amazonaws.com.cn"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"cn-north-1", :use_fips=>true, :use_dual_stack=>false}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region cn-north-1 with FIPS disabled and DualStack enabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains.cn-north-1.api.amazonwebservices.com.cn"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"cn-north-1", :use_fips=>false, :use_dual_stack=>true}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region cn-north-1 with FIPS disabled and DualStack disabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains.cn-north-1.amazonaws.com.cn"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"cn-north-1", :use_fips=>false, :use_dual_stack=>false}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region us-gov-east-1 with FIPS enabled and DualStack enabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains-fips.us-gov-east-1.api.aws"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-gov-east-1", :use_fips=>true, :use_dual_stack=>true}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region us-gov-east-1 with FIPS enabled and DualStack disabled' do + let(:expected) do + 
{"endpoint"=>{"url"=>"https://route53domains-fips.us-gov-east-1.amazonaws.com"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-gov-east-1", :use_fips=>true, :use_dual_stack=>false}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region us-gov-east-1 with FIPS disabled and DualStack enabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains.us-gov-east-1.api.aws"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-gov-east-1", :use_fips=>false, :use_dual_stack=>true}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region us-gov-east-1 with FIPS disabled and DualStack disabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains.us-gov-east-1.amazonaws.com"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-gov-east-1", :use_fips=>false, :use_dual_stack=>false}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region us-iso-east-1 with FIPS enabled and DualStack enabled' do + let(:expected) do + {"error"=>"FIPS and DualStack are enabled, but this partition does not support one or both"} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-iso-east-1", :use_fips=>true, :use_dual_stack=>true}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + + context 'For region us-iso-east-1 with FIPS enabled and DualStack disabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains-fips.us-iso-east-1.c2s.ic.gov"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-iso-east-1", :use_fips=>true, :use_dual_stack=>false}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region us-iso-east-1 with FIPS disabled and DualStack enabled' do + let(:expected) do + {"error"=>"DualStack is enabled but this partition does not support DualStack"} end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :region=>"us-east-1", :use_dual_stack=>false}) + params = EndpointParameters.new(**{:region=>"us-iso-east-1", :use_fips=>false, :use_dual_stack=>true}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + + context 'For region us-iso-east-1 with FIPS disabled and DualStack disabled' do + 
let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains.us-iso-east-1.c2s.ic.gov"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-iso-east-1", :use_fips=>false, :use_dual_stack=>false}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -70,13 +236,81 @@ module Aws::Route53Domains end end - context 'For custom endpoint with fips disabled and dualstack disabled' do + context 'For region us-isob-east-1 with FIPS enabled and DualStack enabled' do + let(:expected) do + {"error"=>"FIPS and DualStack are enabled, but this partition does not support one or both"} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>true, :use_dual_stack=>true}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + + context 'For region us-isob-east-1 with FIPS enabled and DualStack disabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains-fips.us-isob-east-1.sc2s.sgov.gov"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>true, :use_dual_stack=>false}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For region us-isob-east-1 with FIPS disabled and DualStack enabled' do + let(:expected) do + {"error"=>"DualStack is enabled but this partition does not support DualStack"} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>false, :use_dual_stack=>true}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + + context 'For region us-isob-east-1 with FIPS disabled and DualStack disabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://route53domains.us-isob-east-1.sc2s.sgov.gov"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>false, :use_dual_stack=>false}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For custom endpoint with region set and fips disabled and dualstack disabled' do + let(:expected) do + {"endpoint"=>{"url"=>"https://example.com"}} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>false, :use_dual_stack=>false, :endpoint=>"https://example.com"}) + endpoint = subject.resolve_endpoint(params) + expect(endpoint.url).to eq(expected['endpoint']['url']) + expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) + expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {}) + end + end + + context 'For custom endpoint with region not set and fips disabled and dualstack disabled' do let(:expected) 
do {"endpoint"=>{"url"=>"https://example.com"}} end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :region=>"us-east-1", :use_dual_stack=>false, :endpoint=>"https://example.com"}) + params = EndpointParameters.new(**{:use_fips=>false, :use_dual_stack=>false, :endpoint=>"https://example.com"}) endpoint = subject.resolve_endpoint(params) expect(endpoint.url).to eq(expected['endpoint']['url']) expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {}) @@ -90,7 +324,7 @@ module Aws::Route53Domains end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>true, :region=>"us-east-1", :use_dual_stack=>false, :endpoint=>"https://example.com"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>true, :use_dual_stack=>false, :endpoint=>"https://example.com"}) expect do subject.resolve_endpoint(params) end.to raise_error(ArgumentError, expected['error']) @@ -103,7 +337,20 @@ module Aws::Route53Domains end it 'produces the expected output from the EndpointProvider' do - params = EndpointParameters.new(**{:use_fips=>false, :region=>"us-east-1", :use_dual_stack=>true, :endpoint=>"https://example.com"}) + params = EndpointParameters.new(**{:region=>"us-east-1", :use_fips=>false, :use_dual_stack=>true, :endpoint=>"https://example.com"}) + expect do + subject.resolve_endpoint(params) + end.to raise_error(ArgumentError, expected['error']) + end + end + + context 'Missing region' do + let(:expected) do + {"error"=>"Invalid Configuration: Missing Region"} + end + + it 'produces the expected output from the EndpointProvider' do + params = EndpointParameters.new(**{}) expect do subject.resolve_endpoint(params) end.to raise_error(ArgumentError, expected['error']) diff --git a/gems/aws-sdk-sagemaker/CHANGELOG.md b/gems/aws-sdk-sagemaker/CHANGELOG.md index d62ded988d9..be14435c68f 100644 --- a/gems/aws-sdk-sagemaker/CHANGELOG.md +++ b/gems/aws-sdk-sagemaker/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.188.0 (2023-06-19) +------------------ + +* Feature - Amazon Sagemaker Autopilot releases CreateAutoMLJobV2 and DescribeAutoMLJobV2 for Autopilot customers with ImageClassification, TextClassification and Tabular problem type config support. + 1.187.0 (2023-06-15) ------------------ diff --git a/gems/aws-sdk-sagemaker/VERSION b/gems/aws-sdk-sagemaker/VERSION index 4c2ac53efd4..6e0b62948a7 100644 --- a/gems/aws-sdk-sagemaker/VERSION +++ b/gems/aws-sdk-sagemaker/VERSION @@ -1 +1 @@ -1.187.0 +1.188.0 diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker.rb index 390a45b95a9..816dc2d4db8 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker.rb @@ -53,6 +53,6 @@ # @!group service module Aws::SageMaker - GEM_VERSION = '1.187.0' + GEM_VERSION = '1.188.0' end diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client.rb index 08d47a78db2..91b155b3da5 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client.rb @@ -1184,18 +1184,30 @@ def create_artifact(params = {}, options = {}) req.send_request(options) end - # Creates an Autopilot job. + # Creates an Autopilot job also referred to as Autopilot experiment or + # AutoML job. 
# - # Find the best-performing model after you run an Autopilot job by - # calling [DescribeAutoMLJob][1]. + # Find the best-performing model after you run an AutoML job by calling + # [DescribeAutoMLJobV2][1] (recommended) or [DescribeAutoMLJob][2]. # - # For information about how to use Autopilot, see [Automate Model - # Development with Amazon SageMaker Autopilot][2]. + # `CreateAutoMLJob` only accepts tabular input data. We recommend using + # [CreateAutoMLJobV2][3] for all problem types. `CreateAutoMLJobV2` can + # process the same tabular data as its previous version + # `CreateAutoMLJob`, as well as non-tabular data for problem types such + # as image or text classification. # + # Find guidelines about how to migrate `CreateAutoMLJob` to + # `CreateAutoMLJobV2` in [Migrate a CreateAutoMLJob to + # CreateAutoMLJobV2][4]. + # + # # # - # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJob.html - # [2]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJobV2.html + # [2]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJob.html + # [3]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html + # [4]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development-create-experiment-api.html#autopilot-create-experiment-api-migrate-v1-v2 # # @option params [required, String] :auto_ml_job_name # Identifies an Autopilot job. The name must be unique to your account @@ -1229,15 +1241,13 @@ def create_artifact(params = {}, options = {}) # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-datasets-problem-types.html#autopilot-problem-types # # @option params [Types::AutoMLJobObjective] :auto_ml_job_objective - # Defines the objective metric used to measure the predictive quality of - # an AutoML job. You provide an [AutoMLJobObjective$MetricName][1] and - # Autopilot infers whether to minimize or maximize it. For - # [CreateAutoMLJobV2][2], only `Accuracy` is supported. + # Specifies a metric to minimize or maximize as the objective of a job. + # If not specified, the default objective metric depends on the problem + # type. See [AutoMLJobObjective][1] for the default values. # # # # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLJobObjective.html - # [2]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html # # @option params [Types::AutoMLJobConfig] :auto_ml_job_config # A collection of settings used to configure an AutoML job. @@ -1350,24 +1360,32 @@ def create_auto_ml_job(params = {}, options = {}) req.send_request(options) end - # Creates an Amazon SageMaker AutoML job that uses non-tabular data such - # as images or text for Computer Vision or Natural Language Processing - # problems. + # Creates an Autopilot job also referred to as Autopilot experiment or + # AutoML job V2. # - # Find the resulting model after you run an AutoML job V2 by calling - # [DescribeAutoMLJobV2][1]. + # We recommend using [CreateAutoMLJobV2][1] for all problem types. + # `CreateAutoMLJobV2` can process the same tabular data as its previous + # version `CreateAutoMLJob`, as well as non-tabular data for problem + # types such as image or text classification. # - # To create an `AutoMLJob` using tabular data, see [CreateAutoMLJob][2]. 
+ # Find guidelines about how to migrate `CreateAutoMLJob` to + # `CreateAutoMLJobV2` in [Migrate a CreateAutoMLJob to + # CreateAutoMLJobV2][2]. # - # This API action is callable through SageMaker Canvas only. Calling it - # directly from the CLI or an SDK results in an error. + # For the list of available problem types supported by + # `CreateAutoMLJobV2`, see [AutoMLProblemTypeConfig][3]. # - # + # Find the best-performing model after you run an AutoML job V2 by + # calling [DescribeAutoMLJobV2][4]. Calling [DescribeAutoMLJob][5] on a + # AutoML job V2 results in an error. # # # - # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJobV2.html - # [2]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJob.html + # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html + # [2]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development-create-experiment-api.html#autopilot-create-experiment-api-migrate-v1-v2 + # [3]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLProblemTypeConfig.html + # [4]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJobV2.html + # [5]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJob.html # # @option params [required, String] :auto_ml_job_name # Identifies an Autopilot job. The name must be unique to your account @@ -1379,10 +1397,12 @@ def create_auto_ml_job(params = {}, options = {}) # [InputDataConfig][1] supported by `CreateAutoMLJob`. The supported # formats depend on the problem type: # - # * ImageClassification: S3Prefix, `ManifestFile`, - # `AugmentedManifestFile` + # * For Tabular problem types: `S3Prefix`, `ManifestFile`. + # + # * For ImageClassification: `S3Prefix`, `ManifestFile`, + # `AugmentedManifestFile`. # - # * TextClassification: S3Prefix + # * For TextClassification: `S3Prefix`. # # # @@ -1396,6 +1416,13 @@ def create_auto_ml_job(params = {}, options = {}) # Defines the configuration settings of one of the supported problem # types. # + # For tabular problem types, you must either specify the type of + # supervised learning problem in `AutoMLProblemTypeConfig` + # (`TabularJobConfig.ProblemType`) and provide the `AutoMLJobObjective`, + # or none at all. + # + # + # # @option params [required, String] :role_arn # The ARN of the role that is used to access the data. # @@ -1415,11 +1442,20 @@ def create_auto_ml_job(params = {}, options = {}) # # @option params [Types::AutoMLJobObjective] :auto_ml_job_objective # Specifies a metric to minimize or maximize as the objective of a job. - # For [CreateAutoMLJobV2][1], only `Accuracy` is supported. + # If not specified, the default objective metric depends on the problem + # type. For the list of default values per problem type, see + # [AutoMLJobObjective][1]. + # + # For tabular problem types, you must either provide the + # `AutoMLJobObjective` and indicate the type of supervised learning + # problem in `AutoMLProblemTypeConfig` (`TabularJobConfig.ProblemType`), + # or none. 
# + # # # - # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLJobObjective.html # # @option params [Types::ModelDeployConfig] :model_deploy_config # Specifies how to generate the endpoint name for an automatic one-click @@ -1429,12 +1465,9 @@ def create_auto_ml_job(params = {}, options = {}) # This structure specifies how to split the data into train and # validation datasets. # - # If you are using the V1 API (for example `CreateAutoMLJob`) or the V2 - # API for Natural Language Processing problems (for example - # `CreateAutoMLJobV2` with a `TextClassificationJobConfig` problem - # type), the validation and training datasets must contain the same - # headers. Also, for V1 API jobs, the validation dataset must be less - # than 2 GB in size. + # The validation and training datasets must contain the same headers. + # For jobs created by calling `CreateAutoMLJob`, the validation dataset + # must be less than 2 GB in size. # # @return [Types::CreateAutoMLJobV2Response] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # @@ -1478,6 +1511,26 @@ def create_auto_ml_job(params = {}, options = {}) # content_column: "ContentColumn", # target_label_column: "TargetLabelColumn", # }, + # tabular_job_config: { + # candidate_generation_config: { + # algorithms_config: [ + # { + # auto_ml_algorithms: ["xgboost"], # required, accepts xgboost, linear-learner, mlp, lightgbm, catboost, randomforest, extra-trees, nn-torch, fastai + # }, + # ], + # }, + # completion_criteria: { + # max_candidates: 1, + # max_runtime_per_training_job_in_seconds: 1, + # max_auto_ml_job_runtime_in_seconds: 1, + # }, + # feature_specification_s3_uri: "S3Uri", + # mode: "AUTO", # accepts AUTO, ENSEMBLING, HYPERPARAMETER_TUNING + # generate_candidate_definitions_only: false, + # problem_type: "BinaryClassification", # accepts BinaryClassification, MulticlassClassification, Regression + # target_attribute_name: "TargetAttributeName", # required + # sample_weight_attribute_name: "SampleWeightAttributeName", + # }, # }, # role_arn: "RoleArn", # required # tags: [ @@ -9950,7 +10003,12 @@ def describe_artifact(params = {}, options = {}) req.send_request(options) end - # Returns information about an Amazon SageMaker AutoML job. + # Returns information about an AutoML job created by calling + # [CreateAutoMLJob][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJob.html # # @option params [required, String] :auto_ml_job_name # Requests information about an AutoML job using its unique name. @@ -10079,15 +10137,15 @@ def describe_auto_ml_job(params = {}, options = {}) req.send_request(options) end - # Returns information about an Amazon SageMaker AutoML V2 job. + # Returns information about an AutoML job V2 created by calling + # [CreateAutoMLJobV2][1]. # - # This API action is callable through SageMaker Canvas only. Calling it - # directly from the CLI or an SDK results in an error. # - # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html # # @option params [required, String] :auto_ml_job_name - # Requests information about an AutoML V2 job using its unique name. + # Requests information about an AutoML job V2 using its unique name. 
# # @return [Types::DescribeAutoMLJobV2Response] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # @@ -10110,6 +10168,9 @@ def describe_auto_ml_job(params = {}, options = {}) # * {Types::DescribeAutoMLJobV2Response#model_deploy_result #model_deploy_result} => Types::ModelDeployResult # * {Types::DescribeAutoMLJobV2Response#data_split_config #data_split_config} => Types::AutoMLDataSplitConfig # * {Types::DescribeAutoMLJobV2Response#security_config #security_config} => Types::AutoMLSecurityConfig + # * {Types::DescribeAutoMLJobV2Response#auto_ml_job_artifacts #auto_ml_job_artifacts} => Types::AutoMLJobArtifacts + # * {Types::DescribeAutoMLJobV2Response#resolved_attributes #resolved_attributes} => Types::AutoMLResolvedAttributes + # * {Types::DescribeAutoMLJobV2Response#auto_ml_problem_type_config_name #auto_ml_problem_type_config_name} => String # # @example Request syntax with placeholder values # @@ -10139,6 +10200,18 @@ def describe_auto_ml_job(params = {}, options = {}) # resp.auto_ml_problem_type_config.text_classification_job_config.completion_criteria.max_auto_ml_job_runtime_in_seconds #=> Integer # resp.auto_ml_problem_type_config.text_classification_job_config.content_column #=> String # resp.auto_ml_problem_type_config.text_classification_job_config.target_label_column #=> String + # resp.auto_ml_problem_type_config.tabular_job_config.candidate_generation_config.algorithms_config #=> Array + # resp.auto_ml_problem_type_config.tabular_job_config.candidate_generation_config.algorithms_config[0].auto_ml_algorithms #=> Array + # resp.auto_ml_problem_type_config.tabular_job_config.candidate_generation_config.algorithms_config[0].auto_ml_algorithms[0] #=> String, one of "xgboost", "linear-learner", "mlp", "lightgbm", "catboost", "randomforest", "extra-trees", "nn-torch", "fastai" + # resp.auto_ml_problem_type_config.tabular_job_config.completion_criteria.max_candidates #=> Integer + # resp.auto_ml_problem_type_config.tabular_job_config.completion_criteria.max_runtime_per_training_job_in_seconds #=> Integer + # resp.auto_ml_problem_type_config.tabular_job_config.completion_criteria.max_auto_ml_job_runtime_in_seconds #=> Integer + # resp.auto_ml_problem_type_config.tabular_job_config.feature_specification_s3_uri #=> String + # resp.auto_ml_problem_type_config.tabular_job_config.mode #=> String, one of "AUTO", "ENSEMBLING", "HYPERPARAMETER_TUNING" + # resp.auto_ml_problem_type_config.tabular_job_config.generate_candidate_definitions_only #=> Boolean + # resp.auto_ml_problem_type_config.tabular_job_config.problem_type #=> String, one of "BinaryClassification", "MulticlassClassification", "Regression" + # resp.auto_ml_problem_type_config.tabular_job_config.target_attribute_name #=> String + # resp.auto_ml_problem_type_config.tabular_job_config.sample_weight_attribute_name #=> String # resp.creation_time #=> Time # resp.end_time #=> Time # resp.last_modified_time #=> Time @@ -10190,6 +10263,14 @@ def describe_auto_ml_job(params = {}, options = {}) # resp.security_config.vpc_config.security_group_ids[0] #=> String # resp.security_config.vpc_config.subnets #=> Array # resp.security_config.vpc_config.subnets[0] #=> String + # resp.auto_ml_job_artifacts.candidate_definition_notebook_location #=> String + # resp.auto_ml_job_artifacts.data_exploration_notebook_location #=> String + # resp.resolved_attributes.auto_ml_job_objective.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", 
"Precision", "PrecisionMacro", "Recall", "RecallMacro" + # resp.resolved_attributes.completion_criteria.max_candidates #=> Integer + # resp.resolved_attributes.completion_criteria.max_runtime_per_training_job_in_seconds #=> Integer + # resp.resolved_attributes.completion_criteria.max_auto_ml_job_runtime_in_seconds #=> Integer + # resp.resolved_attributes.auto_ml_problem_type_resolved_attributes.tabular_resolved_attributes.problem_type #=> String, one of "BinaryClassification", "MulticlassClassification", "Regression" + # resp.auto_ml_problem_type_config_name #=> String, one of "ImageClassification", "TextClassification", "Tabular" # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeAutoMLJobV2 AWS API Documentation # @@ -23627,7 +23708,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-sagemaker' - context[:gem_version] = '1.187.0' + context[:gem_version] = '1.188.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client_api.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client_api.rb index c538e1e8716..cfe22e5a590 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client_api.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client_api.rb @@ -137,7 +137,10 @@ module ClientApi AutoMLPartialFailureReason = Shapes::StructureShape.new(name: 'AutoMLPartialFailureReason') AutoMLPartialFailureReasons = Shapes::ListShape.new(name: 'AutoMLPartialFailureReasons') AutoMLProblemTypeConfig = Shapes::UnionShape.new(name: 'AutoMLProblemTypeConfig') + AutoMLProblemTypeConfigName = Shapes::StringShape.new(name: 'AutoMLProblemTypeConfigName') + AutoMLProblemTypeResolvedAttributes = Shapes::UnionShape.new(name: 'AutoMLProblemTypeResolvedAttributes') AutoMLProcessingUnit = Shapes::StringShape.new(name: 'AutoMLProcessingUnit') + AutoMLResolvedAttributes = Shapes::StructureShape.new(name: 'AutoMLResolvedAttributes') AutoMLS3DataSource = Shapes::StructureShape.new(name: 'AutoMLS3DataSource') AutoMLS3DataType = Shapes::StringShape.new(name: 'AutoMLS3DataType') AutoMLSecurityConfig = Shapes::StructureShape.new(name: 'AutoMLSecurityConfig') @@ -171,6 +174,7 @@ module ClientApi CallbackToken = Shapes::StringShape.new(name: 'CallbackToken') CandidateArtifactLocations = Shapes::StructureShape.new(name: 'CandidateArtifactLocations') CandidateDefinitionNotebookLocation = Shapes::StringShape.new(name: 'CandidateDefinitionNotebookLocation') + CandidateGenerationConfig = Shapes::StructureShape.new(name: 'CandidateGenerationConfig') CandidateName = Shapes::StringShape.new(name: 'CandidateName') CandidateProperties = Shapes::StructureShape.new(name: 'CandidateProperties') CandidateSortBy = Shapes::StringShape.new(name: 'CandidateSortBy') @@ -1786,6 +1790,8 @@ module ClientApi SuggestionQuery = Shapes::StructureShape.new(name: 'SuggestionQuery') TableFormat = Shapes::StringShape.new(name: 'TableFormat') TableName = Shapes::StringShape.new(name: 'TableName') + TabularJobConfig = Shapes::StructureShape.new(name: 'TabularJobConfig') + TabularResolvedAttributes = Shapes::StructureShape.new(name: 'TabularResolvedAttributes') Tag = Shapes::StructureShape.new(name: 'Tag') TagKey = Shapes::StringShape.new(name: 'TagKey') TagKeyList = Shapes::ListShape.new(name: 'TagKeyList') @@ -2327,12 +2333,25 @@ module ClientApi AutoMLProblemTypeConfig.add_member(:image_classification_job_config, Shapes::ShapeRef.new(shape: ImageClassificationJobConfig, location_name: 
"ImageClassificationJobConfig")) AutoMLProblemTypeConfig.add_member(:text_classification_job_config, Shapes::ShapeRef.new(shape: TextClassificationJobConfig, location_name: "TextClassificationJobConfig")) + AutoMLProblemTypeConfig.add_member(:tabular_job_config, Shapes::ShapeRef.new(shape: TabularJobConfig, location_name: "TabularJobConfig")) AutoMLProblemTypeConfig.add_member(:unknown, Shapes::ShapeRef.new(shape: nil, location_name: 'unknown')) AutoMLProblemTypeConfig.add_member_subclass(:image_classification_job_config, Types::AutoMLProblemTypeConfig::ImageClassificationJobConfig) AutoMLProblemTypeConfig.add_member_subclass(:text_classification_job_config, Types::AutoMLProblemTypeConfig::TextClassificationJobConfig) + AutoMLProblemTypeConfig.add_member_subclass(:tabular_job_config, Types::AutoMLProblemTypeConfig::TabularJobConfig) AutoMLProblemTypeConfig.add_member_subclass(:unknown, Types::AutoMLProblemTypeConfig::Unknown) AutoMLProblemTypeConfig.struct_class = Types::AutoMLProblemTypeConfig + AutoMLProblemTypeResolvedAttributes.add_member(:tabular_resolved_attributes, Shapes::ShapeRef.new(shape: TabularResolvedAttributes, location_name: "TabularResolvedAttributes")) + AutoMLProblemTypeResolvedAttributes.add_member(:unknown, Shapes::ShapeRef.new(shape: nil, location_name: 'unknown')) + AutoMLProblemTypeResolvedAttributes.add_member_subclass(:tabular_resolved_attributes, Types::AutoMLProblemTypeResolvedAttributes::TabularResolvedAttributes) + AutoMLProblemTypeResolvedAttributes.add_member_subclass(:unknown, Types::AutoMLProblemTypeResolvedAttributes::Unknown) + AutoMLProblemTypeResolvedAttributes.struct_class = Types::AutoMLProblemTypeResolvedAttributes + + AutoMLResolvedAttributes.add_member(:auto_ml_job_objective, Shapes::ShapeRef.new(shape: AutoMLJobObjective, location_name: "AutoMLJobObjective")) + AutoMLResolvedAttributes.add_member(:completion_criteria, Shapes::ShapeRef.new(shape: AutoMLJobCompletionCriteria, location_name: "CompletionCriteria")) + AutoMLResolvedAttributes.add_member(:auto_ml_problem_type_resolved_attributes, Shapes::ShapeRef.new(shape: AutoMLProblemTypeResolvedAttributes, location_name: "AutoMLProblemTypeResolvedAttributes")) + AutoMLResolvedAttributes.struct_class = Types::AutoMLResolvedAttributes + AutoMLS3DataSource.add_member(:s3_data_type, Shapes::ShapeRef.new(shape: AutoMLS3DataType, required: true, location_name: "S3DataType")) AutoMLS3DataSource.add_member(:s3_uri, Shapes::ShapeRef.new(shape: S3Uri, required: true, location_name: "S3Uri")) AutoMLS3DataSource.struct_class = Types::AutoMLS3DataSource @@ -2421,6 +2440,9 @@ module ClientApi CandidateArtifactLocations.add_member(:model_insights, Shapes::ShapeRef.new(shape: ModelInsightsLocation, location_name: "ModelInsights")) CandidateArtifactLocations.struct_class = Types::CandidateArtifactLocations + CandidateGenerationConfig.add_member(:algorithms_config, Shapes::ShapeRef.new(shape: AutoMLAlgorithmsConfig, location_name: "AlgorithmsConfig")) + CandidateGenerationConfig.struct_class = Types::CandidateGenerationConfig + CandidateProperties.add_member(:candidate_artifact_locations, Shapes::ShapeRef.new(shape: CandidateArtifactLocations, location_name: "CandidateArtifactLocations")) CandidateProperties.add_member(:candidate_metrics, Shapes::ShapeRef.new(shape: MetricDataList, location_name: "CandidateMetrics")) CandidateProperties.struct_class = Types::CandidateProperties @@ -3764,6 +3786,9 @@ module ClientApi DescribeAutoMLJobV2Response.add_member(:model_deploy_result, Shapes::ShapeRef.new(shape: 
ModelDeployResult, location_name: "ModelDeployResult")) DescribeAutoMLJobV2Response.add_member(:data_split_config, Shapes::ShapeRef.new(shape: AutoMLDataSplitConfig, location_name: "DataSplitConfig")) DescribeAutoMLJobV2Response.add_member(:security_config, Shapes::ShapeRef.new(shape: AutoMLSecurityConfig, location_name: "SecurityConfig")) + DescribeAutoMLJobV2Response.add_member(:auto_ml_job_artifacts, Shapes::ShapeRef.new(shape: AutoMLJobArtifacts, location_name: "AutoMLJobArtifacts")) + DescribeAutoMLJobV2Response.add_member(:resolved_attributes, Shapes::ShapeRef.new(shape: AutoMLResolvedAttributes, location_name: "ResolvedAttributes")) + DescribeAutoMLJobV2Response.add_member(:auto_ml_problem_type_config_name, Shapes::ShapeRef.new(shape: AutoMLProblemTypeConfigName, location_name: "AutoMLProblemTypeConfigName")) DescribeAutoMLJobV2Response.struct_class = Types::DescribeAutoMLJobV2Response DescribeCodeRepositoryInput.add_member(:code_repository_name, Shapes::ShapeRef.new(shape: EntityName, required: true, location_name: "CodeRepositoryName")) @@ -8200,6 +8225,19 @@ module ClientApi SuggestionQuery.add_member(:property_name_query, Shapes::ShapeRef.new(shape: PropertyNameQuery, location_name: "PropertyNameQuery")) SuggestionQuery.struct_class = Types::SuggestionQuery + TabularJobConfig.add_member(:candidate_generation_config, Shapes::ShapeRef.new(shape: CandidateGenerationConfig, location_name: "CandidateGenerationConfig")) + TabularJobConfig.add_member(:completion_criteria, Shapes::ShapeRef.new(shape: AutoMLJobCompletionCriteria, location_name: "CompletionCriteria")) + TabularJobConfig.add_member(:feature_specification_s3_uri, Shapes::ShapeRef.new(shape: S3Uri, location_name: "FeatureSpecificationS3Uri")) + TabularJobConfig.add_member(:mode, Shapes::ShapeRef.new(shape: AutoMLMode, location_name: "Mode")) + TabularJobConfig.add_member(:generate_candidate_definitions_only, Shapes::ShapeRef.new(shape: GenerateCandidateDefinitionsOnly, location_name: "GenerateCandidateDefinitionsOnly")) + TabularJobConfig.add_member(:problem_type, Shapes::ShapeRef.new(shape: ProblemType, location_name: "ProblemType")) + TabularJobConfig.add_member(:target_attribute_name, Shapes::ShapeRef.new(shape: TargetAttributeName, required: true, location_name: "TargetAttributeName")) + TabularJobConfig.add_member(:sample_weight_attribute_name, Shapes::ShapeRef.new(shape: SampleWeightAttributeName, location_name: "SampleWeightAttributeName")) + TabularJobConfig.struct_class = Types::TabularJobConfig + + TabularResolvedAttributes.add_member(:problem_type, Shapes::ShapeRef.new(shape: ProblemType, location_name: "ProblemType")) + TabularResolvedAttributes.struct_class = Types::TabularResolvedAttributes + Tag.add_member(:key, Shapes::ShapeRef.new(shape: TagKey, required: true, location_name: "Key")) Tag.add_member(:value, Shapes::ShapeRef.new(shape: TagValue, required: true, location_name: "Value")) Tag.struct_class = Types::Tag diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/types.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/types.rb index c0505ee48e6..c53a25f2b9f 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/types.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/types.rb @@ -1762,8 +1762,9 @@ class AutoMLAlgorithmConfig < Struct.new( # @!attribute [rw] inference_container_definitions # The mapping of all supported processing unit (CPU, GPU, etc...) to # inference container definitions for the candidate. 
This field is - # populated for the V2 API only (for example, for jobs created by - # calling `CreateAutoMLJobV2`). + # populated for the AutoML jobs V2 (for example, for jobs created by + # calling `CreateAutoMLJobV2`) related to image or text classification + # problem types only. # @return [Hash>] # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AutoMLCandidate AWS API Documentation @@ -2035,12 +2036,9 @@ class AutoMLDataSource < Struct.new( # This structure specifies how to split the data into train and # validation datasets. # - # If you are using the V1 API (for example `CreateAutoMLJob`) or the V2 - # API for Natural Language Processing problems (for example - # `CreateAutoMLJobV2` with a `TextClassificationJobConfig` problem - # type), the validation and training datasets must contain the same - # headers. Also, for V1 API jobs, the validation dataset must be less - # than 2 GB in size. + # The validation and training datasets must contain the same headers. + # For jobs created by calling `CreateAutoMLJob`, the validation dataset + # must be less than 2 GB in size. # # @!attribute [rw] validation_fraction # The validation fraction (optional) is a float that specifies the @@ -2077,14 +2075,12 @@ class AutoMLJobArtifacts < Struct.new( end # A channel is a named input source that training algorithms can - # consume. This channel is used for the non tabular training data of an - # AutoML job using the V2 API. For tabular training data, see [ - # AutoMLChannel][1]. For more information, see [ Channel][2]. + # consume. This channel is used for AutoML jobs V2 (jobs created by + # calling [CreateAutoMLJobV2][1]). # # # - # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLChannel.html - # [2]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_Channel.html + # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html # # @!attribute [rw] channel_type # The type of channel. Defines whether the data are used for training @@ -2096,23 +2092,27 @@ class AutoMLJobArtifacts < Struct.new( # The content type of the data from the input source. The following # are the allowed content types for different problems: # - # * ImageClassification: `image/png`, `image/jpeg`, or `image/*`. The - # default value is `image/*`. + # * For Tabular problem types: `text/csv;header=present` or + # `x-application/vnd.amazon+parquet`. The default value is + # `text/csv;header=present`. # - # * TextClassification: `text/csv;header=present` or + # * For ImageClassification: `image/png`, `image/jpeg`, or `image/*`. + # The default value is `image/*`. + # + # * For TextClassification: `text/csv;header=present` or # `x-application/vnd.amazon+parquet`. The default value is # `text/csv;header=present`. # @return [String] # # @!attribute [rw] compression_type - # The allowed compression types depend on the input format. We allow - # the compression type `Gzip` for `S3Prefix` inputs only. For all - # other inputs, the compression type should be `None`. If no - # compression type is provided, we default to `None`. + # The allowed compression types depend on the input format and problem + # type. We allow the compression type `Gzip` for `S3Prefix` inputs on + # tabular data only. For all other inputs, the compression type should + # be `None`. If no compression type is provided, we default to `None`. # @return [String] # # @!attribute [rw] data_source - # The data source for an AutoML channel. 
+ # The data source for an AutoML channel (Required). # @return [Types::AutoMLDataSource] # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AutoMLJobChannel AWS API Documentation @@ -2132,7 +2132,7 @@ class AutoMLJobChannel < Struct.new( # @!attribute [rw] max_candidates # The maximum number of times a training job is allowed to run. # - # For V2 jobs (jobs created by calling `CreateAutoMLJobV2`), the + # For job V2s (jobs created by calling `CreateAutoMLJobV2`), the # supported value is 1. # @return [Integer] # @@ -2142,7 +2142,7 @@ class AutoMLJobChannel < Struct.new( # tuning job. For more information, see the [StoppingCondition][1] # used by the [CreateHyperParameterTuningJob][2] action. # - # For V2 jobs (jobs created by calling `CreateAutoMLJobV2`), this + # For job V2s (jobs created by calling `CreateAutoMLJobV2`), this # field controls the runtime of the job candidate. # # @@ -2221,7 +2221,7 @@ class AutoMLJobCompletionCriteria < Struct.new( # # # - # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-suppprt + # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AutoMLJobConfig AWS API Documentation @@ -2237,8 +2237,6 @@ class AutoMLJobConfig < Struct.new( end # Specifies a metric to minimize or maximize as the objective of a job. - # V2 API jobs (for example jobs created by calling `CreateAutoMLJobV2`), - # support `Accuracy` only. # # @!attribute [rw] metric_name # The name of the objective metric used to measure the predictive @@ -2253,11 +2251,15 @@ class AutoMLJobConfig < Struct.new( # If you do not specify a metric explicitly, the default behavior is # to automatically use: # - # * `MSE`: for regression. + # * For tabular problem types: # - # * `F1`: for binary classification + # * Regression: `MSE`. # - # * `Accuracy`: for multiclass classification. + # * Binary classification: `F1`. + # + # * Multiclass classification: `Accuracy`. + # + # * For image or text classification problem types: `Accuracy` # # # @@ -2375,28 +2377,34 @@ class AutoMLPartialFailureReason < Struct.new( end # A collection of settings specific to the problem type used to - # configure an AutoML job using the V2 API. There must be one and only - # one config of the following type. + # configure an AutoML job V2. There must be one and only one config of + # the following type. # # @note AutoMLProblemTypeConfig is a union - when making an API calls you must set exactly one of the members. # # @note AutoMLProblemTypeConfig is a union - when returned from an API call exactly one value will be set and the returned type will be a subclass of AutoMLProblemTypeConfig corresponding to the set member. # # @!attribute [rw] image_classification_job_config - # Settings used to configure an AutoML job using the V2 API for the - # image classification problem type. + # Settings used to configure an AutoML job V2 for the image + # classification problem type. # @return [Types::ImageClassificationJobConfig] # # @!attribute [rw] text_classification_job_config - # Settings used to configure an AutoML job using the V2 API for the - # text classification problem type. + # Settings used to configure an AutoML job V2 for the text + # classification problem type. 
# @return [Types::TextClassificationJobConfig] # + # @!attribute [rw] tabular_job_config + # Settings used to configure an AutoML job V2 for a tabular problem + # type (regression, classification). + # @return [Types::TabularJobConfig] + # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AutoMLProblemTypeConfig AWS API Documentation # class AutoMLProblemTypeConfig < Struct.new( :image_classification_job_config, :text_classification_job_config, + :tabular_job_config, :unknown) SENSITIVE = [] include Aws::Structure @@ -2404,9 +2412,58 @@ class AutoMLProblemTypeConfig < Struct.new( class ImageClassificationJobConfig < AutoMLProblemTypeConfig; end class TextClassificationJobConfig < AutoMLProblemTypeConfig; end + class TabularJobConfig < AutoMLProblemTypeConfig; end class Unknown < AutoMLProblemTypeConfig; end end + # The resolved attributes specific to the problem type of an AutoML job + # V2. + # + # @note AutoMLProblemTypeResolvedAttributes is a union - when returned from an API call exactly one value will be set and the returned type will be a subclass of AutoMLProblemTypeResolvedAttributes corresponding to the set member. + # + # @!attribute [rw] tabular_resolved_attributes + # Defines the resolved attributes for the `TABULAR` problem type. + # @return [Types::TabularResolvedAttributes] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AutoMLProblemTypeResolvedAttributes AWS API Documentation + # + class AutoMLProblemTypeResolvedAttributes < Struct.new( + :tabular_resolved_attributes, + :unknown) + SENSITIVE = [] + include Aws::Structure + include Aws::Structure::Union + + class TabularResolvedAttributes < AutoMLProblemTypeResolvedAttributes; end + class Unknown < AutoMLProblemTypeResolvedAttributes; end + end + + # The resolved attributes used to configure an AutoML job V2. + # + # @!attribute [rw] auto_ml_job_objective + # Specifies a metric to minimize or maximize as the objective of a + # job. + # @return [Types::AutoMLJobObjective] + # + # @!attribute [rw] completion_criteria + # How long a job is allowed to run, or how many candidates a job is + # allowed to generate. + # @return [Types::AutoMLJobCompletionCriteria] + # + # @!attribute [rw] auto_ml_problem_type_resolved_attributes + # Defines the resolved attributes specific to a problem type. + # @return [Types::AutoMLProblemTypeResolvedAttributes] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AutoMLResolvedAttributes AWS API Documentation + # + class AutoMLResolvedAttributes < Struct.new( + :auto_ml_job_objective, + :completion_criteria, + :auto_ml_problem_type_resolved_attributes) + SENSITIVE = [] + include Aws::Structure + end + # Describes the Amazon S3 data source. # # @!attribute [rw] s3_data_type @@ -2939,6 +2996,50 @@ class CandidateArtifactLocations < Struct.new( include Aws::Structure end + # Stores the configuration information for how model candidates are + # generated using an AutoML job V2. + # + # @!attribute [rw] algorithms_config + # Stores the configuration information for the selection of algorithms + # used to train model candidates on tabular data. + # + # The list of available algorithms to choose from depends on the + # training mode set in [ `TabularJobConfig.Mode` ][1]. + # + # * `AlgorithmsConfig` should not be set in `AUTO` training mode. + # + # * When `AlgorithmsConfig` is provided, one `AutoMLAlgorithms` + # attribute must be set and one only. 
+ # + # If the list of algorithms provided as values for + # `AutoMLAlgorithms` is empty, `CandidateGenerationConfig` uses the + # full set of algorithms for the given training mode. + # + # * When `AlgorithmsConfig` is not provided, + # `CandidateGenerationConfig` uses the full set of algorithms for + # the given training mode. + # + # For the list of all algorithms per problem type and training mode, + # see [ AutoMLAlgorithmConfig][2]. + # + # For more information on each algorithm, see the [Algorithm + # support][3] section in Autopilot developer guide. + # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TabularJobConfig.html + # [2]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLAlgorithmConfig.html + # [3]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CandidateGenerationConfig AWS API Documentation + # + class CandidateGenerationConfig < Struct.new( + :algorithms_config) + SENSITIVE = [] + include Aws::Structure + end + # The properties of an AutoML candidate job. # # @!attribute [rw] candidate_artifact_locations @@ -4525,15 +4626,13 @@ class CreateArtifactResponse < Struct.new( # @return [String] # # @!attribute [rw] auto_ml_job_objective - # Defines the objective metric used to measure the predictive quality - # of an AutoML job. You provide an [AutoMLJobObjective$MetricName][1] - # and Autopilot infers whether to minimize or maximize it. For - # [CreateAutoMLJobV2][2], only `Accuracy` is supported. + # Specifies a metric to minimize or maximize as the objective of a + # job. If not specified, the default objective metric depends on the + # problem type. See [AutoMLJobObjective][1] for the default values. # # # # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLJobObjective.html - # [2]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html # @return [Types::AutoMLJobObjective] # # @!attribute [rw] auto_ml_job_config @@ -4607,10 +4706,12 @@ class CreateAutoMLJobResponse < Struct.new( # [InputDataConfig][1] supported by `CreateAutoMLJob`. The supported # formats depend on the problem type: # - # * ImageClassification: S3Prefix, `ManifestFile`, - # `AugmentedManifestFile` + # * For Tabular problem types: `S3Prefix`, `ManifestFile`. # - # * TextClassification: S3Prefix + # * For ImageClassification: `S3Prefix`, `ManifestFile`, + # `AugmentedManifestFile`. + # + # * For TextClassification: `S3Prefix`. # # # @@ -4625,6 +4726,13 @@ class CreateAutoMLJobResponse < Struct.new( # @!attribute [rw] auto_ml_problem_type_config # Defines the configuration settings of one of the supported problem # types. + # + # For tabular problem types, you must either specify the type of + # supervised learning problem in `AutoMLProblemTypeConfig` + # (`TabularJobConfig.ProblemType`) and provide the + # `AutoMLJobObjective`, or none at all. + # + # # @return [Types::AutoMLProblemTypeConfig] # # @!attribute [rw] role_arn @@ -4649,11 +4757,20 @@ class CreateAutoMLJobResponse < Struct.new( # # @!attribute [rw] auto_ml_job_objective # Specifies a metric to minimize or maximize as the objective of a - # job. For [CreateAutoMLJobV2][1], only `Accuracy` is supported. + # job. If not specified, the default objective metric depends on the + # problem type. For the list of default values per problem type, see + # [AutoMLJobObjective][1]. 
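# Illustrative usage sketch, not generated from the API model: restricting the
# algorithms Autopilot may use via AlgorithmsConfig, as described above. Set
# this only when the training mode is ENSEMBLING or HYPERPARAMETER_TUNING, not
# AUTO. The algorithm names here are examples; see AutoMLAlgorithmConfig for
# the values supported by each mode.
candidate_generation_config = {
  algorithms_config: [
    { auto_ml_algorithms: ["xgboost", "lightgbm", "catboost"] }
  ]
}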
+ # + # For tabular problem types, you must either provide the + # `AutoMLJobObjective` and indicate the type of supervised learning + # problem in `AutoMLProblemTypeConfig` + # (`TabularJobConfig.ProblemType`), or none. + # + # # # # - # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html + # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLJobObjective.html # @return [Types::AutoMLJobObjective] # # @!attribute [rw] model_deploy_config @@ -4665,12 +4782,9 @@ class CreateAutoMLJobResponse < Struct.new( # This structure specifies how to split the data into train and # validation datasets. # - # If you are using the V1 API (for example `CreateAutoMLJob`) or the - # V2 API for Natural Language Processing problems (for example - # `CreateAutoMLJobV2` with a `TextClassificationJobConfig` problem - # type), the validation and training datasets must contain the same - # headers. Also, for V1 API jobs, the validation dataset must be less - # than 2 GB in size. + # The validation and training datasets must contain the same headers. + # For jobs created by calling `CreateAutoMLJob`, the validation + # dataset must be less than 2 GB in size. # @return [Types::AutoMLDataSplitConfig] # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateAutoMLJobV2Request AWS API Documentation @@ -10636,8 +10750,7 @@ class DescribeAutoMLJobRequest < Struct.new( # @!attribute [rw] resolved_attributes # Contains `ProblemType`, `AutoMLJobObjective`, and # `CompletionCriteria`. If you do not provide these values, they are - # auto-inferred. If you do provide them, the values used are the ones - # you provide. + # inferred. # @return [Types::ResolvedAttributes] # # @!attribute [rw] model_deploy_config @@ -10678,7 +10791,7 @@ class DescribeAutoMLJobResponse < Struct.new( end # @!attribute [rw] auto_ml_job_name - # Requests information about an AutoML V2 job using its unique name. + # Requests information about an AutoML job V2 using its unique name. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeAutoMLJobV2Request AWS API Documentation @@ -10690,11 +10803,11 @@ class DescribeAutoMLJobV2Request < Struct.new( end # @!attribute [rw] auto_ml_job_name - # Returns the name of the AutoML V2 job. + # Returns the name of the AutoML job V2. # @return [String] # # @!attribute [rw] auto_ml_job_arn - # Returns the Amazon Resource Name (ARN) of the AutoML V2 job. + # Returns the Amazon Resource Name (ARN) of the AutoML job V2. # @return [String] # # @!attribute [rw] auto_ml_job_input_data_config @@ -10718,15 +10831,15 @@ class DescribeAutoMLJobV2Request < Struct.new( # # @!attribute [rw] auto_ml_problem_type_config # Returns the configuration settings of the problem type set for the - # AutoML V2 job. + # AutoML job V2. # @return [Types::AutoMLProblemTypeConfig] # # @!attribute [rw] creation_time - # Returns the creation time of the AutoML V2 job. + # Returns the creation time of the AutoML job V2. # @return [Time] # # @!attribute [rw] end_time - # Returns the end time of the AutoML V2 job. + # Returns the end time of the AutoML job V2. # @return [Time] # # @!attribute [rw] last_modified_time @@ -10734,13 +10847,13 @@ class DescribeAutoMLJobV2Request < Struct.new( # @return [Time] # # @!attribute [rw] failure_reason - # Returns the reason for the failure of the AutoML V2 job, when + # Returns the reason for the failure of the AutoML job V2, when # applicable. 
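# Illustrative usage sketch, not generated from the API model: creating a
# tabular AutoML job V2 with the request shape documented above. Job name, S3
# paths, role ARN, and column names are placeholder assumptions. ProblemType
# and AutoMLJobObjective are either both provided or both omitted, as the
# documentation above requires.
require "aws-sdk-sagemaker"

sagemaker = Aws::SageMaker::Client.new(region: "us-east-1")

resp = sagemaker.create_auto_ml_job_v2(
  auto_ml_job_name: "churn-tabular-v2",
  auto_ml_job_input_data_config: [
    {
      channel_type: "training",
      content_type: "text/csv;header=present",
      data_source: {
        s3_data_source: { s3_data_type: "S3Prefix", s3_uri: "s3://example-bucket/automl/train/" }
      }
    }
  ],
  output_data_config: { s3_output_path: "s3://example-bucket/automl/output/" },
  auto_ml_problem_type_config: {
    tabular_job_config: {
      target_attribute_name: "churned",
      problem_type: "BinaryClassification",
      mode: "ENSEMBLING",
      completion_criteria: { max_auto_ml_job_runtime_in_seconds: 3600 }
    }
  },
  auto_ml_job_objective: { metric_name: "F1" },
  role_arn: "arn:aws:iam::111122223333:role/service-role/ExampleAutopilotRole"
)
puts resp.auto_ml_job_arn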
# @return [String] # # @!attribute [rw] partial_failure_reasons - # Returns a list of reasons for partial failures within an AutoML V2 - # job. + # Returns a list of reasons for partial failures within an AutoML job + # V2. # @return [Array] # # @!attribute [rw] best_candidate @@ -10749,11 +10862,11 @@ class DescribeAutoMLJobV2Request < Struct.new( # @return [Types::AutoMLCandidate] # # @!attribute [rw] auto_ml_job_status - # Returns the status of the AutoML V2 job. + # Returns the status of the AutoML job V2. # @return [String] # # @!attribute [rw] auto_ml_job_secondary_status - # Returns the secondary status of the AutoML V2 job. + # Returns the secondary status of the AutoML job V2. # @return [String] # # @!attribute [rw] model_deploy_config @@ -10775,6 +10888,19 @@ class DescribeAutoMLJobV2Request < Struct.new( # VPC settings. # @return [Types::AutoMLSecurityConfig] # + # @!attribute [rw] auto_ml_job_artifacts + # The artifacts that are generated during an AutoML job. + # @return [Types::AutoMLJobArtifacts] + # + # @!attribute [rw] resolved_attributes + # Returns the resolved attributes used by the AutoML job V2. + # @return [Types::AutoMLResolvedAttributes] + # + # @!attribute [rw] auto_ml_problem_type_config_name + # Returns the name of the problem type configuration set for the + # AutoML job V2. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeAutoMLJobV2Response AWS API Documentation # class DescribeAutoMLJobV2Response < Struct.new( @@ -10796,7 +10922,10 @@ class DescribeAutoMLJobV2Response < Struct.new( :model_deploy_config, :model_deploy_result, :data_split_config, - :security_config) + :security_config, + :auto_ml_job_artifacts, + :resolved_attributes, + :auto_ml_problem_type_config_name) SENSITIVE = [] include Aws::Structure end @@ -20733,7 +20862,7 @@ class Image < Struct.new( end # Stores the configuration information for the image classification - # problem of an AutoML job using the V2 API. + # problem of an AutoML job V2. # # @!attribute [rw] completion_criteria # How long a job is allowed to run, or how many candidates a job is @@ -30828,6 +30957,11 @@ class OnlineStoreSecurityConfig < Struct.new( # using [TargetPlatform][1] fields. It can be used instead of # `TargetPlatform`. # + # Currently `ml_trn1` is available only in US East (N. Virginia) + # Region, and `ml_inf2` is available only in US East (Ohio) Region. + # + # + # # # # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TargetPlatform.html @@ -34402,8 +34536,7 @@ class RepositoryAuthConfig < Struct.new( # # @!attribute [rw] auto_ml_job_objective # Specifies a metric to minimize or maximize as the objective of a - # job. V2 API jobs (for example jobs created by calling - # `CreateAutoMLJobV2`), support `Accuracy` only. + # job. # @return [Types::AutoMLJobObjective] # # @!attribute [rw] problem_type @@ -36307,6 +36440,161 @@ class SuggestionQuery < Struct.new( include Aws::Structure end + # The collection of settings used by an AutoML job V2 for the `TABULAR` + # problem type. + # + # @!attribute [rw] candidate_generation_config + # The configuration information of how model candidates are generated. + # @return [Types::CandidateGenerationConfig] + # + # @!attribute [rw] completion_criteria + # How long a job is allowed to run, or how many candidates a job is + # allowed to generate. 
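# Illustrative usage sketch, not generated from the API model: polling
# DescribeAutoMLJobV2 and reading the new response members documented above.
# The job name is a placeholder, and since no waiter is defined for AutoML
# jobs, a simple sleep loop is assumed here.
require "aws-sdk-sagemaker"

sagemaker = Aws::SageMaker::Client.new(region: "us-east-1")
job_name = "churn-tabular-v2"

resp = sagemaker.describe_auto_ml_job_v2(auto_ml_job_name: job_name)
until %w[Completed Failed Stopped].include?(resp.auto_ml_job_status)
  sleep 60
  resp = sagemaker.describe_auto_ml_job_v2(auto_ml_job_name: job_name)
end

puts resp.auto_ml_problem_type_config_name                        # e.g. a tabular configuration
puts resp.resolved_attributes&.auto_ml_job_objective&.metric_name # objective actually used
puts resp.best_candidate&.candidate_name if resp.auto_ml_job_status == "Completed"
puts resp.failure_reason if resp.auto_ml_job_status == "Failed"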
+ # @return [Types::AutoMLJobCompletionCriteria] + # + # @!attribute [rw] feature_specification_s3_uri + # A URL to the Amazon S3 data source containing selected features from + # the input data source to run an Autopilot job V2. You can input + # `FeatureAttributeNames` (optional) in JSON format as shown below: + # + # `\{ "FeatureAttributeNames":["col1", "col2", ...] \}`. + # + # You can also specify the data type of the feature (optional) in the + # format shown below: + # + # `\{ "FeatureDataTypes":\{"col1":"numeric", "col2":"categorical" ... + # \} \}` + # + # These column keys may not include the target column. + # + # + # + # In ensembling mode, Autopilot only supports the following data + # types: `numeric`, `categorical`, `text`, and `datetime`. In HPO + # mode, Autopilot can support `numeric`, `categorical`, `text`, + # `datetime`, and `sequence`. + # + # If only `FeatureDataTypes` is provided, the column keys (`col1`, + # `col2`,..) should be a subset of the column names in the input data. + # + # If both `FeatureDataTypes` and `FeatureAttributeNames` are provided, + # then the column keys should be a subset of the column names provided + # in `FeatureAttributeNames`. + # + # The key name `FeatureAttributeNames` is fixed. The values listed in + # `["col1", "col2", ...]` are case sensitive and should be a list of + # strings containing unique values that are a subset of the column + # names in the input data. The list of columns provided must not + # include the target column. + # @return [String] + # + # @!attribute [rw] mode + # The method that Autopilot uses to train the data. You can either + # specify the mode manually or let Autopilot choose for you based on + # the dataset size by selecting `AUTO`. In `AUTO` mode, Autopilot + # chooses `ENSEMBLING` for datasets smaller than 100 MB, and + # `HYPERPARAMETER_TUNING` for larger ones. + # + # The `ENSEMBLING` mode uses a multi-stack ensemble model to predict + # classification and regression tasks directly from your dataset. This + # machine learning mode combines several base models to produce an + # optimal predictive model. It then uses a stacking ensemble method to + # combine predictions from contributing members. A multi-stack + # ensemble model can provide better performance over a single model by + # combining the predictive capabilities of multiple models. See + # [Autopilot algorithm support][1] for a list of algorithms supported + # by `ENSEMBLING` mode. + # + # The `HYPERPARAMETER_TUNING` (HPO) mode uses the best hyperparameters + # to train the best version of a model. HPO automatically selects an + # algorithm for the type of problem you want to solve. Then HPO finds + # the best hyperparameters according to your objective metric. See + # [Autopilot algorithm support][1] for a list of algorithms supported + # by `HYPERPARAMETER_TUNING` mode. + # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support + # @return [String] + # + # @!attribute [rw] generate_candidate_definitions_only + # Generates possible candidates without training the models. A model + # candidate is a combination of data preprocessors, algorithms, and + # algorithm parameter settings. + # @return [Boolean] + # + # @!attribute [rw] problem_type + # The type of supervised learning problem available for the model + # candidates of the AutoML job V2. For more information, see [ Amazon + # SageMaker Autopilot problem types][1]. 
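# Illustrative usage sketch, not generated from the API model: writing the
# optional feature specification described above and referencing it through
# feature_specification_s3_uri. The bucket, key, and column names are
# placeholder assumptions; only the FeatureAttributeNames and FeatureDataTypes
# keys come from the documentation above.
require "json"
require "aws-sdk-s3"

feature_spec = {
  "FeatureAttributeNames" => ["price", "category", "review_text"],
  "FeatureDataTypes"      => {
    "price"       => "numeric",
    "category"    => "categorical",
    "review_text" => "text"
  }
}

s3 = Aws::S3::Client.new(region: "us-east-1")
s3.put_object(
  bucket: "example-bucket",
  key: "automl/config/feature-spec.json",
  body: JSON.generate(feature_spec)
)
# Then pass "s3://example-bucket/automl/config/feature-spec.json" as
# feature_specification_s3_uri in tabular_job_config.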
+ # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-datasets-problem-types.html#autopilot-problem-types + # @return [String] + # + # @!attribute [rw] target_attribute_name + # The name of the target variable in supervised learning, usually + # represented by 'y'. + # @return [String] + # + # @!attribute [rw] sample_weight_attribute_name + # If specified, this column name indicates which column of the dataset + # should be treated as sample weights for use by the objective metric + # during the training, evaluation, and the selection of the best + # model. This column is not considered as a predictive feature. For + # more information on Autopilot metrics, see [Metrics and + # validation][1]. + # + # Sample weights should be numeric, non-negative, with larger values + # indicating which rows are more important than others. Data points + # that have invalid or no weight value are excluded. + # + # Support for sample weights is available in [Ensembling][2] mode + # only. + # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-metrics-validation.html + # [2]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLAlgorithmConfig.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/TabularJobConfig AWS API Documentation + # + class TabularJobConfig < Struct.new( + :candidate_generation_config, + :completion_criteria, + :feature_specification_s3_uri, + :mode, + :generate_candidate_definitions_only, + :problem_type, + :target_attribute_name, + :sample_weight_attribute_name) + SENSITIVE = [] + include Aws::Structure + end + + # The resolved attributes specific to the `TABULAR` problem type. + # + # @!attribute [rw] problem_type + # The type of supervised learning problem available for the model + # candidates of the AutoML job V2 (Binary Classification, Multiclass + # Classification, Regression). For more information, see [ Amazon + # SageMaker Autopilot problem types][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-datasets-problem-types.html#autopilot-problem-types + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/TabularResolvedAttributes AWS API Documentation + # + class TabularResolvedAttributes < Struct.new( + :problem_type) + SENSITIVE = [] + include Aws::Structure + end + # A tag object that consists of a key and an optional value, used to # manage metadata for SageMaker Amazon Web Services resources. # @@ -36432,7 +36720,7 @@ class TensorBoardOutputConfig < Struct.new( end # Stores the configuration information for the text classification - # problem of an AutoML job using the V2 API. + # problem of an AutoML job V2. # # @!attribute [rw] completion_criteria # How long a job is allowed to run, or how many candidates a job is @@ -36441,12 +36729,13 @@ class TensorBoardOutputConfig < Struct.new( # # @!attribute [rw] content_column # The name of the column used to provide the sentences to be - # classified. It should not be the same as the target column. + # classified. It should not be the same as the target column + # (Required). # @return [String] # # @!attribute [rw] target_label_column # The name of the column used to provide the class labels. It should - # not be same as the content column. + # not be same as the content column (Required). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/TextClassificationJobConfig AWS API Documentation
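# Illustrative usage sketch, not generated from the API model: a
# TextClassificationJobConfig as documented above, with the required
# content_column and target_label_column. Job name, column names, and S3 paths
# are placeholder assumptions.
require "aws-sdk-sagemaker"

sagemaker = Aws::SageMaker::Client.new(region: "us-east-1")

sagemaker.create_auto_ml_job_v2(
  auto_ml_job_name: "reviews-text-clf-v2",
  auto_ml_job_input_data_config: [
    {
      channel_type: "training",
      content_type: "text/csv;header=present",
      data_source: {
        s3_data_source: { s3_data_type: "S3Prefix", s3_uri: "s3://example-bucket/reviews/train/" }
      }
    }
  ],
  output_data_config: { s3_output_path: "s3://example-bucket/reviews/output/" },
  auto_ml_problem_type_config: {
    text_classification_job_config: {
      content_column: "review_text",
      target_label_column: "sentiment"
    }
  },
  role_arn: "arn:aws:iam::111122223333:role/service-role/ExampleAutopilotRole"
)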