diff --git a/CHANGELOG.md b/CHANGELOG.md index 21dc82a5977..3282271ada6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v1.52.1 (2024-05-02) +=== + +### Service Client Updates +* `service/dynamodb`: Updates service API, documentation, waiters, paginators, and examples + * This release adds support to specify an optional, maximum OnDemandThroughput for DynamoDB tables and global secondary indexes in the CreateTable or UpdateTable APIs. You can also override the OnDemandThroughput settings by calling the ImportTable, RestoreFromPointInTime, or RestoreFromBackup APIs. +* `service/ec2`: Updates service API and documentation + * This release includes a new API for retrieving the public endorsement key of the EC2 instance's Nitro Trusted Platform Module (NitroTPM). +* `service/personalize`: Updates service API and documentation +* `service/redshift-serverless`: Updates service API and documentation + Release v1.52.0 (2024-05-01) === diff --git a/aws/version.go b/aws/version.go index d0d3118f27e..25f2c8b55aa 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.52.0" +const SDKVersion = "1.52.1" diff --git a/models/apis/dynamodb/2012-08-10/api-2.json b/models/apis/dynamodb/2012-08-10/api-2.json index a1487e39931..af7ec92c8c2 100644 --- a/models/apis/dynamodb/2012-08-10/api-2.json +++ b/models/apis/dynamodb/2012-08-10/api-2.json @@ -1528,7 +1528,8 @@ "IndexName":{"shape":"IndexName"}, "KeySchema":{"shape":"KeySchema"}, "Projection":{"shape":"Projection"}, - "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} } }, "CreateGlobalTableInput":{ @@ -1562,6 +1563,7 @@ "RegionName":{"shape":"RegionName"}, "KMSMasterKeyId":{"shape":"KMSMasterKeyId"}, "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughputOverride"}, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughputOverride"}, "GlobalSecondaryIndexes":{"shape":"ReplicaGlobalSecondaryIndexList"}, "TableClassOverride":{"shape":"TableClass"} } @@ -1586,7 +1588,8 @@ "Tags":{"shape":"TagList"}, "TableClass":{"shape":"TableClass"}, "DeletionProtectionEnabled":{"shape":"DeletionProtectionEnabled"}, - "ResourcePolicy":{"shape":"ResourcePolicy"} + "ResourcePolicy":{"shape":"ResourcePolicy"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} } }, "CreateTableOutput":{ @@ -2222,7 +2225,8 @@ "IndexName":{"shape":"IndexName"}, "KeySchema":{"shape":"KeySchema"}, "Projection":{"shape":"Projection"}, - "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} } }, "GlobalSecondaryIndexAutoScalingUpdate":{ @@ -2248,7 +2252,8 @@ "ProvisionedThroughput":{"shape":"ProvisionedThroughputDescription"}, "IndexSizeBytes":{"shape":"LongObject"}, "ItemCount":{"shape":"LongObject"}, - "IndexArn":{"shape":"String"} + "IndexArn":{"shape":"String"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} } }, "GlobalSecondaryIndexDescriptionList":{ @@ -2261,7 +2266,8 @@ "IndexName":{"shape":"IndexName"}, "KeySchema":{"shape":"KeySchema"}, "Projection":{"shape":"Projection"}, - "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} } }, "GlobalSecondaryIndexList":{ @@ -2892,6 +2898,19 
@@ "type":"list", "member":{"shape":"NumberAttributeValue"} }, + "OnDemandThroughput":{ + "type":"structure", + "members":{ + "MaxReadRequestUnits":{"shape":"LongObject"}, + "MaxWriteRequestUnits":{"shape":"LongObject"} + } + }, + "OnDemandThroughputOverride":{ + "type":"structure", + "members":{ + "MaxReadRequestUnits":{"shape":"LongObject"} + } + }, "ParameterizedStatement":{ "type":"structure", "required":["Statement"], @@ -3193,6 +3212,7 @@ "ReplicaStatusPercentProgress":{"shape":"ReplicaStatusPercentProgress"}, "KMSMasterKeyId":{"shape":"KMSMasterKeyId"}, "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughputOverride"}, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughputOverride"}, "GlobalSecondaryIndexes":{"shape":"ReplicaGlobalSecondaryIndexDescriptionList"}, "ReplicaInaccessibleDateTime":{"shape":"Date"}, "ReplicaTableClassSummary":{"shape":"TableClassSummary"} @@ -3207,7 +3227,8 @@ "required":["IndexName"], "members":{ "IndexName":{"shape":"IndexName"}, - "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughputOverride"} + "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughputOverride"}, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughputOverride"} } }, "ReplicaGlobalSecondaryIndexAutoScalingDescription":{ @@ -3238,7 +3259,8 @@ "type":"structure", "members":{ "IndexName":{"shape":"IndexName"}, - "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughputOverride"} + "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughputOverride"}, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughputOverride"} } }, "ReplicaGlobalSecondaryIndexDescriptionList":{ @@ -3420,6 +3442,7 @@ "GlobalSecondaryIndexOverride":{"shape":"GlobalSecondaryIndexList"}, "LocalSecondaryIndexOverride":{"shape":"LocalSecondaryIndexList"}, "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughput"}, "SSESpecificationOverride":{"shape":"SSESpecification"} } }, @@ -3442,6 +3465,7 @@ "GlobalSecondaryIndexOverride":{"shape":"GlobalSecondaryIndexList"}, "LocalSecondaryIndexOverride":{"shape":"LocalSecondaryIndexList"}, "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughput"}, "SSESpecificationOverride":{"shape":"SSESpecification"} } }, @@ -3633,6 +3657,7 @@ "KeySchema":{"shape":"KeySchema"}, "TableCreationDateTime":{"shape":"TableCreationDateTime"}, "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"}, "ItemCount":{"shape":"ItemCount"}, "BillingMode":{"shape":"BillingMode"} } @@ -3724,6 +3749,7 @@ "KeySchema":{"shape":"KeySchema"}, "BillingMode":{"shape":"BillingMode"}, "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"}, "SSESpecification":{"shape":"SSESpecification"}, "GlobalSecondaryIndexes":{"shape":"GlobalSecondaryIndexList"} } @@ -3753,7 +3779,8 @@ "SSEDescription":{"shape":"SSEDescription"}, "ArchivalSummary":{"shape":"ArchivalSummary"}, "TableClassSummary":{"shape":"TableClassSummary"}, - "DeletionProtectionEnabled":{"shape":"DeletionProtectionEnabled"} + "DeletionProtectionEnabled":{"shape":"DeletionProtectionEnabled"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} } }, "TableId":{ @@ -4024,13 +4051,11 @@ "UpdateExpression":{"type":"string"}, "UpdateGlobalSecondaryIndexAction":{ "type":"structure", - "required":[ - "IndexName", - "ProvisionedThroughput" - ], + "required":["IndexName"], 
"members":{ "IndexName":{"shape":"IndexName"}, - "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} } }, "UpdateGlobalTableInput":{ @@ -4133,6 +4158,7 @@ "RegionName":{"shape":"RegionName"}, "KMSMasterKeyId":{"shape":"KMSMasterKeyId"}, "ProvisionedThroughputOverride":{"shape":"ProvisionedThroughputOverride"}, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughputOverride"}, "GlobalSecondaryIndexes":{"shape":"ReplicaGlobalSecondaryIndexList"}, "TableClassOverride":{"shape":"TableClass"} } @@ -4150,7 +4176,8 @@ "SSESpecification":{"shape":"SSESpecification"}, "ReplicaUpdates":{"shape":"ReplicationGroupUpdateList"}, "TableClass":{"shape":"TableClass"}, - "DeletionProtectionEnabled":{"shape":"DeletionProtectionEnabled"} + "DeletionProtectionEnabled":{"shape":"DeletionProtectionEnabled"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} } }, "UpdateTableOutput":{ diff --git a/models/apis/dynamodb/2012-08-10/docs-2.json b/models/apis/dynamodb/2012-08-10/docs-2.json index 04bb2e18d2d..8624bd14014 100644 --- a/models/apis/dynamodb/2012-08-10/docs-2.json +++ b/models/apis/dynamodb/2012-08-10/docs-2.json @@ -31,7 +31,7 @@ "ExecuteTransaction": "

This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL.

The entire transaction must consist of either read statements or write statements; you cannot mix both in one transaction. The EXISTS function is an exception and can be used to check the condition of specific attributes of the item in a similar manner to ConditionCheck in the TransactWriteItems API.

", "ExportTableToPointInTime": "

Exports table data to an S3 bucket. The table must have point in time recovery enabled, and you can export data from any time within the point in time recovery window.

", "GetItem": "

The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.

GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.

", - "GetResourcePolicy": "

Returns the resource-based policy document attached to the resource, which can be a table or stream, in JSON format.

GetResourcePolicy follows an eventually consistent model. The following list describes the outcomes when you issue the GetResourcePolicy request immediately after issuing another request:

Because GetResourcePolicy uses an eventually consistent query, the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then retry the GetResourcePolicy request.

After a GetResourcePolicy request returns a policy created using the PutResourcePolicy request, you can assume the policy will start getting applied in the authorization of requests to the resource. Because this process is eventually consistent, it will take some time to apply the policy to all requests to a resource. Policies that you attach while creating a table using the CreateTable request will always be applied to all requests for that table.

", + "GetResourcePolicy": "

Returns the resource-based policy document attached to the resource, which can be a table or stream, in JSON format.

GetResourcePolicy follows an eventually consistent model. The following list describes the outcomes when you issue the GetResourcePolicy request immediately after issuing another request:

Because GetResourcePolicy uses an eventually consistent query, the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then retry the GetResourcePolicy request.

After a GetResourcePolicy request returns a policy created using the PutResourcePolicy request, the policy will be applied in the authorization of requests to the resource. Because this process is eventually consistent, it will take some time to apply the policy to all requests to a resource. Policies that you attach while creating a table using the CreateTable request will always be applied to all requests for that table.

", "ImportTable": "

Imports table data from an S3 bucket.

", "ListBackups": "

List DynamoDB backups that are associated with an Amazon Web Services account and weren't made with Amazon Web Services Backup. To list these backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page.

In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested.

You can call ListBackups a maximum of five times per second.

If you want to retrieve the complete list of backups made with Amazon Web Services Backup, use the Amazon Web Services Backup list API.

", "ListContributorInsights": "

Returns a list of ContributorInsightsSummary for a table and all its global secondary indexes.

", @@ -41,7 +41,7 @@ "ListTables": "

Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.

", "ListTagsOfResource": "

List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

", "PutItem": "

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

When you add an item, the primary key attributes are the only required attributes.

Empty String and Binary attribute values are allowed. Attribute values of type String and Binary must have a length greater than zero if the attribute is used as a key attribute for a table or index. Set type attributes cannot be empty.

Invalid requests with empty values will be rejected with a ValidationException.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

", - "PutResourcePolicy": "

Attaches a resource-based policy document to the resource, which can be a table or stream. When you attach a resource-based policy using this API, the policy application is eventually consistent .

PutResourcePolicy is an idempotent operation; running it multiple times on the same resource using the same policy document will return the same revision ID. If you specify an ExpectedRevisionId which doesn't match the current policy's RevisionId, the PolicyNotFoundException will be returned.

PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy request immediately after a PutResourcePolicy request, DynamoDB might return your previous policy, if there was one, or return the PolicyNotFoundException. This is because GetResourcePolicy uses an eventually consistent query, and the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then try the GetResourcePolicy request again.

", + "PutResourcePolicy": "

Attaches a resource-based policy document to the resource, which can be a table or stream. When you attach a resource-based policy using this API, the policy application is eventually consistent.

PutResourcePolicy is an idempotent operation; running it multiple times on the same resource using the same policy document will return the same revision ID. If you specify an ExpectedRevisionId that doesn't match the current policy's RevisionId, the PolicyNotFoundException will be returned.

PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy request immediately after a PutResourcePolicy request, DynamoDB might return your previous policy, if there was one, or return the PolicyNotFoundException. This is because GetResourcePolicy uses an eventually consistent query, and the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then try the GetResourcePolicy request again.
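Because the read side is eventually consistent, a client typically attaches the policy and then polls until the new revision is visible, as the paragraph above suggests. The following is a minimal aws-sdk-go v1 sketch of that pattern; the table ARN, policy document, and retry budget are illustrative placeholders, not values defined by this release.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	tableArn := "arn:aws:dynamodb:us-east-1:111122223333:table/Orders" // placeholder ARN

	// Attach a policy; the document here is a placeholder, not a working policy.
	put, err := svc.PutResourcePolicy(&dynamodb.PutResourcePolicyInput{
		ResourceArn: aws.String(tableArn),
		Policy:      aws.String(`{"Version":"2012-10-17","Statement":[]}`),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll GetResourcePolicy until the eventually consistent read returns the new revision.
	for i := 0; i < 5; i++ {
		got, err := svc.GetResourcePolicy(&dynamodb.GetResourcePolicyInput{
			ResourceArn: aws.String(tableArn),
		})
		if err == nil && aws.StringValue(got.RevisionId) == aws.StringValue(put.RevisionId) {
			fmt.Println("policy visible, revision:", aws.StringValue(got.RevisionId))
			return
		}
		time.Sleep(2 * time.Second) // wait a few seconds and retry, as the docs recommend
	}
	log.Println("policy not yet visible after retries")
}
```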

", "Query": "

You must provide the name of the partition key attribute and a single value for that attribute. Query returns all items with that partition key value. Optionally, you can provide a sort key attribute and use a comparison operator to refine the search results.

Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.

A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression.

Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false.

A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression.

A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out.

You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

", "RestoreTableFromBackup": "

Creates a new table from an existing backup. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

", "RestoreTableToPointInTime": "

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

You must manually set up the following on the restored table:

", @@ -1991,6 +1991,9 @@ "ImportTableDescription$ProcessedSizeBytes": "

The total size of data processed from the source file, in bytes.

", "LocalSecondaryIndexDescription$IndexSizeBytes": "

The total size of the specified index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

", "LocalSecondaryIndexDescription$ItemCount": "

The number of items in the specified index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

", + "OnDemandThroughput$MaxReadRequestUnits": "

Maximum number of read request units for the specified table.

To specify a maximum OnDemandThroughput on your table, set the value of MaxReadRequestUnits as greater than or equal to 1. To remove the maximum OnDemandThroughput that is currently set on your table, set the value of MaxReadRequestUnits to -1.

", + "OnDemandThroughput$MaxWriteRequestUnits": "

Maximum number of write request units for the specified table.

To specify a maximum OnDemandThroughput on your table, set the value of MaxWriteRequestUnits as greater than or equal to 1. To remove the maximum OnDemandThroughput that is currently set on your table, set the value of MaxWriteRequestUnits to -1.
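As a rough illustration of the -1 semantics described above, here is a hedged aws-sdk-go v1 sketch that removes a table's read cap while keeping a write cap via UpdateTable. The OnDemandThroughput Go type and its pointer fields are assumed to follow the SDK's usual code generation for the shape added in this model update; the table name is a placeholder.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Clear the current maximum read request units (-1) and keep a write cap of 50.
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Orders"), // placeholder table name
		OnDemandThroughput: &dynamodb.OnDemandThroughput{
			MaxReadRequestUnits:  aws.Int64(-1), // -1 removes the read maximum
			MaxWriteRequestUnits: aws.Int64(50),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```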

", + "OnDemandThroughputOverride$MaxReadRequestUnits": "

Maximum number of read request units for the specified replica table.

", "SourceTableDetails$TableSizeBytes": "

Size of the table in bytes. Note that this is an approximate value.

", "TableDescription$TableSizeBytes": "

The total size of the specified table, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

", "TableDescription$ItemCount": "

The number of items in the specified table. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

" @@ -2051,6 +2054,33 @@ "AttributeValue$NS": "

An attribute of type Number Set. For example:

\"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]

Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.

" } }, + "OnDemandThroughput": { + "base": "

Sets the maximum number of read and write units for the specified on-demand table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

", + "refs": { + "CreateGlobalSecondaryIndexAction$OnDemandThroughput": "

The maximum number of read and write units for the global secondary index being created. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

", + "CreateTableInput$OnDemandThroughput": "

Sets the maximum number of read and write units for the specified table in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.
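For context, a minimal aws-sdk-go v1 sketch of creating an on-demand table with the new cap, including a capped global secondary index. The OnDemandThroughput field names mirror the shape members in this model update, but the generated Go types are an assumption; the table name, index name, and attributes are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName:   aws.String("Orders"),
		BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("OrderId"), AttributeType: aws.String("S")},
			{AttributeName: aws.String("CustomerId"), AttributeType: aws.String("S")},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("OrderId"), KeyType: aws.String(dynamodb.KeyTypeHash)},
		},
		// Cap the table at 100 read / 50 write request units.
		OnDemandThroughput: &dynamodb.OnDemandThroughput{
			MaxReadRequestUnits:  aws.Int64(100),
			MaxWriteRequestUnits: aws.Int64(50),
		},
		GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{
			{
				IndexName: aws.String("ByCustomer"),
				KeySchema: []*dynamodb.KeySchemaElement{
					{AttributeName: aws.String("CustomerId"), KeyType: aws.String(dynamodb.KeyTypeHash)},
				},
				Projection: &dynamodb.Projection{ProjectionType: aws.String(dynamodb.ProjectionTypeKeysOnly)},
				// The index can carry its own cap.
				OnDemandThroughput: &dynamodb.OnDemandThroughput{
					MaxReadRequestUnits: aws.Int64(25),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("table status:", aws.StringValue(out.TableDescription.TableStatus))
}
```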

", + "GlobalSecondaryIndex$OnDemandThroughput": "

The maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

", + "GlobalSecondaryIndexDescription$OnDemandThroughput": "

The maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

", + "GlobalSecondaryIndexInfo$OnDemandThroughput": null, + "RestoreTableFromBackupInput$OnDemandThroughputOverride": null, + "RestoreTableToPointInTimeInput$OnDemandThroughputOverride": null, + "SourceTableDetails$OnDemandThroughput": null, + "TableCreationParameters$OnDemandThroughput": null, + "TableDescription$OnDemandThroughput": "

The maximum number of read and write units for the specified on-demand table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

", + "UpdateGlobalSecondaryIndexAction$OnDemandThroughput": "

Updates the maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

", + "UpdateTableInput$OnDemandThroughput": "

Updates the maximum number of read and write units for the specified table in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + } + }, + "OnDemandThroughputOverride": { + "base": "

Overrides the on-demand throughput settings for this replica table. If you don't specify a value for this parameter, it uses the source table's on-demand throughput settings.

", + "refs": { + "CreateReplicationGroupMemberAction$OnDemandThroughputOverride": "

The maximum on-demand throughput settings for the specified replica table being created. You can only modify MaxReadRequestUnits, because you can't modify MaxWriteRequestUnits for individual replica tables.

", + "ReplicaDescription$OnDemandThroughputOverride": "

Overrides the maximum on-demand throughput settings for the specified replica table.

", + "ReplicaGlobalSecondaryIndex$OnDemandThroughputOverride": "

Overrides the maximum on-demand throughput settings for the specified global secondary index in the specified replica table.

", + "ReplicaGlobalSecondaryIndexDescription$OnDemandThroughputOverride": "

Overrides the maximum on-demand throughput for the specified global secondary index in the specified replica table.

", + "UpdateReplicationGroupMemberAction$OnDemandThroughputOverride": "

Overrides the maximum on-demand throughput for the replica table.
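A sketch of how the per-replica override could be applied through UpdateTable on a global table, again assuming the regenerated aws-sdk-go v1 types follow the shapes above. Only the read side can be overridden per replica; the table name and Region are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Cap read request units on one replica of a global table.
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Orders"), // placeholder table name
		ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{
			{
				Update: &dynamodb.UpdateReplicationGroupMemberAction{
					RegionName: aws.String("eu-west-1"), // placeholder replica Region
					OnDemandThroughputOverride: &dynamodb.OnDemandThroughputOverride{
						MaxReadRequestUnits: aws.Int64(200),
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```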

" + } + }, "ParameterizedStatement": { "base": "

Represents a PartiQL statement that uses parameters.

", "refs": { @@ -2122,10 +2152,10 @@ "base": null, "refs": { "DeleteResourcePolicyInput$ExpectedRevisionId": "

A string value that you can use to conditionally delete your policy. When you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, the request will fail and return a PolicyNotFoundException.

", - "DeleteResourcePolicyOutput$RevisionId": "

A unique string that represents the revision ID of the policy. If you are comparing revision IDs, make sure to always use string comparison logic.

This value will be empty if you make a request against a resource without a policy.

", - "GetResourcePolicyOutput$RevisionId": "

A unique string that represents the revision ID of the policy. If you are comparing revision IDs, make sure to always use string comparison logic.

", - "PutResourcePolicyInput$ExpectedRevisionId": "

A string value that you can use to conditionally update your policy. You can provide the revision ID of your existing policy to make mutating requests against that policy. When you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, your request will be rejected with a PolicyNotFoundException.

To conditionally put a policy when no policy exists for the resource, specify NO_POLICY for the revision ID.

", - "PutResourcePolicyOutput$RevisionId": "

A unique string that represents the revision ID of the policy. If you are comparing revision IDs, make sure to always use string comparison logic.

" + "DeleteResourcePolicyOutput$RevisionId": "

A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.

This value will be empty if you make a request against a resource without a policy.

", + "GetResourcePolicyOutput$RevisionId": "

A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.

", + "PutResourcePolicyInput$ExpectedRevisionId": "

A string value that you can use to conditionally update your policy. You can provide the revision ID of your existing policy to make mutating requests against that policy.

When you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, your request will be rejected with a PolicyNotFoundException.

To conditionally attach a policy when no policy exists for the resource, specify NO_POLICY for the revision ID.

", + "PutResourcePolicyOutput$RevisionId": "

A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.

" } }, "PositiveIntegerObject": { @@ -2537,9 +2567,9 @@ "ResourcePolicy": { "base": null, "refs": { - "CreateTableInput$ResourcePolicy": "

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

When you attach a resource-based policy while creating a table, the policy creation is strongly consistent.

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. You can’t request an increase for this limit. For a full list of all considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations.

", + "CreateTableInput$ResourcePolicy": "

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

When you attach a resource-based policy while creating a table, the policy application is strongly consistent.

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations.

", "GetResourcePolicyOutput$Policy": "

The resource-based policy document attached to the resource, which can be a table or stream, in JSON format.

", - "PutResourcePolicyInput$Policy": "

An Amazon Web Services resource-based policy document in JSON format.

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations.

" + "PutResourcePolicyInput$Policy": "

An Amazon Web Services resource-based policy document in JSON format.

For a full list of all considerations that apply while attaching a resource-based policy, see Resource-based policy considerations.

" } }, "RestoreInProgress": { diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 397f2634aad..a9ff4f33b5c 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -3964,6 +3964,15 @@ "input":{"shape":"GetInstanceMetadataDefaultsRequest"}, "output":{"shape":"GetInstanceMetadataDefaultsResult"} }, + "GetInstanceTpmEkPub":{ + "name":"GetInstanceTpmEkPub", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetInstanceTpmEkPubRequest"}, + "output":{"shape":"GetInstanceTpmEkPubResult"} + }, "GetInstanceTypesFromInstanceRequirements":{ "name":"GetInstanceTypesFromInstanceRequirements", "http":{ @@ -21055,6 +21064,24 @@ "locationName":"item" } }, + "EkPubKeyFormat":{ + "type":"string", + "enum":[ + "der", + "tpmt" + ] + }, + "EkPubKeyType":{ + "type":"string", + "enum":[ + "rsa-2048", + "ecc-sec-p384" + ] + }, + "EkPubKeyValue":{ + "type":"string", + "sensitive":true + }, "ElasticGpuAssociation":{ "type":"structure", "members":{ @@ -23706,6 +23733,41 @@ } } }, + "GetInstanceTpmEkPubRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "KeyType", + "KeyFormat" + ], + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "KeyType":{"shape":"EkPubKeyType"}, + "KeyFormat":{"shape":"EkPubKeyFormat"}, + "DryRun":{"shape":"Boolean"} + } + }, + "GetInstanceTpmEkPubResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "locationName":"instanceId" + }, + "KeyType":{ + "shape":"EkPubKeyType", + "locationName":"keyType" + }, + "KeyFormat":{ + "shape":"EkPubKeyFormat", + "locationName":"keyFormat" + }, + "KeyValue":{ + "shape":"EkPubKeyValue", + "locationName":"keyValue" + } + } + }, "GetInstanceTypesFromInstanceRequirementsRequest":{ "type":"structure", "required":[ diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 298ff22756d..4e48821a182 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -445,6 +445,7 @@ "GetHostReservationPurchasePreview": "

Preview a reservation purchase with configurations that match those of your Dedicated Host. You must have active Dedicated Hosts in your account before you purchase a reservation.

This is a preview of the PurchaseHostReservation action and does not result in the offering being purchased.

", "GetImageBlockPublicAccessState": "

Gets the current state of block public access for AMIs at the account level in the specified Amazon Web Services Region.

For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

", "GetInstanceMetadataDefaults": "

Gets the default instance metadata service (IMDS) settings that are set at the account level in the specified Amazon Web Services Region.

For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.

", + "GetInstanceTpmEkPub": "

Gets the public endorsement key associated with the Nitro Trusted Platform Module (NitroTPM) for the specified instance.
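As a rough sketch of calling the new operation from aws-sdk-go v1: the Go method and the GetInstanceTpmEkPubInput/Output names are assumed to follow the SDK's usual operation naming, the KeyType and KeyFormat strings come from the EkPubKeyType and EkPubKeyFormat enums in this model update, and the instance ID is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Request the NitroTPM endorsement key for one instance in DER format.
	out, err := svc.GetInstanceTpmEkPub(&ec2.GetInstanceTpmEkPubInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder instance ID
		KeyType:    aws.String("rsa-2048"),
		KeyFormat:  aws.String("der"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key format:", aws.StringValue(out.KeyFormat))
	fmt.Println("key value (base64):", aws.StringValue(out.KeyValue))
}
```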

", "GetInstanceTypesFromInstanceRequirements": "

Returns a list of instance types with the specified instance attributes. You can use the response to preview the instance types without launching instances. Note that the response does not consider capacity.

When you specify multiple parameters, you get instance types that satisfy all of the specified parameters. If you specify multiple values for a parameter, you get instance types that satisfy any of the specified values.

For more information, see Preview instance types with specified attributes, Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide, and Creating an Auto Scaling group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide.

", "GetInstanceUefiData": "

A binary representation of the UEFI variable store. Only non-volatile variables are stored. This is a base64 encoded and zlib compressed binary value that must be properly encoded.

When you use register-image to create an AMI, you can create an exact copy of your variable store by passing the UEFI data in the UefiData parameter. You can modify the UEFI data by using the python-uefivars tool on GitHub. You can use the tool to convert the UEFI data into a human-readable format (JSON), which you can inspect and modify, and then convert back into the binary format to use with register-image.

For more information, see UEFI Secure Boot in the Amazon EC2 User Guide.

", "GetIpamAddressHistory": "

Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.

", @@ -2500,6 +2501,7 @@ "GetGroupsForCapacityReservationRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "GetImageBlockPublicAccessStateRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "GetInstanceMetadataDefaultsRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "GetInstanceTpmEkPubRequest$DryRun": "

Specify this parameter to verify whether the request will succeed, without actually making the request. If the request will succeed, the response is DryRunOperation. Otherwise, the response is UnauthorizedOperation.

", "GetInstanceTypesFromInstanceRequirementsRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "GetInstanceUefiDataRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "GetIpamAddressHistoryRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -8732,6 +8734,26 @@ "DisassociateNatGatewayAddressRequest$AssociationIds": "

The association IDs of EIPs that have been associated with the NAT gateway.

" } }, + "EkPubKeyFormat": { + "base": null, + "refs": { + "GetInstanceTpmEkPubRequest$KeyFormat": "

The required public endorsement key format. Specify der for a DER-encoded public key that is compatible with OpenSSL. Specify tpmt for a TPM 2.0 format that is compatible with tpm2-tools. The returned key is base64 encoded.
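Since der is described as an OpenSSL-compatible, base64-encoded DER key, a caller would typically decode and parse it before use. The helper below is a sketch under the assumption that the DER payload is a standard SubjectPublicKeyInfo structure that Go's x509.ParsePKIXPublicKey accepts; the function name is hypothetical.

```go
package example

import (
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

// parseEkPub decodes the base64 string returned in KeyValue when KeyFormat is
// "der" and parses the DER bytes into a Go public key.
func parseEkPub(keyValue string) (interface{}, error) {
	der, err := base64.StdEncoding.DecodeString(keyValue)
	if err != nil {
		return nil, fmt.Errorf("decode base64 key value: %w", err)
	}
	return x509.ParsePKIXPublicKey(der)
}
```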

", + "GetInstanceTpmEkPubResult$KeyFormat": "

The public endorsement key format.

" + } + }, + "EkPubKeyType": { + "base": null, + "refs": { + "GetInstanceTpmEkPubRequest$KeyType": "

The required public endorsement key type.

", + "GetInstanceTpmEkPubResult$KeyType": "

The public endorsement key type.

" + } + }, + "EkPubKeyValue": { + "base": null, + "refs": { + "GetInstanceTpmEkPubResult$KeyValue": "

The public endorsement key material.

" + } + }, "ElasticGpuAssociation": { "base": "

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes the association between an instance and an Elastic Graphics accelerator.

", "refs": { @@ -10164,6 +10186,16 @@ "refs": { } }, + "GetInstanceTpmEkPubRequest": { + "base": null, + "refs": { + } + }, + "GetInstanceTpmEkPubResult": { + "base": null, + "refs": { + } + }, "GetInstanceTypesFromInstanceRequirementsRequest": { "base": null, "refs": { @@ -11538,6 +11570,8 @@ "DetachClassicLinkVpcRequest$InstanceId": "

The ID of the instance to unlink from the VPC.

", "GetConsoleOutputRequest$InstanceId": "

The ID of the instance.

", "GetConsoleScreenshotRequest$InstanceId": "

The ID of the instance.

", + "GetInstanceTpmEkPubRequest$InstanceId": "

The ID of the instance for which to get the public endorsement key.

", + "GetInstanceTpmEkPubResult$InstanceId": "

The ID of the instance.

", "GetInstanceUefiDataRequest$InstanceId": "

The ID of the instance from which to retrieve the UEFI data.

", "GetInstanceUefiDataResult$InstanceId": "

The ID of the instance from which to retrieve the UEFI data.

", "GetLaunchTemplateDataRequest$InstanceId": "

The ID of the instance.

", diff --git a/models/apis/personalize/2018-05-22/api-2.json b/models/apis/personalize/2018-05-22/api-2.json index 8c5f67ec1f6..330edb9f1c9 100644 --- a/models/apis/personalize/2018-05-22/api-2.json +++ b/models/apis/personalize/2018-05-22/api-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"personalize", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Personalize", "serviceId":"Personalize", "signatureVersion":"v4", @@ -65,6 +66,23 @@ ], "idempotent":true }, + "CreateDataDeletionJob":{ + "name":"CreateDataDeletionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataDeletionJobRequest"}, + "output":{"shape":"CreateDataDeletionJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"TooManyTagsException"} + ] + }, "CreateDataset":{ "name":"CreateDataset", "http":{ @@ -431,6 +449,20 @@ ], "idempotent":true }, + "DescribeDataDeletionJob":{ + "name":"DescribeDataDeletionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataDeletionJobRequest"}, + "output":{"shape":"DescribeDataDeletionJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, "DescribeDataset":{ "name":"DescribeDataset", "http":{ @@ -668,6 +700,20 @@ ], "idempotent":true }, + "ListDataDeletionJobs":{ + "name":"ListDataDeletionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataDeletionJobsRequest"}, + "output":{"shape":"ListDataDeletionJobsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InvalidNextTokenException"} + ], + "idempotent":true + }, "ListDatasetExportJobs":{ "name":"ListDatasetExportJobs", "http":{ @@ -1351,6 +1397,28 @@ "campaignArn":{"shape":"Arn"} } }, + "CreateDataDeletionJobRequest":{ + "type":"structure", + "required":[ + "jobName", + "datasetGroupArn", + "dataSource", + "roleArn" + ], + "members":{ + "jobName":{"shape":"Name"}, + "datasetGroupArn":{"shape":"Arn"}, + "dataSource":{"shape":"DataSource"}, + "roleArn":{"shape":"RoleArn"}, + "tags":{"shape":"Tags"} + } + }, + "CreateDataDeletionJobResponse":{ + "type":"structure", + "members":{ + "dataDeletionJobArn":{"shape":"Arn"} + } + }, "CreateDatasetExportJobRequest":{ "type":"structure", "required":[ @@ -1577,6 +1645,38 @@ "solutionVersionArn":{"shape":"Arn"} } }, + "DataDeletionJob":{ + "type":"structure", + "members":{ + "jobName":{"shape":"Name"}, + "dataDeletionJobArn":{"shape":"Arn"}, + "datasetGroupArn":{"shape":"Arn"}, + "dataSource":{"shape":"DataSource"}, + "roleArn":{"shape":"RoleArn"}, + "status":{"shape":"Status"}, + "numDeleted":{"shape":"Integer"}, + "creationDateTime":{"shape":"Date"}, + "lastUpdatedDateTime":{"shape":"Date"}, + "failureReason":{"shape":"FailureReason"} + } + }, + "DataDeletionJobSummary":{ + "type":"structure", + "members":{ + "dataDeletionJobArn":{"shape":"Arn"}, + "datasetGroupArn":{"shape":"Arn"}, + "jobName":{"shape":"Name"}, + "status":{"shape":"Status"}, + "creationDateTime":{"shape":"Date"}, + "lastUpdatedDateTime":{"shape":"Date"}, + "failureReason":{"shape":"FailureReason"} + } + }, + "DataDeletionJobs":{ + "type":"list", + "member":{"shape":"DataDeletionJobSummary"}, + "max":100 + }, "DataSource":{ "type":"structure", "members":{ @@ -1916,6 +2016,19 @@ "campaign":{"shape":"Campaign"} } }, + 
"DescribeDataDeletionJobRequest":{ + "type":"structure", + "required":["dataDeletionJobArn"], + "members":{ + "dataDeletionJobArn":{"shape":"Arn"} + } + }, + "DescribeDataDeletionJobResponse":{ + "type":"structure", + "members":{ + "dataDeletionJob":{"shape":"DataDeletionJob"} + } + }, "DescribeDatasetExportJobRequest":{ "type":"structure", "required":["datasetExportJobArn"], @@ -2281,6 +2394,7 @@ "ALL" ] }, + "Integer":{"type":"integer"}, "IntegerHyperParameterRange":{ "type":"structure", "members":{ @@ -2378,6 +2492,21 @@ "nextToken":{"shape":"NextToken"} } }, + "ListDataDeletionJobsRequest":{ + "type":"structure", + "members":{ + "datasetGroupArn":{"shape":"Arn"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListDataDeletionJobsResponse":{ + "type":"structure", + "members":{ + "dataDeletionJobs":{"shape":"DataDeletionJobs"}, + "nextToken":{"shape":"NextToken"} + } + }, "ListDatasetExportJobsRequest":{ "type":"structure", "members":{ diff --git a/models/apis/personalize/2018-05-22/docs-2.json b/models/apis/personalize/2018-05-22/docs-2.json index dd0b3a66ae3..ef2d296aa91 100644 --- a/models/apis/personalize/2018-05-22/docs-2.json +++ b/models/apis/personalize/2018-05-22/docs-2.json @@ -5,6 +5,7 @@ "CreateBatchInferenceJob": "

Generates batch recommendations based on a list of items or users stored in Amazon S3 and exports the recommendations to an Amazon S3 bucket.

To generate batch recommendations, specify the ARN of a solution version and an Amazon S3 URI for the input and output data. For user personalization, popular items, and personalized ranking solutions, the batch inference job generates a list of recommended items for each user ID in the input file. For related items solutions, the job generates a list of recommended items for each item ID in the input file.

For more information, see Creating a batch inference job .

If you use the Similar-Items recipe, Amazon Personalize can add descriptive themes to batch recommendations. To generate themes, set the job's mode to THEME_GENERATION and specify the name of the field that contains item names in the input data.

For more information about generating themes, see Batch recommendations with themes from Content Generator .

You can't get batch recommendations with the Trending-Now or Next-Best-Action recipes.

", "CreateBatchSegmentJob": "

Creates a batch segment job. The operation can handle up to 50 million records and the input file must be in JSON format. For more information, see Getting batch recommendations and user segments.

", "CreateCampaign": "

You incur campaign costs while it is active. To avoid unnecessary costs, make sure to delete the campaign when you are finished. For information about campaign costs, see Amazon Personalize pricing.

Creates a campaign that deploys a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.

Minimum Provisioned TPS and Auto-Scaling

A high minProvisionedTPS will increase your cost. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary.

When you create an Amazon Personalize campaign, you can specify the minimum provisioned transactions per second (minProvisionedTPS) for the campaign. This is the baseline transaction throughput for the campaign provisioned by Amazon Personalize. It sets the minimum billing charge for the campaign while it is active. A transaction is a single GetRecommendations or GetPersonalizedRanking request. The default minProvisionedTPS is 1.

If your TPS increases beyond the minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS. There's a short time delay while the capacity is increased that might cause loss of transactions. When your traffic reduces, capacity returns to the minProvisionedTPS.

You are charged for the minimum provisioned TPS or, if your requests exceed the minProvisionedTPS, the actual TPS. The actual TPS is the total number of recommendation requests you make. We recommend starting with a low minProvisionedTPS, tracking your usage using Amazon CloudWatch metrics, and then increasing the minProvisionedTPS as necessary.

For more information about campaign costs, see Amazon Personalize pricing.

Status

A campaign can be in one of the following states:

To get the campaign status, call DescribeCampaign.

Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.

Related APIs

", + "CreateDataDeletionJob": "

Creates a batch job that deletes all references to specific users from an Amazon Personalize dataset group in batches. You specify the users to delete in a CSV file of userIds in an Amazon S3 bucket. After a job completes, Amazon Personalize no longer trains on the users’ data and no longer considers the users when generating user segments. For more information about creating a data deletion job, see Deleting users.

After you create a job, it can take up to a day to delete all references to the users from datasets and models. Until the job completes, Amazon Personalize continues to use the data when training. And if you use a User Segmentation recipe, the users might appear in user segments.

Status

A data deletion job can have one of the following statuses:

To get the status of the data deletion job, call the DescribeDataDeletionJob API operation and specify the Amazon Resource Name (ARN) of the job. If the status is FAILED, the response includes a failureReason key, which describes why the job failed.

Related APIs
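A minimal aws-sdk-go v1 sketch of starting a data deletion job from a CSV of userIds in S3 and then checking its status. The Go input/output type and field names are assumed from the shapes in this model update; the ARNs, bucket path, and job name are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/personalize"
)

func main() {
	svc := personalize.New(session.Must(session.NewSession()))

	// Start a deletion job from a CSV of userIds in S3.
	create, err := svc.CreateDataDeletionJob(&personalize.CreateDataDeletionJobInput{
		JobName:         aws.String("delete-opted-out-users"),
		DatasetGroupArn: aws.String("arn:aws:personalize:us-east-1:111122223333:dataset-group/my-group"),
		DataSource: &personalize.DataSource{
			DataLocation: aws.String("s3://my-bucket/user-ids-to-delete.csv"),
		},
		RoleArn: aws.String("arn:aws:iam::111122223333:role/PersonalizeS3Access"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Check the job status; deletion can take up to a day to complete.
	desc, err := svc.DescribeDataDeletionJob(&personalize.DescribeDataDeletionJobInput{
		DataDeletionJobArn: create.DataDeletionJobArn,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("status:", aws.StringValue(desc.DataDeletionJob.Status))
}
```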

", "CreateDataset": "

Creates an empty dataset and adds it to the specified dataset group. Use CreateDatasetImportJob to import your training data to a dataset.

There are 5 types of datasets:

Each dataset type has an associated schema with required field types. Only the Item interactions dataset is required in order to train a model (also referred to as creating a solution).

A dataset can be in one of the following states:

To get the status of the dataset, call DescribeDataset.

Related APIs

", "CreateDatasetExportJob": "

Creates a job that exports data from your dataset to an Amazon S3 bucket. To allow Amazon Personalize to export the training data, you must specify a service-linked IAM role that gives Amazon Personalize PutObject permissions for your Amazon S3 bucket. For information, see Exporting a dataset in the Amazon Personalize developer guide.

Status

A dataset export job can be in one of the following states:

To get the status of the export job, call DescribeDatasetExportJob, and specify the Amazon Resource Name (ARN) of the dataset export job. The dataset export is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

", "CreateDatasetGroup": "

Creates an empty dataset group. A dataset group is a container for Amazon Personalize resources. A dataset group can contain at most three datasets, one for each type of dataset:

A dataset group can be a Domain dataset group, where you specify a domain and use pre-configured resources like recommenders, or a Custom dataset group, where you use custom resources, such as a solution with a solution version, that you deploy with a campaign. If you start with a Domain dataset group, you can still add custom resources such as solutions and solution versions trained with recipes for custom use cases and deployed with campaigns.

A dataset group can be in one of the following states:

To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the creation failed.

You must wait until the status of the dataset group is ACTIVE before adding a dataset to the group.

You can specify a Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an Identity and Access Management (IAM) role that has permission to access the key.

APIs that require a dataset group ARN in the request

Related APIs

", @@ -29,6 +30,7 @@ "DescribeBatchInferenceJob": "

Gets the properties of a batch inference job including name, Amazon Resource Name (ARN), status, input and output configurations, and the ARN of the solution version used to generate the recommendations.

", "DescribeBatchSegmentJob": "

Gets the properties of a batch segment job including name, Amazon Resource Name (ARN), status, input and output configurations, and the ARN of the solution version used to generate segments.

", "DescribeCampaign": "

Describes the given campaign, including its status.

A campaign can be in one of the following states:

When the status is CREATE FAILED, the response includes the failureReason key, which describes why.

For more information on campaigns, see CreateCampaign.

", + "DescribeDataDeletionJob": "

Describes the data deletion job created by CreateDataDeletionJob, including the job status.

", "DescribeDataset": "

Describes the given dataset. For more information on datasets, see CreateDataset.

", "DescribeDatasetExportJob": "

Describes the dataset export job created by CreateDatasetExportJob, including the export job status.

", "DescribeDatasetGroup": "

Describes the given dataset group. For more information on dataset groups, see CreateDatasetGroup.

", @@ -46,6 +48,7 @@ "ListBatchInferenceJobs": "

Gets a list of the batch inference jobs that have been performed off of a solution version.

", "ListBatchSegmentJobs": "

Gets a list of the batch segment jobs that have been performed off of a solution version that you specify.

", "ListCampaigns": "

Returns a list of campaigns that use the given solution. When a solution is not specified, all the campaigns associated with the account are listed. The response provides the properties for each campaign, including the Amazon Resource Name (ARN). For more information on campaigns, see CreateCampaign.

", + "ListDataDeletionJobs": "

Returns a list of data deletion jobs for a dataset group ordered by creation time, with the most recent first. When a dataset group is not specified, all the data deletion jobs associated with the account are listed. The response provides the properties for each job, including the Amazon Resource Name (ARN). For more information on data deletion jobs, see Deleting users.
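A sketch of draining the paginated list with a manual nextToken loop; the model shown here doesn't indicate a generated paginator for this operation, so the loop is written by hand, and the client and dataset group ARN are assumed to come from the caller.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/personalize"
)

// listAllDataDeletionJobs collects every DataDeletionJobSummary for a dataset
// group by following nextToken until the service stops returning one.
func listAllDataDeletionJobs(svc *personalize.Personalize, datasetGroupArn string) ([]*personalize.DataDeletionJobSummary, error) {
	var jobs []*personalize.DataDeletionJobSummary
	var token *string
	for {
		out, err := svc.ListDataDeletionJobs(&personalize.ListDataDeletionJobsInput{
			DatasetGroupArn: aws.String(datasetGroupArn),
			MaxResults:      aws.Int64(50),
			NextToken:       token,
		})
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, out.DataDeletionJobs...)
		if out.NextToken == nil || aws.StringValue(out.NextToken) == "" {
			break
		}
		token = out.NextToken
	}
	return jobs, nil
}
```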

", "ListDatasetExportJobs": "

Returns a list of dataset export jobs that use the given dataset. When a dataset is not specified, all the dataset export jobs associated with the account are listed. The response provides the properties for each dataset export job, including the Amazon Resource Name (ARN). For more information on dataset export jobs, see CreateDatasetExportJob. For more information on datasets, see CreateDataset.

", "ListDatasetGroups": "

Returns a list of dataset groups. The response provides the properties for each dataset group, including the Amazon Resource Name (ARN). For more information on dataset groups, see CreateDatasetGroup.

", "ListDatasetImportJobs": "

Returns a list of dataset import jobs that use the given dataset. When a dataset is not specified, all the dataset import jobs associated with the account are listed. The response provides the properties for each dataset import job, including the Amazon Resource Name (ARN). For more information on dataset import jobs, see CreateDatasetImportJob. For more information on datasets, see CreateDataset.

", @@ -118,6 +121,8 @@ "CreateBatchSegmentJobResponse$batchSegmentJobArn": "

The ARN of the batch segment job.

", "CreateCampaignRequest$solutionVersionArn": "

The Amazon Resource Name (ARN) of the trained model to deploy with the campaign. To specify the latest solution version of your solution, specify the ARN of your solution in SolutionArn/$LATEST format. You must use this format if you set syncWithLatestSolutionVersion to True in the CampaignConfig.

To deploy a model that isn't the latest solution version of your solution, specify the ARN of the solution version.

For more information about automatic campaign updates, see Enabling automatic campaign updates.

", "CreateCampaignResponse$campaignArn": "

The Amazon Resource Name (ARN) of the campaign.

", + "CreateDataDeletionJobRequest$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group that has the datasets you want to delete records from.

", + "CreateDataDeletionJobResponse$dataDeletionJobArn": "

The Amazon Resource Name (ARN) of the data deletion job.

", "CreateDatasetExportJobRequest$datasetArn": "

The Amazon Resource Name (ARN) of the dataset that contains the data to export.

", "CreateDatasetExportJobResponse$datasetExportJobArn": "

The Amazon Resource Name (ARN) of the dataset export job.

", "CreateDatasetGroupResponse$datasetGroupArn": "

The Amazon Resource Name (ARN) of the new dataset group.

", @@ -141,6 +146,10 @@ "CreateSolutionResponse$solutionArn": "

The ARN of the solution.

", "CreateSolutionVersionRequest$solutionArn": "

The Amazon Resource Name (ARN) of the solution containing the training configuration information.

", "CreateSolutionVersionResponse$solutionVersionArn": "

The ARN of the new solution version.

", + "DataDeletionJob$dataDeletionJobArn": "

The Amazon Resource Name (ARN) of the data deletion job.

", + "DataDeletionJob$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group the job deletes records from.

", + "DataDeletionJobSummary$dataDeletionJobArn": "

The Amazon Resource Name (ARN) of the data deletion job.

", + "DataDeletionJobSummary$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group the job deleted records from.

", "Dataset$datasetArn": "

The Amazon Resource Name (ARN) of the dataset that you want metadata for.

", "Dataset$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group.

", "Dataset$schemaArn": "

The ARN of the associated schema.

", @@ -171,6 +180,7 @@ "DescribeBatchInferenceJobRequest$batchInferenceJobArn": "

The ARN of the batch inference job to describe.

", "DescribeBatchSegmentJobRequest$batchSegmentJobArn": "

The ARN of the batch segment job to describe.

", "DescribeCampaignRequest$campaignArn": "

The Amazon Resource Name (ARN) of the campaign.

", + "DescribeDataDeletionJobRequest$dataDeletionJobArn": "

The Amazon Resource Name (ARN) of the data deletion job.

", "DescribeDatasetExportJobRequest$datasetExportJobArn": "

The Amazon Resource Name (ARN) of the dataset export job to describe.

", "DescribeDatasetGroupRequest$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group to describe.

", "DescribeDatasetImportJobRequest$datasetImportJobArn": "

The Amazon Resource Name (ARN) of the dataset import job to describe.

", @@ -197,6 +207,7 @@ "ListBatchInferenceJobsRequest$solutionVersionArn": "

The Amazon Resource Name (ARN) of the solution version from which the batch inference jobs were created.

", "ListBatchSegmentJobsRequest$solutionVersionArn": "

The Amazon Resource Name (ARN) of the solution version that the batch segment jobs used to generate batch segments.

", "ListCampaignsRequest$solutionArn": "

The Amazon Resource Name (ARN) of the solution to list the campaigns for. When a solution is not specified, all the campaigns associated with the account are listed.

", + "ListDataDeletionJobsRequest$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group to list data deletion jobs for.

", "ListDatasetExportJobsRequest$datasetArn": "

The Amazon Resource Name (ARN) of the dataset to list the dataset export jobs for.

", "ListDatasetImportJobsRequest$datasetArn": "

The Amazon Resource Name (ARN) of the dataset to list the dataset import jobs for.

", "ListDatasetsRequest$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group that contains the datasets to list.

", @@ -498,6 +509,16 @@ "refs": { } }, + "CreateDataDeletionJobRequest": { + "base": null, + "refs": { + } + }, + "CreateDataDeletionJobResponse": { + "base": null, + "refs": { + } + }, "CreateDatasetExportJobRequest": { "base": null, "refs": { @@ -608,10 +629,30 @@ "refs": { } }, + "DataDeletionJob": { + "base": "

Describes a job that deletes all references to specific users from an Amazon Personalize dataset group in batches. For information about creating a data deletion job, see Deleting users.
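A minimal sketch of creating such a job through the Go SDK follows. The ARNs, job name, and S3 path are placeholders, and the field names assume the request shape documented here (jobName, datasetGroupArn, dataSource, roleArn).

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/personalize"
)

func main() {
	svc := personalize.New(session.Must(session.NewSession()))

	out, err := svc.CreateDataDeletionJob(&personalize.CreateDataDeletionJobInput{
		JobName:         aws.String("delete-opted-out-users"), // hypothetical job name
		DatasetGroupArn: aws.String("arn:aws:personalize:us-west-2:123456789012:dataset-group/my-dataset-group"),
		// IAM role that can read the CSV file(s) in the data source bucket.
		RoleArn: aws.String("arn:aws:iam::123456789012:role/PersonalizeS3ReadRole"),
		DataSource: &personalize.DataSource{
			// CSV file (or a folder path ending in /) listing the userIds to delete.
			DataLocation: aws.String("s3://bucket-name/folder-name/users-to-delete.csv"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("data deletion job ARN:", aws.StringValue(out.DataDeletionJobArn))
}
```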

", + "refs": { + "DescribeDataDeletionJobResponse$dataDeletionJob": "

Information about the data deletion job, including the status.

The status is one of the following values:

" + } + }, + "DataDeletionJobSummary": { + "base": "

Provides a summary of the properties of a data deletion job. For a complete listing, call the DescribeDataDeletionJob API operation.
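For example, a caller might poll the job with DescribeDataDeletionJob and read the status and numDeleted fields documented in this file. This is a sketch with a placeholder job ARN, such as one returned by CreateDataDeletionJob.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/personalize"
)

func main() {
	svc := personalize.New(session.Must(session.NewSession()))

	out, err := svc.DescribeDataDeletionJob(&personalize.DescribeDataDeletionJobInput{
		// Hypothetical ARN of the data deletion job to describe.
		DataDeletionJobArn: aws.String("arn:aws:personalize:us-west-2:123456789012:data-deletion-job/delete-opted-out-users"),
	})
	if err != nil {
		log.Fatal(err)
	}
	job := out.DataDeletionJob
	fmt.Printf("status=%s deleted=%d\n",
		aws.StringValue(job.Status),
		aws.Int64Value(job.NumDeleted)) // populated once the job is COMPLETED
}
```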

", + "refs": { + "DataDeletionJobs$member": null + } + }, + "DataDeletionJobs": { + "base": null, + "refs": { + "ListDataDeletionJobsResponse$dataDeletionJobs": "

The list of data deletion jobs.

" + } + }, "DataSource": { - "base": "

Describes the data source that contains the data to upload to a dataset.

", + "base": "

Describes the data source that contains the data to upload to a dataset, or the list of records to delete from Amazon Personalize.

", "refs": { + "CreateDataDeletionJobRequest$dataSource": "

The Amazon S3 bucket that contains the list of userIds of the users to delete.

", "CreateDatasetImportJobRequest$dataSource": "

The Amazon S3 bucket that contains the training data to import.

", + "DataDeletionJob$dataSource": null, "DatasetImportJob$dataSource": "

The Amazon S3 bucket that contains the training data to import.

" } }, @@ -740,6 +781,10 @@ "CampaignSummary$lastUpdatedDateTime": "

The date and time (in Unix time) that the campaign was last updated.

", "CampaignUpdateSummary$creationDateTime": "

The date and time (in Unix time) that the campaign update was created.

", "CampaignUpdateSummary$lastUpdatedDateTime": "

The date and time (in Unix time) that the campaign update was last updated.

", + "DataDeletionJob$creationDateTime": "

The creation date and time (in Unix time) of the data deletion job.

", + "DataDeletionJob$lastUpdatedDateTime": "

The date and time (in Unix time) the data deletion job was last updated.

", + "DataDeletionJobSummary$creationDateTime": "

The creation date and time (in Unix time) of the data deletion job.

", + "DataDeletionJobSummary$lastUpdatedDateTime": "

The date and time (in Unix time) the data deletion job was last updated.

", "Dataset$creationDateTime": "

The creation date and time (in Unix time) of the dataset.

", "Dataset$lastUpdatedDateTime": "

A time stamp that shows when the dataset was updated.

", "DatasetExportJob$creationDateTime": "

The creation date and time (in Unix time) of the dataset export job.

", @@ -923,6 +968,16 @@ "refs": { } }, + "DescribeDataDeletionJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataDeletionJobResponse": { + "base": null, + "refs": { + } + }, "DescribeDatasetExportJobRequest": { "base": null, "refs": { @@ -1141,6 +1196,8 @@ "Campaign$failureReason": "

If a campaign fails, the reason behind the failure.

", "CampaignSummary$failureReason": "

If a campaign fails, the reason behind the failure.

", "CampaignUpdateSummary$failureReason": "

If a campaign update fails, the reason behind the failure.

", + "DataDeletionJob$failureReason": "

If a data deletion job fails, provides the reason why.

", + "DataDeletionJobSummary$failureReason": "

If a data deletion job fails, provides the reason why.

", "DatasetExportJob$failureReason": "

If a dataset export job fails, provides the reason why.

", "DatasetExportJobSummary$failureReason": "

If a dataset export job fails, the reason behind the failure.

", "DatasetGroup$failureReason": "

If creating a dataset group fails, provides the reason why.

", @@ -1280,6 +1337,12 @@ "DatasetExportJob$ingestionMode": "

The data to export, based on how you imported the data. You can choose to export BULK data that you imported using a dataset import job, PUT data that you imported incrementally (using the console, PutEvents, PutUsers and PutItems operations), or ALL for both types. The default value is PUT.

" } }, + "Integer": { + "base": null, + "refs": { + "DataDeletionJob$numDeleted": "

The number of records deleted by a COMPLETED job.

" + } + }, "IntegerHyperParameterRange": { "base": "

Provides the name and range of an integer-valued hyperparameter.

", "refs": { @@ -1365,6 +1428,16 @@ "refs": { } }, + "ListDataDeletionJobsRequest": { + "base": null, + "refs": { + } + }, + "ListDataDeletionJobsResponse": { + "base": null, + "refs": { + } + }, "ListDatasetExportJobsRequest": { "base": null, "refs": { @@ -1511,6 +1584,7 @@ "ListBatchInferenceJobsRequest$maxResults": "

The maximum number of batch inference job results to return in each page. The default value is 100.

", "ListBatchSegmentJobsRequest$maxResults": "

The maximum number of batch segment job results to return in each page. The default value is 100.

", "ListCampaignsRequest$maxResults": "

The maximum number of campaigns to return.

", + "ListDataDeletionJobsRequest$maxResults": "

The maximum number of data deletion jobs to return.

", "ListDatasetExportJobsRequest$maxResults": "

The maximum number of dataset export jobs to return.

", "ListDatasetGroupsRequest$maxResults": "

The maximum number of dataset groups to return.

", "ListDatasetImportJobsRequest$maxResults": "

The maximum number of dataset import jobs to return.

", @@ -1621,6 +1695,7 @@ "CreateBatchInferenceJobRequest$jobName": "

The name of the batch inference job to create.

", "CreateBatchSegmentJobRequest$jobName": "

The name of the batch segment job to create.

", "CreateCampaignRequest$name": "

A name for the new campaign. The campaign name must be unique within your account.

", + "CreateDataDeletionJobRequest$jobName": "

The name for the data deletion job.

", "CreateDatasetExportJobRequest$jobName": "

The name for the dataset export job.

", "CreateDatasetGroupRequest$name": "

The name for the new dataset group.

", "CreateDatasetImportJobRequest$jobName": "

The name for the dataset import job.

", @@ -1632,6 +1707,8 @@ "CreateSchemaRequest$name": "

The name for the schema.

", "CreateSolutionRequest$name": "

The name for the solution.

", "CreateSolutionVersionRequest$name": "

The name of the solution version.

", + "DataDeletionJob$jobName": "

The name of the data deletion job.

", + "DataDeletionJobSummary$jobName": "

The name of the data deletion job.

", "Dataset$name": "

The name of the dataset.

", "DatasetExportJob$jobName": "

The name of the export job.

", "DatasetExportJobSummary$jobName": "

The name of the dataset export job.

", @@ -1667,6 +1744,8 @@ "ListBatchSegmentJobsResponse$nextToken": "

The token to use to retrieve the next page of results. The value is null when there are no more results to return.

", "ListCampaignsRequest$nextToken": "

A token returned from the previous call to ListCampaigns for getting the next set of campaigns (if they exist).

", "ListCampaignsResponse$nextToken": "

A token for getting the next set of campaigns (if they exist).

", + "ListDataDeletionJobsRequest$nextToken": "

A token returned from the previous call to ListDataDeletionJobs for getting the next set of jobs (if they exist).

", + "ListDataDeletionJobsResponse$nextToken": "

A token for getting the next set of data deletion jobs (if they exist).

", "ListDatasetExportJobsRequest$nextToken": "

A token returned from the previous call to ListDatasetExportJobs for getting the next set of dataset export jobs (if they exist).

", "ListDatasetExportJobsResponse$nextToken": "

A token for getting the next set of dataset export jobs (if they exist).

", "ListDatasetGroupsRequest$nextToken": "

A token returned from the previous call to ListDatasetGroups for getting the next set of dataset groups (if they exist).

", @@ -1854,9 +1933,11 @@ "BatchSegmentJob$roleArn": "

The ARN of the AWS Identity and Access Management (IAM) role that requested the batch segment job.

", "CreateBatchInferenceJobRequest$roleArn": "

The ARN of the AWS Identity and Access Management (IAM) role that has permissions to read and write to your input and output Amazon S3 buckets, respectively.

", "CreateBatchSegmentJobRequest$roleArn": "

The ARN of the AWS Identity and Access Management (IAM) role that has permissions to read and write to your input and output Amazon S3 buckets, respectively.

", + "CreateDataDeletionJobRequest$roleArn": "

The Amazon Resource Name (ARN) of the IAM role that has permissions to read from the Amazon S3 data source.

", "CreateDatasetExportJobRequest$roleArn": "

The Amazon Resource Name (ARN) of the IAM service role that has permissions to add data to your output Amazon S3 bucket.

", "CreateDatasetGroupRequest$roleArn": "

The ARN of the Identity and Access Management (IAM) role that has permissions to access the Key Management Service (KMS) key. Supplying an IAM role is only valid when also specifying a KMS key.

", "CreateDatasetImportJobRequest$roleArn": "

The ARN of the IAM role that has permissions to read from the Amazon S3 data source.

", + "DataDeletionJob$roleArn": "

The Amazon Resource Name (ARN) of the IAM role that has permissions to read from the Amazon S3 data source.

", "DatasetGroup$roleArn": "

The ARN of the Identity and Access Management (IAM) role that has permissions to access the Key Management Service (KMS) key. Supplying an IAM role is only valid when also specifying a KMS key.

", "MetricAttributionOutput$roleArn": "

The Amazon Resource Name (ARN) of the IAM service role that has permissions to add data to your output Amazon S3 bucket and add metrics to Amazon CloudWatch. For more information, see Measuring impact of recommendations.

" } @@ -1875,7 +1956,7 @@ "S3Location": { "base": null, "refs": { - "DataSource$dataLocation": "

The path to the Amazon S3 bucket where the data that you want to upload to your dataset is stored. For example:

s3://bucket-name/folder-name/

", + "DataSource$dataLocation": "

For dataset import jobs, the path to the Amazon S3 bucket where the data that you want to upload to your dataset is stored. For data deletion jobs, the path to the Amazon S3 bucket that stores the list of records to delete.

For example:

s3://bucket-name/folder-name/fileName.csv

If your CSV files are in a folder in your Amazon S3 bucket and you want your import job or data deletion job to consider multiple files, you can specify the path to the folder. With a data deletion job, Amazon Personalize uses all files in the folder and any subfolders. Use the following syntax, with a / after the folder name:

s3://bucket-name/folder-name/

", "S3DataConfig$path": "

The file path of the Amazon S3 bucket.

" } }, @@ -1956,6 +2037,8 @@ "Campaign$status": "

The status of the campaign.

A campaign can be in one of the following states:

", "CampaignSummary$status": "

The status of the campaign.

A campaign can be in one of the following states:

", "CampaignUpdateSummary$status": "

The status of the campaign update.

A campaign update can be in one of the following states:

", + "DataDeletionJob$status": "

The status of the data deletion job.

A data deletion job can have one of the following statuses:

", + "DataDeletionJobSummary$status": "

The status of the data deletion job.

A data deletion job can have one of the following statuses:

", "Dataset$status": "

The status of the dataset.

A dataset can be in one of the following states:

", "DatasetExportJob$status": "

The status of the dataset export job.

A dataset export job can be in one of the following states:

", "DatasetExportJobSummary$status": "

The status of the dataset export job.

A dataset export job can be in one of the following states:

", @@ -2039,6 +2122,7 @@ "CreateBatchInferenceJobRequest$tags": "

A list of tags to apply to the batch inference job.

", "CreateBatchSegmentJobRequest$tags": "

A list of tags to apply to the batch segment job.

", "CreateCampaignRequest$tags": "

A list of tags to apply to the campaign.

", + "CreateDataDeletionJobRequest$tags": "

A list of tags to apply to the data deletion job.

", "CreateDatasetExportJobRequest$tags": "

A list of tags to apply to the dataset export job.

", "CreateDatasetGroupRequest$tags": "

A list of tags to apply to the dataset group.

", "CreateDatasetImportJobRequest$tags": "

A list of tags to apply to the dataset import job.

", diff --git a/models/apis/redshift-serverless/2021-04-21/api-2.json b/models/apis/redshift-serverless/2021-04-21/api-2.json index 725d4513c8d..4f2eea7d18c 100644 --- a/models/apis/redshift-serverless/2021-04-21/api-2.json +++ b/models/apis/redshift-serverless/2021-04-21/api-2.json @@ -2018,6 +2018,13 @@ }, "union":true }, + "ScheduledActionAssociation":{ + "type":"structure", + "members":{ + "namespaceName":{"shape":"NamespaceName"}, + "scheduledActionName":{"shape":"ScheduledActionName"} + } + }, "ScheduledActionName":{ "type":"string", "max":60, @@ -2042,7 +2049,7 @@ }, "ScheduledActionsList":{ "type":"list", - "member":{"shape":"ScheduledActionName"} + "member":{"shape":"ScheduledActionAssociation"} }, "SecurityGroupId":{"type":"string"}, "SecurityGroupIdList":{ diff --git a/models/apis/redshift-serverless/2021-04-21/docs-2.json b/models/apis/redshift-serverless/2021-04-21/docs-2.json index f4e76c6657e..bc783333192 100644 --- a/models/apis/redshift-serverless/2021-04-21/docs-2.json +++ b/models/apis/redshift-serverless/2021-04-21/docs-2.json @@ -108,7 +108,7 @@ "UpdateWorkgroupRequest$enhancedVpcRouting": "

The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC.

", "UpdateWorkgroupRequest$publiclyAccessible": "

A value that specifies whether the workgroup can be accessible from a public network.

", "Workgroup$enhancedVpcRouting": "

The value that specifies whether to enable enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC.

", - "Workgroup$publiclyAccessible": "

A value that specifies whether the workgroup can be accessible from a public network

" + "Workgroup$publiclyAccessible": "

A value that specifies whether the workgroup can be accessible from a public network.

" } }, "ConfigParameter": { @@ -120,9 +120,9 @@ "ConfigParameterList": { "base": null, "refs": { - "CreateWorkgroupRequest$configParameters": "

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

", - "UpdateWorkgroupRequest$configParameters": "

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

", - "Workgroup$configParameters": "

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "CreateWorkgroupRequest$configParameters": "

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.
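To show where use_fips_ssl fits, here is a hedged sketch of creating a workgroup with config parameters through the Go SDK. The workgroup and namespace names are placeholders, the parameter values are illustrative only, and the input field names assume the SDK's usual generation of this service's shapes.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshiftserverless"
)

func main() {
	svc := redshiftserverless.New(session.Must(session.NewSession()))

	out, err := svc.CreateWorkgroup(&redshiftserverless.CreateWorkgroupInput{
		WorkgroupName: aws.String("my-workgroup"), // hypothetical names
		NamespaceName: aws.String("my-namespace"),
		ConfigParameters: []*redshiftserverless.ConfigParameter{
			// Illustrative values; consult the service documentation for accepted settings.
			{ParameterKey: aws.String("require_ssl"), ParameterValue: aws.String("true")},
			{ParameterKey: aws.String("use_fips_ssl"), ParameterValue: aws.String("true")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```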

", + "UpdateWorkgroupRequest$configParameters": "

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

", + "Workgroup$configParameters": "

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" } }, "ConflictException": { @@ -813,6 +813,7 @@ "RecoveryPoint$namespaceName": "

The name of the namespace the recovery point is associated with.

", "RestoreFromRecoveryPointRequest$namespaceName": "

The name of the namespace to restore data into.

", "RestoreFromSnapshotRequest$namespaceName": "

The name of the namespace to restore the snapshot to.

", + "ScheduledActionAssociation$namespaceName": "

The name of the associated Amazon Redshift Serverless namespace.

", "ScheduledActionResponse$namespaceName": "

The name of the Amazon Redshift Serverless namespace that the scheduled action applies to.

", "SnapshotCopyConfiguration$namespaceName": "

The name of the namespace to copy snapshots from in the source Amazon Web Services Region.

", "UpdateNamespaceRequest$namespaceName": "

The name of the namespace to update. You can't update the name of a namespace once it is created.

" @@ -868,7 +869,7 @@ "ParameterKey": { "base": null, "refs": { - "ConfigParameter$parameterKey": "

The key of the parameter. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "ConfigParameter$parameterKey": "

The key of the parameter. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" } }, "ParameterValue": { @@ -966,14 +967,20 @@ "UpdateScheduledActionRequest$schedule": "

The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC.

" } }, + "ScheduledActionAssociation": { + "base": "

Contains names of objects associated with a scheduled action.

", + "refs": { + "ScheduledActionsList$member": null + } + }, "ScheduledActionName": { "base": null, "refs": { "CreateScheduledActionRequest$scheduledActionName": "

The name of the scheduled action.

", "DeleteScheduledActionRequest$scheduledActionName": "

The name of the scheduled action to delete.

", "GetScheduledActionRequest$scheduledActionName": "

The name of the scheduled action.

", + "ScheduledActionAssociation$scheduledActionName": "

The name of the associated scheduled action.

", "ScheduledActionResponse$scheduledActionName": "

The name of the scheduled action.

", - "ScheduledActionsList$member": null, "UpdateScheduledActionRequest$scheduledActionName": "

The name of the scheduled action to update.

" } }, @@ -989,7 +996,7 @@ "ScheduledActionsList": { "base": null, "refs": { - "ListScheduledActionsResponse$scheduledActions": "

All of the returned scheduled action objects.

" + "ListScheduledActionsResponse$scheduledActions": "

All of the returned scheduled action association objects.
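Because ScheduledActionsList members are now association objects rather than bare names, callers read two fields per entry. A minimal Go sketch, with pagination omitted and client setup assumed from the environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshiftserverless"
)

func main() {
	svc := redshiftserverless.New(session.Must(session.NewSession()))

	out, err := svc.ListScheduledActions(&redshiftserverless.ListScheduledActionsInput{})
	if err != nil {
		log.Fatal(err)
	}
	// Each entry is now an association object, not a bare scheduled action name.
	for _, sa := range out.ScheduledActions {
		fmt.Printf("%s\t%s\n",
			aws.StringValue(sa.NamespaceName),
			aws.StringValue(sa.ScheduledActionName))
	}
}
```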

" } }, "SecurityGroupId": { diff --git a/service/dynamodb/api.go b/service/dynamodb/api.go index ae7e60ebeca..0d28ae695f9 100644 --- a/service/dynamodb/api.go +++ b/service/dynamodb/api.go @@ -4067,11 +4067,11 @@ func (c *DynamoDB) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req // a few seconds, and then retry the GetResourcePolicy request. // // After a GetResourcePolicy request returns a policy created using the PutResourcePolicy -// request, you can assume the policy will start getting applied in the authorization -// of requests to the resource. Because this process is eventually consistent, -// it will take some time to apply the policy to all requests to a resource. -// Policies that you attach while creating a table using the CreateTable request -// will always be applied to all requests for that table. +// request, the policy will be applied in the authorization of requests to the +// resource. Because this process is eventually consistent, it will take some +// time to apply the policy to all requests to a resource. Policies that you +// attach while creating a table using the CreateTable request will always be +// applied to all requests for that table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5412,8 +5412,8 @@ func (c *DynamoDB) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req // // PutResourcePolicy is an idempotent operation; running it multiple times on // the same resource using the same policy document will return the same revision -// ID. If you specify an ExpectedRevisionId which doesn't match the current -// policy's RevisionId, the PolicyNotFoundException will be returned. +// ID. If you specify an ExpectedRevisionId that doesn't match the current policy's +// RevisionId, the PolicyNotFoundException will be returned. // // PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy // request immediately after a PutResourcePolicy request, DynamoDB might return @@ -11159,6 +11159,11 @@ type CreateGlobalSecondaryIndexAction struct { // KeySchema is a required field KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + // The maximum number of read and write units for the global secondary index + // being created. If you use this parameter, you must specify MaxReadRequestUnits, + // MaxWriteRequestUnits, or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents attributes that are copied (projected) from the table into an // index. These are in addition to the primary key attributes and index key // attributes, which are automatically projected. @@ -11250,6 +11255,12 @@ func (s *CreateGlobalSecondaryIndexAction) SetKeySchema(v []*KeySchemaElement) * return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *CreateGlobalSecondaryIndexAction) SetOnDemandThroughput(v *OnDemandThroughput) *CreateGlobalSecondaryIndexAction { + s.OnDemandThroughput = v + return s +} + // SetProjection sets the Projection field's value. func (s *CreateGlobalSecondaryIndexAction) SetProjection(v *Projection) *CreateGlobalSecondaryIndexAction { s.Projection = v @@ -11416,6 +11427,11 @@ type CreateReplicationGroupMemberAction struct { // different from the default DynamoDB KMS key alias/aws/dynamodb. KMSMasterKeyId *string `type:"string"` + // The maximum on-demand throughput settings for the specified replica table + // being created. 
You can only modify MaxReadRequestUnits, because you can't + // modify MaxWriteRequestUnits for individual replica tables. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // Replica-specific provisioned throughput. If not specified, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` @@ -11491,6 +11507,12 @@ func (s *CreateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *Create return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *CreateReplicationGroupMemberAction) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *CreateReplicationGroupMemberAction { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *CreateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *CreateReplicationGroupMemberAction { s.ProvisionedThroughputOverride = v @@ -11620,6 +11642,11 @@ type CreateTableInput struct { // attributes when determining the total. LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` + // Sets the maximum number of read and write units for the specified table in + // on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, + // MaxWriteRequestUnits, or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents the provisioned throughput settings for a specified table or index. // The settings can be modified using the UpdateTable operation. // @@ -11635,13 +11662,12 @@ type CreateTableInput struct { // will be attached to the table. // // When you attach a resource-based policy while creating a table, the policy - // creation is strongly consistent. + // application is strongly consistent. // // The maximum size supported for a resource-based policy document is 20 KB. // DynamoDB counts whitespaces when calculating the size of a policy against - // this limit. You can’t request an increase for this limit. For a full list - // of all considerations that you should keep in mind while attaching a resource-based - // policy, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). + // this limit. For a full list of all considerations that apply for resource-based + // policies, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). ResourcePolicy *string `type:"string"` // Represents the settings used to enable server-side encryption. @@ -11815,6 +11841,12 @@ func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *C return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *CreateTableInput) SetOnDemandThroughput(v *OnDemandThroughput) *CreateTableInput { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput { s.ProvisionedThroughput = v @@ -12714,7 +12746,7 @@ func (s *DeleteResourcePolicyInput) SetResourceArn(v string) *DeleteResourcePoli type DeleteResourcePolicyOutput struct { _ struct{} `type:"structure"` - // A unique string that represents the revision ID of the policy. 
If you are + // A unique string that represents the revision ID of the policy. If you're // comparing revision IDs, make sure to always use string comparison logic. // // This value will be empty if you make a request against a resource without @@ -15954,7 +15986,7 @@ type GetResourcePolicyOutput struct { // a table or stream, in JSON format. Policy *string `type:"string"` - // A unique string that represents the revision ID of the policy. If you are + // A unique string that represents the revision ID of the policy. If you're // comparing revision IDs, make sure to always use string comparison logic. RevisionId *string `min:"1" type:"string"` } @@ -16018,6 +16050,11 @@ type GlobalSecondaryIndex struct { // KeySchema is a required field KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents attributes that are copied (projected) from the table into the // global secondary index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. @@ -16109,6 +16146,12 @@ func (s *GlobalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *GlobalSecond return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *GlobalSecondaryIndex) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndex { + s.OnDemandThroughput = v + return s +} + // SetProjection sets the Projection field's value. func (s *GlobalSecondaryIndex) SetProjection(v *Projection) *GlobalSecondaryIndex { s.Projection = v @@ -16245,6 +16288,11 @@ type GlobalSecondaryIndexDescription struct { // key physically close together, in sorted order by the sort key value. KeySchema []*KeySchemaElement `min:"1" type:"list"` + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents attributes that are copied (projected) from the table into the // global secondary index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. @@ -16319,6 +16367,12 @@ func (s *GlobalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *G return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *GlobalSecondaryIndexDescription) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndexDescription { + s.OnDemandThroughput = v + return s +} + // SetProjection sets the Projection field's value. func (s *GlobalSecondaryIndexDescription) SetProjection(v *Projection) *GlobalSecondaryIndexDescription { s.Projection = v @@ -16356,6 +16410,11 @@ type GlobalSecondaryIndexInfo struct { // key physically close together, in sorted order by the sort key value. KeySchema []*KeySchemaElement `min:"1" type:"list"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents attributes that are copied (projected) from the table into the // global secondary index. 
These are in addition to the primary key attributes // and index key attributes, which are automatically projected. @@ -16396,6 +16455,12 @@ func (s *GlobalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *GlobalSe return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *GlobalSecondaryIndexInfo) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndexInfo { + s.OnDemandThroughput = v + return s +} + // SetProjection sets the Projection field's value. func (s *GlobalSecondaryIndexInfo) SetProjection(v *Projection) *GlobalSecondaryIndexInfo { s.Projection = v @@ -19458,6 +19523,92 @@ func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIn return s } +// Sets the maximum number of read and write units for the specified on-demand +// table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, +// or both. +type OnDemandThroughput struct { + _ struct{} `type:"structure"` + + // Maximum number of read request units for the specified table. + // + // To specify a maximum OnDemandThroughput on your table, set the value of MaxReadRequestUnits + // as greater than or equal to 1. To remove the maximum OnDemandThroughput that + // is currently set on your table, set the value of MaxReadRequestUnits to -1. + MaxReadRequestUnits *int64 `type:"long"` + + // Maximum number of write request units for the specified table. + // + // To specify a maximum OnDemandThroughput on your table, set the value of MaxWriteRequestUnits + // as greater than or equal to 1. To remove the maximum OnDemandThroughput that + // is currently set on your table, set the value of MaxWriteRequestUnits to + // -1. + MaxWriteRequestUnits *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OnDemandThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OnDemandThroughput) GoString() string { + return s.String() +} + +// SetMaxReadRequestUnits sets the MaxReadRequestUnits field's value. +func (s *OnDemandThroughput) SetMaxReadRequestUnits(v int64) *OnDemandThroughput { + s.MaxReadRequestUnits = &v + return s +} + +// SetMaxWriteRequestUnits sets the MaxWriteRequestUnits field's value. +func (s *OnDemandThroughput) SetMaxWriteRequestUnits(v int64) *OnDemandThroughput { + s.MaxWriteRequestUnits = &v + return s +} + +// Overrides the on-demand throughput settings for this replica table. If you +// don't specify a value for this parameter, it uses the source table's on-demand +// throughput settings. +type OnDemandThroughputOverride struct { + _ struct{} `type:"structure"` + + // Maximum number of read request units for the specified replica table. + MaxReadRequestUnits *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s OnDemandThroughputOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OnDemandThroughputOverride) GoString() string { + return s.String() +} + +// SetMaxReadRequestUnits sets the MaxReadRequestUnits field's value. +func (s *OnDemandThroughputOverride) SetMaxReadRequestUnits(v int64) *OnDemandThroughputOverride { + s.MaxReadRequestUnits = &v + return s +} + // Represents a PartiQL statement that uses parameters. type ParameterizedStatement struct { _ struct{} `type:"structure"` @@ -20615,21 +20766,28 @@ type PutResourcePolicyInput struct { // A string value that you can use to conditionally update your policy. You // can provide the revision ID of your existing policy to make mutating requests - // against that policy. When you provide an expected revision ID, if the revision - // ID of the existing policy on the resource doesn't match or if there's no - // policy attached to the resource, your request will be rejected with a PolicyNotFoundException. + // against that policy. + // + // When you provide an expected revision ID, if the revision ID of the existing + // policy on the resource doesn't match or if there's no policy attached to + // the resource, your request will be rejected with a PolicyNotFoundException. // - // To conditionally put a policy when no policy exists for the resource, specify - // NO_POLICY for the revision ID. + // To conditionally attach a policy when no policy exists for the resource, + // specify NO_POLICY for the revision ID. ExpectedRevisionId *string `min:"1" type:"string"` // An Amazon Web Services resource-based policy document in JSON format. // - // The maximum size supported for a resource-based policy document is 20 KB. - // DynamoDB counts whitespaces when calculating the size of a policy against - // this limit. For a full list of all considerations that you should keep in - // mind while attaching a resource-based policy, see Resource-based policy considerations - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). + // * The maximum size supported for a resource-based policy document is 20 + // KB. DynamoDB counts whitespaces when calculating the size of a policy + // against this limit. + // + // * Within a resource-based policy, if the action for a DynamoDB service-linked + // role (SLR) to replicate data for a global table is denied, adding or deleting + // a replica will fail with an error. + // + // For a full list of all considerations that apply while attaching a resource-based + // policy, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). // // Policy is a required field Policy *string `type:"string" required:"true"` @@ -20715,7 +20873,7 @@ func (s *PutResourcePolicyInput) SetResourceArn(v string) *PutResourcePolicyInpu type PutResourcePolicyOutput struct { _ struct{} `type:"structure"` - // A unique string that represents the revision ID of the policy. If you are + // A unique string that represents the revision ID of the policy. If you're // comparing revision IDs, make sure to always use string comparison logic. 
RevisionId *string `min:"1" type:"string"` } @@ -21564,6 +21722,10 @@ type ReplicaDescription struct { // The KMS key of the replica that will be used for KMS encryption. KMSMasterKeyId *string `type:"string"` + // Overrides the maximum on-demand throughput settings for the specified replica + // table. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // Replica-specific provisioned throughput. If not described, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` @@ -21639,6 +21801,12 @@ func (s *ReplicaDescription) SetKMSMasterKeyId(v string) *ReplicaDescription { return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *ReplicaDescription) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaDescription { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *ReplicaDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaDescription { s.ProvisionedThroughputOverride = v @@ -21690,6 +21858,10 @@ type ReplicaGlobalSecondaryIndex struct { // IndexName is a required field IndexName *string `min:"3" type:"string" required:"true"` + // Overrides the maximum on-demand throughput settings for the specified global + // secondary index in the specified replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // Replica table GSI-specific provisioned throughput. If not specified, uses // the source table GSI's read capacity settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` @@ -21740,6 +21912,12 @@ func (s *ReplicaGlobalSecondaryIndex) SetIndexName(v string) *ReplicaGlobalSecon return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *ReplicaGlobalSecondaryIndex) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaGlobalSecondaryIndex { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *ReplicaGlobalSecondaryIndex) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndex { s.ProvisionedThroughputOverride = v @@ -21885,6 +22063,10 @@ type ReplicaGlobalSecondaryIndexDescription struct { // The name of the global secondary index. IndexName *string `min:"3" type:"string"` + // Overrides the maximum on-demand throughput for the specified global secondary + // index in the specified replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // If not described, uses the source table GSI's read capacity settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` } @@ -21913,6 +22095,12 @@ func (s *ReplicaGlobalSecondaryIndexDescription) SetIndexName(v string) *Replica return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *ReplicaGlobalSecondaryIndexDescription) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaGlobalSecondaryIndexDescription { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. 
func (s *ReplicaGlobalSecondaryIndexDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndexDescription { s.ProvisionedThroughputOverride = v @@ -22824,6 +23012,11 @@ type RestoreTableFromBackupInput struct { // all of the indexes at the time of restore. LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughputOverride *OnDemandThroughput `type:"structure"` + // Provisioned throughput settings for the restored table. ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` @@ -22925,6 +23118,12 @@ func (s *RestoreTableFromBackupInput) SetLocalSecondaryIndexOverride(v []*LocalS return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *RestoreTableFromBackupInput) SetOnDemandThroughputOverride(v *OnDemandThroughput) *RestoreTableFromBackupInput { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *RestoreTableFromBackupInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableFromBackupInput { s.ProvisionedThroughputOverride = v @@ -22990,6 +23189,11 @@ type RestoreTableToPointInTimeInput struct { // all of the indexes at the time of restore. LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughputOverride *OnDemandThroughput `type:"structure"` + // Provisioned throughput settings for the restored table. ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` @@ -23099,6 +23303,12 @@ func (s *RestoreTableToPointInTimeInput) SetLocalSecondaryIndexOverride(v []*Loc return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetOnDemandThroughputOverride(v *OnDemandThroughput) *RestoreTableToPointInTimeInput { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *RestoreTableToPointInTimeInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableToPointInTimeInput { s.ProvisionedThroughputOverride = v @@ -23883,6 +24093,11 @@ type SourceTableDetails struct { // KeySchema is a required field KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Read IOPs and Write IOPS on the table when the backup was created. // // ProvisionedThroughput is a required field @@ -23946,6 +24161,12 @@ func (s *SourceTableDetails) SetKeySchema(v []*KeySchemaElement) *SourceTableDet return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *SourceTableDetails) SetOnDemandThroughput(v *OnDemandThroughput) *SourceTableDetails { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. 
func (s *SourceTableDetails) SetProvisionedThroughput(v *ProvisionedThroughput) *SourceTableDetails { s.ProvisionedThroughput = v @@ -24313,6 +24534,11 @@ type TableCreationParameters struct { // KeySchema is a required field KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents the provisioned throughput settings for a specified table or index. // The settings can be modified using the UpdateTable operation. // @@ -24432,6 +24658,12 @@ func (s *TableCreationParameters) SetKeySchema(v []*KeySchemaElement) *TableCrea return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *TableCreationParameters) SetOnDemandThroughput(v *OnDemandThroughput) *TableCreationParameters { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *TableCreationParameters) SetProvisionedThroughput(v *ProvisionedThroughput) *TableCreationParameters { s.ProvisionedThroughput = v @@ -24613,6 +24845,11 @@ type TableDescription struct { // be returned. LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"` + // The maximum number of read and write units for the specified on-demand table. + // If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // The provisioned throughput settings for the table, consisting of read and // write capacity units, along with data about increases and decreases. ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` @@ -24760,6 +24997,12 @@ func (s *TableDescription) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexDesc return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *TableDescription) SetOnDemandThroughput(v *OnDemandThroughput) *TableDescription { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *TableDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *TableDescription { s.ProvisionedThroughput = v @@ -26438,15 +26681,18 @@ type UpdateGlobalSecondaryIndexAction struct { // IndexName is a required field IndexName *string `min:"3" type:"string" required:"true"` + // Updates the maximum number of read and write units for the specified global + // secondary index. If you use this parameter, you must specify MaxReadRequestUnits, + // MaxWriteRequestUnits, or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents the provisioned throughput settings for the specified global secondary // index. // // For current minimum and maximum provisioned throughput values, see Service, // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. - // - // ProvisionedThroughput is a required field - ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` } // String returns the string representation. 
@@ -26476,9 +26722,6 @@ func (s *UpdateGlobalSecondaryIndexAction) Validate() error { if s.IndexName != nil && len(*s.IndexName) < 3 { invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) } - if s.ProvisionedThroughput == nil { - invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput")) - } if s.ProvisionedThroughput != nil { if err := s.ProvisionedThroughput.Validate(); err != nil { invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) @@ -26497,6 +26740,12 @@ func (s *UpdateGlobalSecondaryIndexAction) SetIndexName(v string) *UpdateGlobalS return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *UpdateGlobalSecondaryIndexAction) SetOnDemandThroughput(v *OnDemandThroughput) *UpdateGlobalSecondaryIndexAction { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *UpdateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateGlobalSecondaryIndexAction { s.ProvisionedThroughput = v @@ -27382,6 +27631,9 @@ type UpdateReplicationGroupMemberAction struct { // from the default DynamoDB KMS key alias/aws/dynamodb. KMSMasterKeyId *string `type:"string"` + // Overrides the maximum on-demand throughput for the replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // Replica-specific provisioned throughput. If not specified, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` @@ -27457,6 +27709,12 @@ func (s *UpdateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *Update return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *UpdateReplicationGroupMemberAction) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *UpdateReplicationGroupMemberAction { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *UpdateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *UpdateReplicationGroupMemberAction { s.ProvisionedThroughputOverride = v @@ -27518,6 +27776,11 @@ type UpdateTableInput struct { // in the Amazon DynamoDB Developer Guide. GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` + // Updates the maximum number of read and write units for the specified table + // in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, + // MaxWriteRequestUnits, or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // The new provisioned throughput settings for the specified table or index. ProvisionedThroughput *ProvisionedThroughput `type:"structure"` @@ -27649,6 +27912,12 @@ func (s *UpdateTableInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIn return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *UpdateTableInput) SetOnDemandThroughput(v *OnDemandThroughput) *UpdateTableInput { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. 
func (s *UpdateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateTableInput { s.ProvisionedThroughput = v diff --git a/service/ec2/api.go b/service/ec2/api.go index 220a7a49dbb..08ae0cc38ed 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -41430,6 +41430,80 @@ func (c *EC2) GetInstanceMetadataDefaultsWithContext(ctx aws.Context, input *Get return out, req.Send() } +const opGetInstanceTpmEkPub = "GetInstanceTpmEkPub" + +// GetInstanceTpmEkPubRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceTpmEkPub operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetInstanceTpmEkPub for more information on using the GetInstanceTpmEkPub +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetInstanceTpmEkPubRequest method. +// req, resp := client.GetInstanceTpmEkPubRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetInstanceTpmEkPub +func (c *EC2) GetInstanceTpmEkPubRequest(input *GetInstanceTpmEkPubInput) (req *request.Request, output *GetInstanceTpmEkPubOutput) { + op := &request.Operation{ + Name: opGetInstanceTpmEkPub, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInstanceTpmEkPubInput{} + } + + output = &GetInstanceTpmEkPubOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetInstanceTpmEkPub API operation for Amazon Elastic Compute Cloud. +// +// Gets the public endorsement key associated with the Nitro Trusted Platform +// Module (NitroTPM) for the specified instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetInstanceTpmEkPub for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetInstanceTpmEkPub +func (c *EC2) GetInstanceTpmEkPub(input *GetInstanceTpmEkPubInput) (*GetInstanceTpmEkPubOutput, error) { + req, out := c.GetInstanceTpmEkPubRequest(input) + return out, req.Send() +} + +// GetInstanceTpmEkPubWithContext is the same as GetInstanceTpmEkPub with the addition of +// the ability to pass a context and additional request options. +// +// See GetInstanceTpmEkPub for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetInstanceTpmEkPubWithContext(ctx aws.Context, input *GetInstanceTpmEkPubInput, opts ...request.Option) (*GetInstanceTpmEkPubOutput, error) { + req, out := c.GetInstanceTpmEkPubRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetInstanceTypesFromInstanceRequirements = "GetInstanceTypesFromInstanceRequirements" // GetInstanceTypesFromInstanceRequirementsRequest generates a "aws/request.Request" representing the @@ -126617,6 +126691,155 @@ func (s *GetInstanceMetadataDefaultsOutput) SetAccountLevel(v *InstanceMetadataD return s } +type GetInstanceTpmEkPubInput struct { + _ struct{} `type:"structure"` + + // Specify this parameter to verify whether the request will succeed, without + // actually making the request. If the request will succeed, the response is + // DryRunOperation. Otherwise, the response is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance for which to get the public endorsement key. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The required public endorsement key format. Specify der for a DER-encoded + // public key that is compatible with OpenSSL. Specify tpmt for a TPM 2.0 format + // that is compatible with tpm2-tools. The returned key is base64 encoded. + // + // KeyFormat is a required field + KeyFormat *string `type:"string" required:"true" enum:"EkPubKeyFormat"` + + // The required public endorsement key type. + // + // KeyType is a required field + KeyType *string `type:"string" required:"true" enum:"EkPubKeyType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInstanceTpmEkPubInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInstanceTpmEkPubInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetInstanceTpmEkPubInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInstanceTpmEkPubInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.KeyFormat == nil { + invalidParams.Add(request.NewErrParamRequired("KeyFormat")) + } + if s.KeyType == nil { + invalidParams.Add(request.NewErrParamRequired("KeyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetInstanceTpmEkPubInput) SetDryRun(v bool) *GetInstanceTpmEkPubInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetInstanceTpmEkPubInput) SetInstanceId(v string) *GetInstanceTpmEkPubInput { + s.InstanceId = &v + return s +} + +// SetKeyFormat sets the KeyFormat field's value. +func (s *GetInstanceTpmEkPubInput) SetKeyFormat(v string) *GetInstanceTpmEkPubInput { + s.KeyFormat = &v + return s +} + +// SetKeyType sets the KeyType field's value. +func (s *GetInstanceTpmEkPubInput) SetKeyType(v string) *GetInstanceTpmEkPubInput { + s.KeyType = &v + return s +} + +type GetInstanceTpmEkPubOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The public endorsement key format. 
+ KeyFormat *string `locationName:"keyFormat" type:"string" enum:"EkPubKeyFormat"` + + // The public endorsement key type. + KeyType *string `locationName:"keyType" type:"string" enum:"EkPubKeyType"` + + // The public endorsement key material. + // + // KeyValue is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetInstanceTpmEkPubOutput's + // String and GoString methods. + KeyValue *string `locationName:"keyValue" type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInstanceTpmEkPubOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInstanceTpmEkPubOutput) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetInstanceTpmEkPubOutput) SetInstanceId(v string) *GetInstanceTpmEkPubOutput { + s.InstanceId = &v + return s +} + +// SetKeyFormat sets the KeyFormat field's value. +func (s *GetInstanceTpmEkPubOutput) SetKeyFormat(v string) *GetInstanceTpmEkPubOutput { + s.KeyFormat = &v + return s +} + +// SetKeyType sets the KeyType field's value. +func (s *GetInstanceTpmEkPubOutput) SetKeyType(v string) *GetInstanceTpmEkPubOutput { + s.KeyType = &v + return s +} + +// SetKeyValue sets the KeyValue field's value. +func (s *GetInstanceTpmEkPubOutput) SetKeyValue(v string) *GetInstanceTpmEkPubOutput { + s.KeyValue = &v + return s +} + type GetInstanceTypesFromInstanceRequirementsInput struct { _ struct{} `type:"structure"` @@ -192751,6 +192974,38 @@ func Ec2InstanceConnectEndpointState_Values() []string { } } +const ( + // EkPubKeyFormatDer is a EkPubKeyFormat enum value + EkPubKeyFormatDer = "der" + + // EkPubKeyFormatTpmt is a EkPubKeyFormat enum value + EkPubKeyFormatTpmt = "tpmt" +) + +// EkPubKeyFormat_Values returns all elements of the EkPubKeyFormat enum +func EkPubKeyFormat_Values() []string { + return []string{ + EkPubKeyFormatDer, + EkPubKeyFormatTpmt, + } +} + +const ( + // EkPubKeyTypeRsa2048 is a EkPubKeyType enum value + EkPubKeyTypeRsa2048 = "rsa-2048" + + // EkPubKeyTypeEccSecP384 is a EkPubKeyType enum value + EkPubKeyTypeEccSecP384 = "ecc-sec-p384" +) + +// EkPubKeyType_Values returns all elements of the EkPubKeyType enum +func EkPubKeyType_Values() []string { + return []string{ + EkPubKeyTypeRsa2048, + EkPubKeyTypeEccSecP384, + } +} + const ( // ElasticGpuStateAttached is a ElasticGpuState enum value ElasticGpuStateAttached = "ATTACHED" diff --git a/service/ec2/ec2iface/interface.go b/service/ec2/ec2iface/interface.go index 90e50399bf7..5a1c447647b 100644 --- a/service/ec2/ec2iface/interface.go +++ b/service/ec2/ec2iface/interface.go @@ -2183,6 +2183,10 @@ type EC2API interface { GetInstanceMetadataDefaultsWithContext(aws.Context, *ec2.GetInstanceMetadataDefaultsInput, ...request.Option) (*ec2.GetInstanceMetadataDefaultsOutput, error) GetInstanceMetadataDefaultsRequest(*ec2.GetInstanceMetadataDefaultsInput) (*request.Request, *ec2.GetInstanceMetadataDefaultsOutput) + GetInstanceTpmEkPub(*ec2.GetInstanceTpmEkPubInput) (*ec2.GetInstanceTpmEkPubOutput, error) + 
GetInstanceTpmEkPubWithContext(aws.Context, *ec2.GetInstanceTpmEkPubInput, ...request.Option) (*ec2.GetInstanceTpmEkPubOutput, error) + GetInstanceTpmEkPubRequest(*ec2.GetInstanceTpmEkPubInput) (*request.Request, *ec2.GetInstanceTpmEkPubOutput) + GetInstanceTypesFromInstanceRequirements(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error) GetInstanceTypesFromInstanceRequirementsWithContext(aws.Context, *ec2.GetInstanceTypesFromInstanceRequirementsInput, ...request.Option) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error) GetInstanceTypesFromInstanceRequirementsRequest(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*request.Request, *ec2.GetInstanceTypesFromInstanceRequirementsOutput) diff --git a/service/personalize/api.go b/service/personalize/api.go index 8d7e7807d64..0abc6e766bc 100644 --- a/service/personalize/api.go +++ b/service/personalize/api.go @@ -376,6 +376,139 @@ func (c *Personalize) CreateCampaignWithContext(ctx aws.Context, input *CreateCa return out, req.Send() } +const opCreateDataDeletionJob = "CreateDataDeletionJob" + +// CreateDataDeletionJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataDeletionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDataDeletionJob for more information on using the CreateDataDeletionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateDataDeletionJobRequest method. +// req, resp := client.CreateDataDeletionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateDataDeletionJob +func (c *Personalize) CreateDataDeletionJobRequest(input *CreateDataDeletionJobInput) (req *request.Request, output *CreateDataDeletionJobOutput) { + op := &request.Operation{ + Name: opCreateDataDeletionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDataDeletionJobInput{} + } + + output = &CreateDataDeletionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDataDeletionJob API operation for Amazon Personalize. +// +// Creates a batch job that deletes all references to specific users from an +// Amazon Personalize dataset group in batches. You specify the users to delete +// in a CSV file of userIds in an Amazon S3 bucket. After a job completes, Amazon +// Personalize no longer trains on the users’ data and no longer considers +// the users when generating user segments. For more information about creating +// a data deletion job, see Deleting users (https://docs.aws.amazon.com/personalize/latest/dg/delete-records.html). +// +// - Your input file must be a CSV file with a single USER_ID column that +// lists the users IDs. For more information about preparing the CSV file, +// see Preparing your data deletion file and uploading it to Amazon S3 (https://docs.aws.amazon.com/personalize/latest/dg/prepare-deletion-input-file.html). 
+// +// - To give Amazon Personalize permission to access your input CSV file +// of userIds, you must specify an IAM service role that has permission to +// read from the data source. This role needs GetObject and ListBucket permissions +// for the bucket and its content. These permissions are the same as importing +// data. For information on granting access to your Amazon S3 bucket, see +// Giving Amazon Personalize Access to Amazon S3 Resources (https://docs.aws.amazon.com/personalize/latest/dg/granting-personalize-s3-access.html). +// +// After you create a job, it can take up to a day to delete all references +// to the users from datasets and models. Until the job completes, Amazon Personalize +// continues to use the data when training. And if you use a User Segmentation +// recipe, the users might appear in user segments. +// +// # Status +// +// A data deletion job can have one of the following statuses: +// +// - PENDING > IN_PROGRESS > COMPLETED -or- FAILED +// +// To get the status of the data deletion job, call DescribeDataDeletionJob +// (https://docs.aws.amazon.com/personalize/latest/dg/API_DescribeDataDeletionJob.html) +// API operation and specify the Amazon Resource Name (ARN) of the job. If the +// status is FAILED, the response includes a failureReason key, which describes +// why the job failed. +// +// Related APIs +// +// - ListDataDeletionJobs (https://docs.aws.amazon.com/personalize/latest/dg/API_ListDataDeletionJobs.html) +// +// - DescribeDataDeletionJob (https://docs.aws.amazon.com/personalize/latest/dg/API_DescribeDataDeletionJob.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateDataDeletionJob for usage and error information. +// +// Returned Error Types: +// +// - InvalidInputException +// Provide a valid value for the field or parameter. +// +// - ResourceNotFoundException +// Could not find the specified resource. +// +// - ResourceAlreadyExistsException +// The specified resource already exists. +// +// - LimitExceededException +// The limit on the number of requests per second has been exceeded. +// +// - ResourceInUseException +// The specified resource is in use. +// +// - TooManyTagsException +// You have exceeded the maximum number of tags you can apply to this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateDataDeletionJob +func (c *Personalize) CreateDataDeletionJob(input *CreateDataDeletionJobInput) (*CreateDataDeletionJobOutput, error) { + req, out := c.CreateDataDeletionJobRequest(input) + return out, req.Send() +} + +// CreateDataDeletionJobWithContext is the same as CreateDataDeletionJob with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDataDeletionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Personalize) CreateDataDeletionJobWithContext(ctx aws.Context, input *CreateDataDeletionJobInput, opts ...request.Option) (*CreateDataDeletionJobOutput, error) { + req, out := c.CreateDataDeletionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateDataset = "CreateDataset" // CreateDatasetRequest generates a "aws/request.Request" representing the @@ -2876,6 +3009,89 @@ func (c *Personalize) DescribeCampaignWithContext(ctx aws.Context, input *Descri return out, req.Send() } +const opDescribeDataDeletionJob = "DescribeDataDeletionJob" + +// DescribeDataDeletionJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataDeletionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDataDeletionJob for more information on using the DescribeDataDeletionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeDataDeletionJobRequest method. +// req, resp := client.DescribeDataDeletionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeDataDeletionJob +func (c *Personalize) DescribeDataDeletionJobRequest(input *DescribeDataDeletionJobInput) (req *request.Request, output *DescribeDataDeletionJobOutput) { + op := &request.Operation{ + Name: opDescribeDataDeletionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDataDeletionJobInput{} + } + + output = &DescribeDataDeletionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDataDeletionJob API operation for Amazon Personalize. +// +// Describes the data deletion job created by CreateDataDeletionJob (https://docs.aws.amazon.com/personalize/latest/dg/API_CreateDataDeletionJob.html), +// including the job status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeDataDeletionJob for usage and error information. +// +// Returned Error Types: +// +// - InvalidInputException +// Provide a valid value for the field or parameter. +// +// - ResourceNotFoundException +// Could not find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeDataDeletionJob +func (c *Personalize) DescribeDataDeletionJob(input *DescribeDataDeletionJobInput) (*DescribeDataDeletionJobOutput, error) { + req, out := c.DescribeDataDeletionJobRequest(input) + return out, req.Send() +} + +// DescribeDataDeletionJobWithContext is the same as DescribeDataDeletionJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDataDeletionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeDataDeletionJobWithContext(ctx aws.Context, input *DescribeDataDeletionJobInput, opts ...request.Option) (*DescribeDataDeletionJobOutput, error) { + req, out := c.DescribeDataDeletionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeDataset = "DescribeDataset" // DescribeDatasetRequest generates a "aws/request.Request" representing the @@ -4490,6 +4706,92 @@ func (c *Personalize) ListCampaignsPagesWithContext(ctx aws.Context, input *List return p.Err() } +const opListDataDeletionJobs = "ListDataDeletionJobs" + +// ListDataDeletionJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListDataDeletionJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDataDeletionJobs for more information on using the ListDataDeletionJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListDataDeletionJobsRequest method. +// req, resp := client.ListDataDeletionJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListDataDeletionJobs +func (c *Personalize) ListDataDeletionJobsRequest(input *ListDataDeletionJobsInput) (req *request.Request, output *ListDataDeletionJobsOutput) { + op := &request.Operation{ + Name: opListDataDeletionJobs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDataDeletionJobsInput{} + } + + output = &ListDataDeletionJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDataDeletionJobs API operation for Amazon Personalize. +// +// Returns a list of data deletion jobs for a dataset group ordered by creation +// time, with the most recent first. When a dataset group is not specified, +// all the data deletion jobs associated with the account are listed. The response +// provides the properties for each job, including the Amazon Resource Name +// (ARN). For more information on data deletion jobs, see Deleting users (https://docs.aws.amazon.com/personalize/latest/dg/delete-records.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListDataDeletionJobs for usage and error information. +// +// Returned Error Types: +// +// - InvalidInputException +// Provide a valid value for the field or parameter. +// +// - InvalidNextTokenException +// The token is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListDataDeletionJobs +func (c *Personalize) ListDataDeletionJobs(input *ListDataDeletionJobsInput) (*ListDataDeletionJobsOutput, error) { + req, out := c.ListDataDeletionJobsRequest(input) + return out, req.Send() +} + +// ListDataDeletionJobsWithContext is the same as ListDataDeletionJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListDataDeletionJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListDataDeletionJobsWithContext(ctx aws.Context, input *ListDataDeletionJobsInput, opts ...request.Option) (*ListDataDeletionJobsOutput, error) { + req, out := c.ListDataDeletionJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListDatasetExportJobs = "ListDatasetExportJobs" // ListDatasetExportJobsRequest generates a "aws/request.Request" representing the @@ -9335,38 +9637,33 @@ func (s *CreateCampaignOutput) SetCampaignArn(v string) *CreateCampaignOutput { return s } -type CreateDatasetExportJobInput struct { +type CreateDataDeletionJobInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the dataset that contains the data to export. + // The Amazon S3 bucket that contains the list of userIds of the users to delete. // - // DatasetArn is a required field - DatasetArn *string `locationName:"datasetArn" type:"string" required:"true"` + // DataSource is a required field + DataSource *DataSource `locationName:"dataSource" type:"structure" required:"true"` - // The data to export, based on how you imported the data. You can choose to - // export only BULK data that you imported using a dataset import job, only - // PUT data that you imported incrementally (using the console, PutEvents, PutUsers - // and PutItems operations), or ALL for both types. The default value is PUT. - IngestionMode *string `locationName:"ingestionMode" type:"string" enum:"IngestionMode"` + // The Amazon Resource Name (ARN) of the dataset group that has the datasets + // you want to delete records from. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string" required:"true"` - // The name for the dataset export job. + // The name for the data deletion job. // // JobName is a required field JobName *string `locationName:"jobName" min:"1" type:"string" required:"true"` - // The path to the Amazon S3 bucket where the job's output is stored. - // - // JobOutput is a required field - JobOutput *DatasetExportJobOutput `locationName:"jobOutput" type:"structure" required:"true"` - - // The Amazon Resource Name (ARN) of the IAM service role that has permissions - // to add data to your output Amazon S3 bucket. + // The Amazon Resource Name (ARN) of the IAM role that has permissions to read + // from the Amazon S3 data source. // // RoleArn is a required field RoleArn *string `locationName:"roleArn" type:"string" required:"true"` // A list of tags (https://docs.aws.amazon.com/personalize/latest/dg/tagging-resources.html) - // to apply to the dataset export job. + // to apply to the data deletion job. 
Tags []*Tag `locationName:"tags" type:"list"` } @@ -9375,7 +9672,7 @@ type CreateDatasetExportJobInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateDatasetExportJobInput) String() string { +func (s CreateDataDeletionJobInput) String() string { return awsutil.Prettify(s) } @@ -9384,15 +9681,18 @@ func (s CreateDatasetExportJobInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateDatasetExportJobInput) GoString() string { +func (s CreateDataDeletionJobInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDatasetExportJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDatasetExportJobInput"} - if s.DatasetArn == nil { - invalidParams.Add(request.NewErrParamRequired("DatasetArn")) +func (s *CreateDataDeletionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDataDeletionJobInput"} + if s.DataSource == nil { + invalidParams.Add(request.NewErrParamRequired("DataSource")) + } + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) } if s.JobName == nil { invalidParams.Add(request.NewErrParamRequired("JobName")) @@ -9400,17 +9700,9 @@ func (s *CreateDatasetExportJobInput) Validate() error { if s.JobName != nil && len(*s.JobName) < 1 { invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) } - if s.JobOutput == nil { - invalidParams.Add(request.NewErrParamRequired("JobOutput")) - } if s.RoleArn == nil { invalidParams.Add(request.NewErrParamRequired("RoleArn")) } - if s.JobOutput != nil { - if err := s.JobOutput.Validate(); err != nil { - invalidParams.AddNested("JobOutput", err.(request.ErrInvalidParams)) - } - } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -9428,13 +9720,167 @@ func (s *CreateDatasetExportJobInput) Validate() error { return nil } -// SetDatasetArn sets the DatasetArn field's value. -func (s *CreateDatasetExportJobInput) SetDatasetArn(v string) *CreateDatasetExportJobInput { - s.DatasetArn = &v +// SetDataSource sets the DataSource field's value. +func (s *CreateDataDeletionJobInput) SetDataSource(v *DataSource) *CreateDataDeletionJobInput { + s.DataSource = v return s } -// SetIngestionMode sets the IngestionMode field's value. +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *CreateDataDeletionJobInput) SetDatasetGroupArn(v string) *CreateDataDeletionJobInput { + s.DatasetGroupArn = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *CreateDataDeletionJobInput) SetJobName(v string) *CreateDataDeletionJobInput { + s.JobName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateDataDeletionJobInput) SetRoleArn(v string) *CreateDataDeletionJobInput { + s.RoleArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDataDeletionJobInput) SetTags(v []*Tag) *CreateDataDeletionJobInput { + s.Tags = v + return s +} + +type CreateDataDeletionJobOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the data deletion job. 
+ DataDeletionJobArn *string `locationName:"dataDeletionJobArn" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDataDeletionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDataDeletionJobOutput) GoString() string { + return s.String() +} + +// SetDataDeletionJobArn sets the DataDeletionJobArn field's value. +func (s *CreateDataDeletionJobOutput) SetDataDeletionJobArn(v string) *CreateDataDeletionJobOutput { + s.DataDeletionJobArn = &v + return s +} + +type CreateDatasetExportJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset that contains the data to export. + // + // DatasetArn is a required field + DatasetArn *string `locationName:"datasetArn" type:"string" required:"true"` + + // The data to export, based on how you imported the data. You can choose to + // export only BULK data that you imported using a dataset import job, only + // PUT data that you imported incrementally (using the console, PutEvents, PutUsers + // and PutItems operations), or ALL for both types. The default value is PUT. + IngestionMode *string `locationName:"ingestionMode" type:"string" enum:"IngestionMode"` + + // The name for the dataset export job. + // + // JobName is a required field + JobName *string `locationName:"jobName" min:"1" type:"string" required:"true"` + + // The path to the Amazon S3 bucket where the job's output is stored. + // + // JobOutput is a required field + JobOutput *DatasetExportJobOutput `locationName:"jobOutput" type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM service role that has permissions + // to add data to your output Amazon S3 bucket. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // A list of tags (https://docs.aws.amazon.com/personalize/latest/dg/tagging-resources.html) + // to apply to the dataset export job. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDatasetExportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDatasetExportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateDatasetExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetExportJobInput"} + if s.DatasetArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetArn")) + } + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.JobOutput == nil { + invalidParams.Add(request.NewErrParamRequired("JobOutput")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.JobOutput != nil { + if err := s.JobOutput.Validate(); err != nil { + invalidParams.AddNested("JobOutput", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *CreateDatasetExportJobInput) SetDatasetArn(v string) *CreateDatasetExportJobInput { + s.DatasetArn = &v + return s +} + +// SetIngestionMode sets the IngestionMode field's value. func (s *CreateDatasetExportJobInput) SetIngestionMode(v string) *CreateDatasetExportJobInput { s.IngestionMode = &v return s @@ -10968,12 +11414,239 @@ func (s *CreateSolutionVersionOutput) SetSolutionVersionArn(v string) *CreateSol return s } -// Describes the data source that contains the data to upload to a dataset. +// Describes a job that deletes all references to specific users from an Amazon +// Personalize dataset group in batches. For information about creating a data +// deletion job, see Deleting users (https://docs.aws.amazon.com/personalize/latest/dg/delete-records.html). +type DataDeletionJob struct { + _ struct{} `type:"structure"` + + // The creation date and time (in Unix time) of the data deletion job. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the data deletion job. + DataDeletionJobArn *string `locationName:"dataDeletionJobArn" type:"string"` + + // Describes the data source that contains the data to upload to a dataset, + // or the list of records to delete from Amazon Personalize. + DataSource *DataSource `locationName:"dataSource" type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group the job deletes records + // from. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // If a data deletion job fails, provides the reason why. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The name of the data deletion job. + JobName *string `locationName:"jobName" min:"1" type:"string"` + + // The date and time (in Unix time) the data deletion job was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The number of records deleted by a COMPLETED job. + NumDeleted *int64 `locationName:"numDeleted" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM role that has permissions to read + // from the Amazon S3 data source. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The status of the data deletion job. 
+ // + // A data deletion job can have one of the following statuses: + // + // * PENDING > IN_PROGRESS > COMPLETED -or- FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataDeletionJob) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataDeletionJob) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *DataDeletionJob) SetCreationDateTime(v time.Time) *DataDeletionJob { + s.CreationDateTime = &v + return s +} + +// SetDataDeletionJobArn sets the DataDeletionJobArn field's value. +func (s *DataDeletionJob) SetDataDeletionJobArn(v string) *DataDeletionJob { + s.DataDeletionJobArn = &v + return s +} + +// SetDataSource sets the DataSource field's value. +func (s *DataDeletionJob) SetDataSource(v *DataSource) *DataDeletionJob { + s.DataSource = v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DataDeletionJob) SetDatasetGroupArn(v string) *DataDeletionJob { + s.DatasetGroupArn = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DataDeletionJob) SetFailureReason(v string) *DataDeletionJob { + s.FailureReason = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *DataDeletionJob) SetJobName(v string) *DataDeletionJob { + s.JobName = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *DataDeletionJob) SetLastUpdatedDateTime(v time.Time) *DataDeletionJob { + s.LastUpdatedDateTime = &v + return s +} + +// SetNumDeleted sets the NumDeleted field's value. +func (s *DataDeletionJob) SetNumDeleted(v int64) *DataDeletionJob { + s.NumDeleted = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DataDeletionJob) SetRoleArn(v string) *DataDeletionJob { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DataDeletionJob) SetStatus(v string) *DataDeletionJob { + s.Status = &v + return s +} + +// Provides a summary of the properties of a data deletion job. For a complete +// listing, call the DescribeDataDeletionJob (https://docs.aws.amazon.com/personalize/latest/dg/API_DescribeDataDeletionJob.html) +// API operation. +type DataDeletionJobSummary struct { + _ struct{} `type:"structure"` + + // The creation date and time (in Unix time) of the data deletion job. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the data deletion job. + DataDeletionJobArn *string `locationName:"dataDeletionJobArn" type:"string"` + + // The Amazon Resource Name (ARN) of the dataset group the job deleted records + // from. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // If a data deletion job fails, provides the reason why. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The name of the data deletion job. 
+ JobName *string `locationName:"jobName" min:"1" type:"string"` + + // The date and time (in Unix time) the data deletion job was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The status of the data deletion job. + // + // A data deletion job can have one of the following statuses: + // + // * PENDING > IN_PROGRESS > COMPLETED -or- FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataDeletionJobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataDeletionJobSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *DataDeletionJobSummary) SetCreationDateTime(v time.Time) *DataDeletionJobSummary { + s.CreationDateTime = &v + return s +} + +// SetDataDeletionJobArn sets the DataDeletionJobArn field's value. +func (s *DataDeletionJobSummary) SetDataDeletionJobArn(v string) *DataDeletionJobSummary { + s.DataDeletionJobArn = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DataDeletionJobSummary) SetDatasetGroupArn(v string) *DataDeletionJobSummary { + s.DatasetGroupArn = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DataDeletionJobSummary) SetFailureReason(v string) *DataDeletionJobSummary { + s.FailureReason = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *DataDeletionJobSummary) SetJobName(v string) *DataDeletionJobSummary { + s.JobName = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *DataDeletionJobSummary) SetLastUpdatedDateTime(v time.Time) *DataDeletionJobSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DataDeletionJobSummary) SetStatus(v string) *DataDeletionJobSummary { + s.Status = &v + return s +} + +// Describes the data source that contains the data to upload to a dataset, +// or the list of records to delete from Amazon Personalize. type DataSource struct { _ struct{} `type:"structure"` - // The path to the Amazon S3 bucket where the data that you want to upload to - // your dataset is stored. For example: + // For dataset import jobs, the path to the Amazon S3 bucket where the data + // that you want to upload to your dataset is stored. For data deletion jobs, + // the path to the Amazon S3 bucket that stores the list of records to delete. + // + // For example: + // + // s3://bucket-name/folder-name/fileName.csv + // + // If your CSV files are in a folder in your Amazon S3 bucket and you want your + // import job or data deletion job to consider multiple files, you can specify + // the path to the folder. With a data deletion job, Amazon Personalize uses + // all files in the folder and any sub folder. 
Use the following syntax with + // a / after the folder name: // // s3://bucket-name/folder-name/ DataLocation *string `locationName:"dataLocation" type:"string"` @@ -13302,6 +13975,93 @@ func (s *DescribeCampaignOutput) SetCampaign(v *Campaign) *DescribeCampaignOutpu return s } +type DescribeDataDeletionJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the data deletion job. + // + // DataDeletionJobArn is a required field + DataDeletionJobArn *string `locationName:"dataDeletionJobArn" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDataDeletionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDataDeletionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDataDeletionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataDeletionJobInput"} + if s.DataDeletionJobArn == nil { + invalidParams.Add(request.NewErrParamRequired("DataDeletionJobArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataDeletionJobArn sets the DataDeletionJobArn field's value. +func (s *DescribeDataDeletionJobInput) SetDataDeletionJobArn(v string) *DescribeDataDeletionJobInput { + s.DataDeletionJobArn = &v + return s +} + +type DescribeDataDeletionJobOutput struct { + _ struct{} `type:"structure"` + + // Information about the data deletion job, including the status. + // + // The status is one of the following values: + // + // * PENDING + // + // * IN_PROGRESS + // + // * COMPLETED + // + // * FAILED + DataDeletionJob *DataDeletionJob `locationName:"dataDeletionJob" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDataDeletionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDataDeletionJobOutput) GoString() string { + return s.String() +} + +// SetDataDeletionJob sets the DataDeletionJob field's value. +func (s *DescribeDataDeletionJobOutput) SetDataDeletionJob(v *DataDeletionJob) *DescribeDataDeletionJobOutput { + s.DataDeletionJob = v + return s +} + type DescribeDatasetExportJobInput struct { _ struct{} `type:"structure"` @@ -15721,6 +16481,110 @@ func (s *ListCampaignsOutput) SetNextToken(v string) *ListCampaignsOutput { return s } +type ListDataDeletionJobsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group to list data deletion + // jobs for. 
+ DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // The maximum number of data deletion jobs to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListDataDeletionJobs for getting + // the next set of jobs (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDataDeletionJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDataDeletionJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDataDeletionJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDataDeletionJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *ListDataDeletionJobsInput) SetDatasetGroupArn(v string) *ListDataDeletionJobsInput { + s.DatasetGroupArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDataDeletionJobsInput) SetMaxResults(v int64) *ListDataDeletionJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataDeletionJobsInput) SetNextToken(v string) *ListDataDeletionJobsInput { + s.NextToken = &v + return s +} + +type ListDataDeletionJobsOutput struct { + _ struct{} `type:"structure"` + + // The list of data deletion jobs. + DataDeletionJobs []*DataDeletionJobSummary `locationName:"dataDeletionJobs" type:"list"` + + // A token for getting the next set of data deletion jobs (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDataDeletionJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDataDeletionJobsOutput) GoString() string { + return s.String() +} + +// SetDataDeletionJobs sets the DataDeletionJobs field's value. +func (s *ListDataDeletionJobsOutput) SetDataDeletionJobs(v []*DataDeletionJobSummary) *ListDataDeletionJobsOutput { + s.DataDeletionJobs = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListDataDeletionJobsOutput) SetNextToken(v string) *ListDataDeletionJobsOutput { + s.NextToken = &v + return s +} + type ListDatasetExportJobsInput struct { _ struct{} `type:"structure"` diff --git a/service/personalize/personalizeiface/interface.go b/service/personalize/personalizeiface/interface.go index 8bd0a66f286..7e7c31bfa47 100644 --- a/service/personalize/personalizeiface/interface.go +++ b/service/personalize/personalizeiface/interface.go @@ -72,6 +72,10 @@ type PersonalizeAPI interface { CreateCampaignWithContext(aws.Context, *personalize.CreateCampaignInput, ...request.Option) (*personalize.CreateCampaignOutput, error) CreateCampaignRequest(*personalize.CreateCampaignInput) (*request.Request, *personalize.CreateCampaignOutput) + CreateDataDeletionJob(*personalize.CreateDataDeletionJobInput) (*personalize.CreateDataDeletionJobOutput, error) + CreateDataDeletionJobWithContext(aws.Context, *personalize.CreateDataDeletionJobInput, ...request.Option) (*personalize.CreateDataDeletionJobOutput, error) + CreateDataDeletionJobRequest(*personalize.CreateDataDeletionJobInput) (*request.Request, *personalize.CreateDataDeletionJobOutput) + CreateDataset(*personalize.CreateDatasetInput) (*personalize.CreateDatasetOutput, error) CreateDatasetWithContext(aws.Context, *personalize.CreateDatasetInput, ...request.Option) (*personalize.CreateDatasetOutput, error) CreateDatasetRequest(*personalize.CreateDatasetInput) (*request.Request, *personalize.CreateDatasetOutput) @@ -168,6 +172,10 @@ type PersonalizeAPI interface { DescribeCampaignWithContext(aws.Context, *personalize.DescribeCampaignInput, ...request.Option) (*personalize.DescribeCampaignOutput, error) DescribeCampaignRequest(*personalize.DescribeCampaignInput) (*request.Request, *personalize.DescribeCampaignOutput) + DescribeDataDeletionJob(*personalize.DescribeDataDeletionJobInput) (*personalize.DescribeDataDeletionJobOutput, error) + DescribeDataDeletionJobWithContext(aws.Context, *personalize.DescribeDataDeletionJobInput, ...request.Option) (*personalize.DescribeDataDeletionJobOutput, error) + DescribeDataDeletionJobRequest(*personalize.DescribeDataDeletionJobInput) (*request.Request, *personalize.DescribeDataDeletionJobOutput) + DescribeDataset(*personalize.DescribeDatasetInput) (*personalize.DescribeDatasetOutput, error) DescribeDatasetWithContext(aws.Context, *personalize.DescribeDatasetInput, ...request.Option) (*personalize.DescribeDatasetOutput, error) DescribeDatasetRequest(*personalize.DescribeDatasetInput) (*request.Request, *personalize.DescribeDatasetOutput) @@ -245,6 +253,10 @@ type PersonalizeAPI interface { ListCampaignsPages(*personalize.ListCampaignsInput, func(*personalize.ListCampaignsOutput, bool) bool) error ListCampaignsPagesWithContext(aws.Context, *personalize.ListCampaignsInput, func(*personalize.ListCampaignsOutput, bool) bool, ...request.Option) error + ListDataDeletionJobs(*personalize.ListDataDeletionJobsInput) (*personalize.ListDataDeletionJobsOutput, error) + ListDataDeletionJobsWithContext(aws.Context, *personalize.ListDataDeletionJobsInput, ...request.Option) (*personalize.ListDataDeletionJobsOutput, error) + ListDataDeletionJobsRequest(*personalize.ListDataDeletionJobsInput) (*request.Request, *personalize.ListDataDeletionJobsOutput) + ListDatasetExportJobs(*personalize.ListDatasetExportJobsInput) (*personalize.ListDatasetExportJobsOutput, error) ListDatasetExportJobsWithContext(aws.Context, *personalize.ListDatasetExportJobsInput, ...request.Option) (*personalize.ListDatasetExportJobsOutput, 
error) ListDatasetExportJobsRequest(*personalize.ListDatasetExportJobsInput) (*request.Request, *personalize.ListDatasetExportJobsOutput) diff --git a/service/redshiftserverless/api.go b/service/redshiftserverless/api.go index b351bcf016e..faa06236410 100644 --- a/service/redshiftserverless/api.go +++ b/service/redshiftserverless/api.go @@ -5676,10 +5676,10 @@ type ConfigParameter struct { _ struct{} `type:"structure"` // The key of the parameter. The options are auto_mv, datestyle, enable_case_sensitive_identifier, - // enable_user_activity_logging, query_group, search_path, require_ssl, and - // query monitoring metrics that let you define performance boundaries. For - // more information about query monitoring rules and available metrics, see - // Query monitoring metrics for Amazon Redshift Serverless (https://docs.aws.amazon.com/redshift/latest/dg/cm-c-wlm-query-monitoring-rules.html#cm-c-wlm-query-monitoring-metrics-serverless). + // enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, + // and query monitoring metrics that let you define performance boundaries. + // For more information about query monitoring rules and available metrics, + // see Query monitoring metrics for Amazon Redshift Serverless (https://docs.aws.amazon.com/redshift/latest/dg/cm-c-wlm-query-monitoring-rules.html#cm-c-wlm-query-monitoring-metrics-serverless). ParameterKey *string `locationName:"parameterKey" type:"string"` // The value of the parameter to set. @@ -7064,10 +7064,10 @@ type CreateWorkgroupInput struct { // An array of parameters to set for advanced control over a database. The options // are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, - // query_group, search_path, require_ssl, and query monitoring metrics that - // let you define performance boundaries. For more information about query monitoring - // rules and available metrics, see Query monitoring metrics for Amazon Redshift - // Serverless (https://docs.aws.amazon.com/redshift/latest/dg/cm-c-wlm-query-monitoring-rules.html#cm-c-wlm-query-monitoring-metrics-serverless). + // query_group, search_path, require_ssl, use_fips_ssl, and query monitoring + // metrics that let you define performance boundaries. For more information + // about query monitoring rules and available metrics, see Query monitoring + // metrics for Amazon Redshift Serverless (https://docs.aws.amazon.com/redshift/latest/dg/cm-c-wlm-query-monitoring-rules.html#cm-c-wlm-query-monitoring-metrics-serverless). ConfigParameters []*ConfigParameter `locationName:"configParameters" type:"list"` // The value that specifies whether to turn on enhanced virtual private cloud @@ -9908,8 +9908,8 @@ type ListScheduledActionsOutput struct { // using the returned token to retrieve the next page. NextToken *string `locationName:"nextToken" min:"8" type:"string"` - // All of the returned scheduled action objects. - ScheduledActions []*string `locationName:"scheduledActions" type:"list"` + // All of the returned scheduled action association objects. + ScheduledActions []*ScheduledActionAssociation `locationName:"scheduledActions" type:"list"` } // String returns the string representation. @@ -9937,7 +9937,7 @@ func (s *ListScheduledActionsOutput) SetNextToken(v string) *ListScheduledAction } // SetScheduledActions sets the ScheduledActions field's value. 
-func (s *ListScheduledActionsOutput) SetScheduledActions(v []*string) *ListScheduledActionsOutput { +func (s *ListScheduledActionsOutput) SetScheduledActions(v []*ScheduledActionAssociation) *ListScheduledActionsOutput { s.ScheduledActions = v return s } @@ -11840,6 +11840,47 @@ func (s *Schedule) SetCron(v string) *Schedule { return s } +// Contains names of objects associated with a scheduled action. +type ScheduledActionAssociation struct { + _ struct{} `type:"structure"` + + // Name of associated Amazon Redshift Serverless namespace. + NamespaceName *string `locationName:"namespaceName" min:"3" type:"string"` + + // Name of associated scheduled action. + ScheduledActionName *string `locationName:"scheduledActionName" min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScheduledActionAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScheduledActionAssociation) GoString() string { + return s.String() +} + +// SetNamespaceName sets the NamespaceName field's value. +func (s *ScheduledActionAssociation) SetNamespaceName(v string) *ScheduledActionAssociation { + s.NamespaceName = &v + return s +} + +// SetScheduledActionName sets the ScheduledActionName field's value. +func (s *ScheduledActionAssociation) SetScheduledActionName(v string) *ScheduledActionAssociation { + s.ScheduledActionName = &v + return s +} + // The returned scheduled action object. type ScheduledActionResponse struct { _ struct{} `type:"structure"` @@ -13809,10 +13850,10 @@ type UpdateWorkgroupInput struct { // An array of parameters to set for advanced control over a database. The options // are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, - // query_group, search_path, require_ssl, and query monitoring metrics that - // let you define performance boundaries. For more information about query monitoring - // rules and available metrics, see Query monitoring metrics for Amazon Redshift - // Serverless (https://docs.aws.amazon.com/redshift/latest/dg/cm-c-wlm-query-monitoring-rules.html#cm-c-wlm-query-monitoring-metrics-serverless). + // query_group, search_path, require_ssl, use_fips_ssl, and query monitoring + // metrics that let you define performance boundaries. For more information + // about query monitoring rules and available metrics, see Query monitoring + // metrics for Amazon Redshift Serverless (https://docs.aws.amazon.com/redshift/latest/dg/cm-c-wlm-query-monitoring-rules.html#cm-c-wlm-query-monitoring-metrics-serverless). ConfigParameters []*ConfigParameter `locationName:"configParameters" type:"list"` // The value that specifies whether to turn on enhanced virtual private cloud @@ -14224,10 +14265,10 @@ type Workgroup struct { // An array of parameters to set for advanced control over a database. The options // are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, - // query_group, search_path, require_ssl, and query monitoring metrics that - // let you define performance boundaries. 
For more information about query monitoring - // rules and available metrics, see Query monitoring metrics for Amazon Redshift - // Serverless (https://docs.aws.amazon.com/redshift/latest/dg/cm-c-wlm-query-monitoring-rules.html#cm-c-wlm-query-monitoring-metrics-serverless). + // query_group, search_path, require_ssl, use_fips_ssl, and query monitoring + // metrics that let you define performance boundaries. For more information + // about query monitoring rules and available metrics, see Query monitoring + // metrics for Amazon Redshift Serverless (https://docs.aws.amazon.com/redshift/latest/dg/cm-c-wlm-query-monitoring-rules.html#cm-c-wlm-query-monitoring-metrics-serverless). ConfigParameters []*ConfigParameter `locationName:"configParameters" type:"list"` // The creation date of the workgroup. @@ -14272,7 +14313,7 @@ type Workgroup struct { Port *int64 `locationName:"port" type:"integer"` // A value that specifies whether the workgroup can be accessible from a public - // network + // network. PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"` // An array of security group IDs to associate with the workgroup.