diff --git a/apis/athena/2017-05-18/api-2.json b/apis/athena/2017-05-18/api-2.json
index 1230522bd56..a624ccfe94b 100644
--- a/apis/athena/2017-05-18/api-2.json
+++ b/apis/athena/2017-05-18/api-2.json
@@ -239,6 +239,19 @@
{"shape":"InvalidRequestException"}
]
},
+ "GetQueryRuntimeStatistics":{
+ "name":"GetQueryRuntimeStatistics",
+ "http":{
+ "method":"POST",
+ "requestUri":"/"
+ },
+ "input":{"shape":"GetQueryRuntimeStatisticsInput"},
+ "output":{"shape":"GetQueryRuntimeStatisticsOutput"},
+ "errors":[
+ {"shape":"InternalServerException"},
+ {"shape":"InvalidRequestException"}
+ ]
+ },
"GetTableMetadata":{
"name":"GetTableMetadata",
"http":{
@@ -982,6 +995,19 @@
"NextToken":{"shape":"Token"}
}
},
+ "GetQueryRuntimeStatisticsInput":{
+ "type":"structure",
+ "required":["QueryExecutionId"],
+ "members":{
+ "QueryExecutionId":{"shape":"QueryExecutionId"}
+ }
+ },
+ "GetQueryRuntimeStatisticsOutput":{
+ "type":"structure",
+ "members":{
+ "QueryRuntimeStatistics":{"shape":"QueryRuntimeStatistics"}
+ }
+ },
"GetTableMetadataInput":{
"type":"structure",
"required":[
@@ -1402,6 +1428,64 @@
"AthenaError":{"shape":"AthenaError"}
}
},
+ "QueryRuntimeStatistics":{
+ "type":"structure",
+ "members":{
+ "Timeline":{"shape":"QueryRuntimeStatisticsTimeline"},
+ "Rows":{"shape":"QueryRuntimeStatisticsRows"},
+ "OutputStage":{"shape":"QueryStage"}
+ }
+ },
+ "QueryRuntimeStatisticsRows":{
+ "type":"structure",
+ "members":{
+ "InputRows":{"shape":"Long"},
+ "InputBytes":{"shape":"Long"},
+ "OutputBytes":{"shape":"Long"},
+ "OutputRows":{"shape":"Long"}
+ }
+ },
+ "QueryRuntimeStatisticsTimeline":{
+ "type":"structure",
+ "members":{
+ "QueryQueueTimeInMillis":{"shape":"Long"},
+ "QueryPlanningTimeInMillis":{"shape":"Long"},
+ "EngineExecutionTimeInMillis":{"shape":"Long"},
+ "ServiceProcessingTimeInMillis":{"shape":"Long"},
+ "TotalExecutionTimeInMillis":{"shape":"Long"}
+ }
+ },
+ "QueryStage":{
+ "type":"structure",
+ "members":{
+ "StageId":{"shape":"Long"},
+ "State":{"shape":"String"},
+ "OutputBytes":{"shape":"Long"},
+ "OutputRows":{"shape":"Long"},
+ "InputBytes":{"shape":"Long"},
+ "InputRows":{"shape":"Long"},
+ "ExecutionTime":{"shape":"Long"},
+ "QueryStagePlan":{"shape":"QueryStagePlanNode"},
+ "SubStages":{"shape":"QueryStages"}
+ }
+ },
+ "QueryStagePlanNode":{
+ "type":"structure",
+ "members":{
+ "Name":{"shape":"String"},
+ "Identifier":{"shape":"String"},
+ "Children":{"shape":"QueryStagePlanNodes"},
+ "RemoteSources":{"shape":"StringList"}
+ }
+ },
+ "QueryStagePlanNodes":{
+ "type":"list",
+ "member":{"shape":"QueryStagePlanNode"}
+ },
+ "QueryStages":{
+ "type":"list",
+ "member":{"shape":"QueryStage"}
+ },
"QueryString":{
"type":"string",
"max":262144,
@@ -1516,6 +1600,10 @@
}
},
"String":{"type":"string"},
+ "StringList":{
+ "type":"list",
+ "member":{"shape":"String"}
+ },
"TableMetadata":{
"type":"structure",
"required":["Name"],
diff --git a/apis/athena/2017-05-18/docs-2.json b/apis/athena/2017-05-18/docs-2.json
index da171605efc..76a3c931409 100644
--- a/apis/athena/2017-05-18/docs-2.json
+++ b/apis/athena/2017-05-18/docs-2.json
@@ -19,6 +19,7 @@
"GetPreparedStatement": "
Retrieves the prepared statement with the specified name from the specified workgroup.
",
"GetQueryExecution": "Returns information about a single execution of a query if you have access to the workgroup in which the query ran. Each time a query executes, information about the query execution is saved with a unique ID.
",
"GetQueryResults": "Streams the results of a single query execution specified by QueryExecutionId
from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.
To stream query results successfully, the IAM principal with permission to call GetQueryResults
also must have permissions to the Amazon S3 GetObject
action for the Athena query results location.
IAM principals with permission to the Amazon S3 GetObject
action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults
action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.
",
+ "GetQueryRuntimeStatistics": "Returns query execution runtime statistics related to a single execution of a query if you have access to the workgroup in which the query ran. The query execution runtime statistics is returned only when QueryExecutionStatus$State is in a SUCCEEDED or FAILED state.
",
"GetTableMetadata": "Returns table metadata for the specified catalog, database, and table.
",
"GetWorkGroup": "Returns information about the workgroup with the specified name.
",
"ListDataCatalogs": "Lists the data catalogs in the current Amazon Web Services account.
",
@@ -484,6 +485,16 @@
"refs": {
}
},
+ "GetQueryRuntimeStatisticsInput": {
+ "base": null,
+ "refs": {
+ }
+ },
+ "GetQueryRuntimeStatisticsOutput": {
+ "base": null,
+ "refs": {
+ }
+ },
"GetTableMetadataInput": {
"base": null,
"refs": {
@@ -633,7 +644,22 @@
"QueryExecutionStatistics$TotalExecutionTimeInMillis": "The number of milliseconds that Athena took to run the query.
",
"QueryExecutionStatistics$QueryQueueTimeInMillis": "The number of milliseconds that the query was in your query queue waiting for resources. Note that if transient errors occur, Athena might automatically add the query back to the queue.
",
"QueryExecutionStatistics$QueryPlanningTimeInMillis": "The number of milliseconds that Athena took to plan the query processing flow. This includes the time spent retrieving table partitions from the data source. Note that because the query engine performs the query planning, query planning time is a subset of engine processing time.
",
- "QueryExecutionStatistics$ServiceProcessingTimeInMillis": "The number of milliseconds that Athena took to finalize and publish the query results after the query engine finished running the query.
"
+ "QueryExecutionStatistics$ServiceProcessingTimeInMillis": "The number of milliseconds that Athena took to finalize and publish the query results after the query engine finished running the query.
",
+ "QueryRuntimeStatisticsRows$InputRows": "The number of rows read to execute the query.
",
+ "QueryRuntimeStatisticsRows$InputBytes": "The number of bytes read to execute the query.
",
+ "QueryRuntimeStatisticsRows$OutputBytes": "The number of bytes returned by the query.
",
+ "QueryRuntimeStatisticsRows$OutputRows": "The number of rows returned by the query.
",
+ "QueryRuntimeStatisticsTimeline$QueryQueueTimeInMillis": "The number of milliseconds that the query was in your query queue waiting for resources. Note that if transient errors occur, Athena might automatically add the query back to the queue.
",
+ "QueryRuntimeStatisticsTimeline$QueryPlanningTimeInMillis": "The number of milliseconds that Athena took to plan the query processing flow. This includes the time spent retrieving table partitions from the data source. Note that because the query engine performs the query planning, query planning time is a subset of engine processing time.
",
+ "QueryRuntimeStatisticsTimeline$EngineExecutionTimeInMillis": "The number of milliseconds that the query took to execute.
",
+ "QueryRuntimeStatisticsTimeline$ServiceProcessingTimeInMillis": "The number of milliseconds that Athena took to finalize and publish the query results after the query engine finished running the query.
",
+ "QueryRuntimeStatisticsTimeline$TotalExecutionTimeInMillis": "The number of milliseconds that Athena took to run the query.
",
+ "QueryStage$StageId": "The identifier for a stage.
",
+ "QueryStage$OutputBytes": "The number of bytes output from the stage after execution.
",
+ "QueryStage$OutputRows": "The number of rows output from the stage after execution.
",
+ "QueryStage$InputBytes": "The number of bytes input into the stage for execution.
",
+ "QueryStage$InputRows": "The number of rows input into the stage for execution.
",
+ "QueryStage$ExecutionTime": "Time taken to execute this stage.
"
}
},
"MaxDataCatalogsCount": {
@@ -822,6 +848,7 @@
"refs": {
"GetQueryExecutionInput$QueryExecutionId": "The unique ID of the query execution.
",
"GetQueryResultsInput$QueryExecutionId": "The unique ID of the query execution.
",
+ "GetQueryRuntimeStatisticsInput$QueryExecutionId": "The unique ID of the query execution.
",
"QueryExecution$QueryExecutionId": "The unique identifier for each query execution.
",
"QueryExecutionIdList$member": null,
"StartQueryExecutionOutput$QueryExecutionId": "The unique ID of the query that ran as a result of this request.
",
@@ -860,6 +887,50 @@
"QueryExecution$Status": "The completion date, current state, submission time, and state change reason (if applicable) for the query execution.
"
}
},
+ "QueryRuntimeStatistics": {
+ "base": "The query execution timeline, statistics on input and output rows and bytes, and the different query stages that form the query execution plan.
",
+ "refs": {
+ "GetQueryRuntimeStatisticsOutput$QueryRuntimeStatistics": "Runtime statistics about the query execution.
"
+ }
+ },
+ "QueryRuntimeStatisticsRows": {
+ "base": "Statistics such as input rows and bytes read by the query, rows and bytes output by the query, and the number of rows written by the query.
",
+ "refs": {
+ "QueryRuntimeStatistics$Rows": null
+ }
+ },
+ "QueryRuntimeStatisticsTimeline": {
+ "base": "Timeline statistics such as query queue time, planning time, execution time, service processing time, and total execution time.
",
+ "refs": {
+ "QueryRuntimeStatistics$Timeline": null
+ }
+ },
+ "QueryStage": {
+ "base": "Stage statistics such as input and output rows and bytes, execution time and stage state. This information also includes substages and the query stage plan.
",
+ "refs": {
+ "QueryRuntimeStatistics$OutputStage": "Stage statistics such as input and output rows and bytes, execution time, and stage state. This information also includes substages and the query stage plan.
",
+ "QueryStages$member": null
+ }
+ },
+ "QueryStagePlanNode": {
+ "base": "Stage plan information such as name, identifier, sub plans, and remote sources.
",
+ "refs": {
+ "QueryStage$QueryStagePlan": "Stage plan information such as name, identifier, sub plans, and source stages.
",
+ "QueryStagePlanNodes$member": null
+ }
+ },
+ "QueryStagePlanNodes": {
+ "base": null,
+ "refs": {
+ "QueryStagePlanNode$Children": "Stage plan information such as name, identifier, sub plans, and remote sources of child plan nodes/
"
+ }
+ },
+ "QueryStages": {
+ "base": null,
+ "refs": {
+ "QueryStage$SubStages": "List of sub query stages that form this stage execution plan.
"
+ }
+ },
"QueryString": {
"base": null,
"refs": {
@@ -980,7 +1051,17 @@
"ColumnInfo$Type": "The data type of the column.
",
"EncryptionConfiguration$KmsKey": "For SSE_KMS
and CSE_KMS
, this is the KMS key ARN or ID.
",
"QueryExecutionStatistics$DataManifestLocation": "The location and file name of a data manifest file. The manifest file is saved to the Athena query results location in Amazon S3. The manifest file tracks files that the query wrote to Amazon S3. If the query fails, the manifest file also tracks files that the query intended to write. The manifest is useful for identifying orphaned files resulting from a failed query. For more information, see Working with Query Results, Output Files, and Query History in the Amazon Athena User Guide.
",
- "QueryExecutionStatus$StateChangeReason": "Further detail about the status of the query.
"
+ "QueryExecutionStatus$StateChangeReason": "Further detail about the status of the query.
",
+ "QueryStage$State": "State of the stage after query execution.
",
+ "QueryStagePlanNode$Name": "Name of the query stage plan that describes the operation this stage is performing as part of query execution.
",
+ "QueryStagePlanNode$Identifier": "Information about the operation this query stage plan node is performing.
",
+ "StringList$member": null
+ }
+ },
+ "StringList": {
+ "base": null,
+ "refs": {
+ "QueryStagePlanNode$RemoteSources": "Source plan node IDs.
"
}
},
"TableMetadata": {
diff --git a/apis/athena/2017-05-18/paginators-1.json b/apis/athena/2017-05-18/paginators-1.json
index d813cda77ae..305dbd384d5 100644
--- a/apis/athena/2017-05-18/paginators-1.json
+++ b/apis/athena/2017-05-18/paginators-1.json
@@ -17,6 +17,11 @@
"output_token": "NextToken",
"result_key": "DatabaseList"
},
+ "ListEngineVersions": {
+ "input_token": "NextToken",
+ "limit_key": "MaxResults",
+ "output_token": "NextToken"
+ },
"ListNamedQueries": {
"input_token": "NextToken",
"limit_key": "MaxResults",
diff --git a/apis/dms/2016-01-01/docs-2.json b/apis/dms/2016-01-01/docs-2.json
index 428db295af6..7fbc32b95cd 100644
--- a/apis/dms/2016-01-01/docs-2.json
+++ b/apis/dms/2016-01-01/docs-2.json
@@ -1968,7 +1968,7 @@
"Connection$EndpointIdentifier": "The identifier of the endpoint. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
",
"Connection$ReplicationInstanceIdentifier": "The replication instance identifier. This parameter is stored as a lowercase string.
",
"CreateEndpointMessage$EndpointIdentifier": "The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.
",
- "CreateEndpointMessage$EngineName": "The type of engine for the endpoint. Valid values, depending on the EndpointType
value, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"opensearch\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"docdb\"
, \"sqlserver\"
, and \"neptune\"
.
",
+ "CreateEndpointMessage$EngineName": "The type of engine for the endpoint. Valid values, depending on the EndpointType
value, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"opensearch\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"db2-zos\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"docdb\"
, \"sqlserver\"
, \"neptune\"
, and \"babelfish\"
.
",
"CreateEndpointMessage$Username": "The user name to be used to log in to the endpoint database.
",
"CreateEndpointMessage$ServerName": "The name of the server where the endpoint database resides.
",
"CreateEndpointMessage$DatabaseName": "The name of the endpoint database. For a MySQL source or target endpoint, do not specify DatabaseName. To migrate to a specific database, use this setting and targetDbType
.
",
@@ -2393,7 +2393,7 @@
"SybaseSettings$SecretsManagerSecretId": "The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the SAP SAE endpoint connection details.
",
"TableStatistics$SchemaName": "The schema name.
",
"TableStatistics$TableName": "The name of the table.
",
- "TableStatistics$TableState": "The state of the tables described.
Valid states: Table does not exist | Before load | Full load | Table completed | Table cancelled | Table error | Table all | Table updates | Table is being reloaded
",
+ "TableStatistics$TableState": "The state of the tables described.
Valid states: Table does not exist | Before load | Full load | Table completed | Table cancelled | Table error | Table is being reloaded
",
"TableStatistics$ValidationState": "The validation state of the table.
This parameter can have the following values:
-
Not enabled – Validation isn't enabled for the table in the migration task.
-
Pending records – Some records in the table are waiting for validation.
-
Mismatched records – Some records in the table don't match between the source and target.
-
Suspended records – Some records in the table couldn't be validated.
-
No primary key – The table couldn't be validated because it has no primary key.
-
Table error – The table wasn't validated because it's in an error state and some data wasn't migrated.
-
Validated – All rows in the table are validated. If the table is updated, the status can change from Validated.
-
Error – The table couldn't be validated because of an unexpected error.
-
Pending validation – The table is awaiting validation.
-
Preparing table – Preparing the table enabled in the migration task for validation.
-
Pending revalidation – All rows in the table are pending validation after the table was updated.
",
"TableStatistics$ValidationStateDetails": "Additional details about the state of validation.
",
"TableToReload$SchemaName": "The schema name of the table to be reloaded.
",
diff --git a/apis/docdb/2014-10-31/api-2.json b/apis/docdb/2014-10-31/api-2.json
index 958a61699cc..6b7ddaa68bd 100644
--- a/apis/docdb/2014-10-31/api-2.json
+++ b/apis/docdb/2014-10-31/api-2.json
@@ -1202,6 +1202,7 @@
"AutoMinorVersionUpgrade":{"shape":"BooleanOptional"},
"Tags":{"shape":"TagList"},
"DBClusterIdentifier":{"shape":"String"},
+ "CopyTagsToSnapshot":{"shape":"BooleanOptional"},
"PromotionTier":{"shape":"IntegerOptional"},
"EnablePerformanceInsights":{"shape":"BooleanOptional"},
"PerformanceInsightsKMSKeyId":{"shape":"String"}
@@ -1305,6 +1306,7 @@
"DbClusterResourceId":{"shape":"String"},
"DBClusterArn":{"shape":"String"},
"AssociatedRoles":{"shape":"DBClusterRoles"},
+ "CloneGroupId":{"shape":"String"},
"ClusterCreateTime":{"shape":"TStamp"},
"EnabledCloudwatchLogsExports":{"shape":"LogTypeList"},
"DeletionProtection":{"shape":"Boolean"}
@@ -1571,6 +1573,7 @@
"KmsKeyId":{"shape":"String"},
"DbiResourceId":{"shape":"String"},
"CACertificateIdentifier":{"shape":"String"},
+ "CopyTagsToSnapshot":{"shape":"BooleanOptional"},
"PromotionTier":{"shape":"IntegerOptional"},
"DBInstanceArn":{"shape":"String"},
"EnabledCloudwatchLogsExports":{"shape":"LogTypeList"}
@@ -2558,6 +2561,7 @@
"AutoMinorVersionUpgrade":{"shape":"BooleanOptional"},
"NewDBInstanceIdentifier":{"shape":"String"},
"CACertificateIdentifier":{"shape":"String"},
+ "CopyTagsToSnapshot":{"shape":"BooleanOptional"},
"PromotionTier":{"shape":"IntegerOptional"},
"EnablePerformanceInsights":{"shape":"BooleanOptional"},
"PerformanceInsightsKMSKeyId":{"shape":"String"}
@@ -2859,6 +2863,7 @@
],
"members":{
"DBClusterIdentifier":{"shape":"String"},
+ "RestoreType":{"shape":"String"},
"SourceDBClusterIdentifier":{"shape":"String"},
"RestoreToTime":{"shape":"TStamp"},
"UseLatestRestorableTime":{"shape":"Boolean"},
diff --git a/apis/docdb/2014-10-31/docs-2.json b/apis/docdb/2014-10-31/docs-2.json
index 8093117c078..1bde6821075 100644
--- a/apis/docdb/2014-10-31/docs-2.json
+++ b/apis/docdb/2014-10-31/docs-2.json
@@ -159,10 +159,12 @@
"CreateDBClusterMessage$StorageEncrypted": "Specifies whether the cluster is encrypted.
",
"CreateDBClusterMessage$DeletionProtection": "Specifies whether this cluster can be deleted. If DeletionProtection
is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection
is disabled. DeletionProtection
protects clusters from being accidentally deleted.
",
"CreateDBInstanceMessage$AutoMinorVersionUpgrade": "This parameter does not apply to Amazon DocumentDB. Amazon DocumentDB does not perform minor version upgrades regardless of the value set.
Default: false
",
+ "CreateDBInstanceMessage$CopyTagsToSnapshot": "A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
",
"CreateDBInstanceMessage$EnablePerformanceInsights": "A value that indicates whether to enable Performance Insights for the DB Instance. For more information, see Using Amazon Performance Insights.
",
"CreateEventSubscriptionMessage$Enabled": " A Boolean value; set to true
to activate the subscription, set to false
to create the subscription but not active it.
",
"CreateGlobalClusterMessage$DeletionProtection": "The deletion protection setting for the new global cluster. The global cluster can't be deleted when deletion protection is enabled.
",
"CreateGlobalClusterMessage$StorageEncrypted": "The storage encryption setting for the new global cluster.
",
+ "DBInstance$CopyTagsToSnapshot": "A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
",
"DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "If this parameter is specified and the requested engine supports the CharacterSetName
parameter for CreateDBInstance
, the response includes a list of supported character sets for each engine version.
",
"DescribeDBEngineVersionsMessage$ListSupportedTimezones": "If this parameter is specified and the requested engine supports the TimeZone
parameter for CreateDBInstance
, the response includes a list of supported time zones for each engine version.
",
"DescribeOrderableDBInstanceOptionsMessage$Vpc": "The virtual private cloud (VPC) filter value. Specify this parameter to show only the available VPC or non-VPC offerings.
",
@@ -170,6 +172,7 @@
"GlobalCluster$DeletionProtection": "The deletion protection setting for the new global cluster.
",
"ModifyDBClusterMessage$DeletionProtection": "Specifies whether this cluster can be deleted. If DeletionProtection
is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection
is disabled. DeletionProtection
protects clusters from being accidentally deleted.
",
"ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "This parameter does not apply to Amazon DocumentDB. Amazon DocumentDB does not perform minor version upgrades regardless of the value set.
",
+ "ModifyDBInstanceMessage$CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
",
"ModifyDBInstanceMessage$EnablePerformanceInsights": "A value that indicates whether to enable Performance Insights for the DB Instance. For more information, see Using Amazon Performance Insights.
",
"ModifyEventSubscriptionMessage$Enabled": " A Boolean value; set to true
to activate the subscription.
",
"ModifyGlobalClusterMessage$DeletionProtection": "Indicates if the global cluster has deletion protection enabled. The global cluster can't be deleted when deletion protection is enabled.
",
@@ -1446,6 +1449,7 @@
"DBCluster$KmsKeyId": "If StorageEncrypted
is true
, the KMS key identifier for the encrypted cluster.
",
"DBCluster$DbClusterResourceId": "The Amazon Web Services Region-unique, immutable identifier for the cluster. This identifier is found in CloudTrail log entries whenever the KMS key for the cluster is accessed.
",
"DBCluster$DBClusterArn": "The Amazon Resource Name (ARN) for the cluster.
",
+ "DBCluster$CloneGroupId": "Identifies the clone group to which the DB cluster is associated.
",
"DBClusterMember$DBInstanceIdentifier": "Specifies the instance identifier for this member of the cluster.
",
"DBClusterMember$DBClusterParameterGroupStatus": "Specifies the status of the cluster parameter group for this member of the DB cluster.
",
"DBClusterMessage$Marker": "An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
",
@@ -1640,6 +1644,7 @@
"RestoreDBClusterFromSnapshotMessage$DBSubnetGroupName": "The name of the subnet group to use for the new cluster.
Constraints: If provided, must match the name of an existing DBSubnetGroup
.
Example: mySubnetgroup
",
"RestoreDBClusterFromSnapshotMessage$KmsKeyId": "The KMS key identifier to use when restoring an encrypted cluster from a DB snapshot or cluster snapshot.
The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a cluster with the same Amazon Web Services account that owns the KMS encryption key used to encrypt the new cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.
If you do not specify a value for the KmsKeyId
parameter, then the following occurs:
-
If the snapshot or cluster snapshot in SnapshotIdentifier
is encrypted, then the restored cluster is encrypted using the KMS key that was used to encrypt the snapshot or the cluster snapshot.
-
If the snapshot or the cluster snapshot in SnapshotIdentifier
is not encrypted, then the restored DB cluster is not encrypted.
",
"RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "The name of the new cluster to be created.
Constraints:
-
Must contain from 1 to 63 letters, numbers, or hyphens.
-
The first character must be a letter.
-
Cannot end with a hyphen or contain two consecutive hyphens.
",
+ "RestoreDBClusterToPointInTimeMessage$RestoreType": "The type of restore to be performed. You can specify one of the following values:
Constraints: You can't specify copy-on-write
if the engine version of the source DB cluster is earlier than 1.11.
If you don't specify a RestoreType
value, then the new DB cluster is restored as a full copy of the source DB cluster.
",
"RestoreDBClusterToPointInTimeMessage$SourceDBClusterIdentifier": "The identifier of the source cluster from which to restore.
Constraints:
",
"RestoreDBClusterToPointInTimeMessage$DBSubnetGroupName": "The subnet group name to use for the new cluster.
Constraints: If provided, must match the name of an existing DBSubnetGroup
.
Example: mySubnetgroup
",
"RestoreDBClusterToPointInTimeMessage$KmsKeyId": "The KMS key identifier to use when restoring an encrypted cluster from an encrypted cluster.
The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a cluster with the same Amazon Web Services account that owns the KMS encryption key used to encrypt the new cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.
You can restore to a new cluster and encrypt the new cluster with an KMS key that is different from the KMS key used to encrypt the source cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId
parameter.
If you do not specify a value for the KmsKeyId
parameter, then the following occurs:
-
If the cluster is encrypted, then the restored cluster is encrypted using the KMS key that was used to encrypt the source cluster.
-
If the cluster is not encrypted, then the restored cluster is not encrypted.
If DBClusterIdentifier
refers to a cluster that is not encrypted, then the restore request is rejected.
",
diff --git a/apis/ec2-instance-connect/2018-04-02/api-2.json b/apis/ec2-instance-connect/2018-04-02/api-2.json
index 26c1734a4f5..2e8577d6406 100644
--- a/apis/ec2-instance-connect/2018-04-02/api-2.json
+++ b/apis/ec2-instance-connect/2018-04-02/api-2.json
@@ -27,7 +27,8 @@
{"shape":"ServiceException"},
{"shape":"ThrottlingException"},
{"shape":"EC2InstanceNotFoundException"},
- {"shape":"EC2InstanceStateInvalidException"}
+ {"shape":"EC2InstanceStateInvalidException"},
+ {"shape":"EC2InstanceUnavailableException"}
]
},
"SendSerialConsoleSSHPublicKey":{
@@ -48,7 +49,8 @@
{"shape":"EC2InstanceTypeInvalidException"},
{"shape":"SerialConsoleSessionLimitExceededException"},
{"shape":"SerialConsoleSessionUnavailableException"},
- {"shape":"EC2InstanceStateInvalidException"}
+ {"shape":"EC2InstanceStateInvalidException"},
+ {"shape":"EC2InstanceUnavailableException"}
]
}
},
@@ -87,6 +89,13 @@
},
"exception":true
},
+ "EC2InstanceUnavailableException":{
+ "type":"structure",
+ "members":{
+ "Message":{"shape":"String"}
+ },
+ "exception":true
+ },
"InstanceId":{
"type":"string",
"max":32,
diff --git a/apis/ec2-instance-connect/2018-04-02/docs-2.json b/apis/ec2-instance-connect/2018-04-02/docs-2.json
index a7da98023a7..bab3ed21f4e 100644
--- a/apis/ec2-instance-connect/2018-04-02/docs-2.json
+++ b/apis/ec2-instance-connect/2018-04-02/docs-2.json
@@ -32,6 +32,11 @@
"refs": {
}
},
+ "EC2InstanceUnavailableException": {
+ "base": "The instance is currently unavailable. Wait a few minutes and try again.
",
+ "refs": {
+ }
+ },
"InstanceId": {
"base": null,
"refs": {
@@ -117,6 +122,7 @@
"EC2InstanceNotFoundException$Message": null,
"EC2InstanceStateInvalidException$Message": null,
"EC2InstanceTypeInvalidException$Message": null,
+ "EC2InstanceUnavailableException$Message": null,
"InvalidArgsException$Message": null,
"SerialConsoleAccessDisabledException$Message": null,
"SerialConsoleSessionLimitExceededException$Message": null,
diff --git a/apis/frauddetector/2019-11-15/api-2.json b/apis/frauddetector/2019-11-15/api-2.json
index e8e8725be0a..43bf76093cd 100644
--- a/apis/frauddetector/2019-11-15/api-2.json
+++ b/apis/frauddetector/2019-11-15/api-2.json
@@ -1102,6 +1102,32 @@
}
},
"shapes":{
+ "ATIMetricDataPoint":{
+ "type":"structure",
+ "members":{
+ "cr":{"shape":"float"},
+ "adr":{"shape":"float"},
+ "threshold":{"shape":"float"},
+ "atodr":{"shape":"float"}
+ }
+ },
+ "ATIMetricDataPointsList":{
+ "type":"list",
+ "member":{"shape":"ATIMetricDataPoint"}
+ },
+ "ATIModelPerformance":{
+ "type":"structure",
+ "members":{
+ "asi":{"shape":"float"}
+ }
+ },
+ "ATITrainingMetricsValue":{
+ "type":"structure",
+ "members":{
+ "metricDataPoints":{"shape":"ATIMetricDataPointsList"},
+ "modelPerformance":{"shape":"ATIModelPerformance"}
+ }
+ },
"AccessDeniedException":{
"type":"structure",
"required":["message"],
@@ -1110,6 +1136,31 @@
},
"exception":true
},
+ "AggregatedLogOddsMetric":{
+ "type":"structure",
+ "required":[
+ "variableNames",
+ "aggregatedVariablesImportance"
+ ],
+ "members":{
+ "variableNames":{"shape":"ListOfStrings"},
+ "aggregatedVariablesImportance":{"shape":"float"}
+ }
+ },
+ "AggregatedVariablesImpactExplanation":{
+ "type":"structure",
+ "members":{
+ "eventVariableNames":{"shape":"ListOfStrings"},
+ "relativeImpact":{"shape":"string"},
+ "logOddsImpact":{"shape":"float"}
+ }
+ },
+ "AggregatedVariablesImportanceMetrics":{
+ "type":"structure",
+ "members":{
+ "logOddsMetrics":{"shape":"ListOfAggregatedLogOddsMetrics"}
+ }
+ },
"AsyncJobStatus":{
"type":"string",
"enum":[
@@ -2338,7 +2389,6 @@
},
"LabelSchema":{
"type":"structure",
- "required":["labelMapper"],
"members":{
"labelMapper":{"shape":"labelMapper"},
"unlabeledEventsTreatment":{"shape":"UnlabeledEventsTreatment"}
@@ -2367,6 +2417,14 @@
"nextToken":{"shape":"string"}
}
},
+ "ListOfAggregatedLogOddsMetrics":{
+ "type":"list",
+ "member":{"shape":"AggregatedLogOddsMetric"}
+ },
+ "ListOfAggregatedVariablesImpactExplanations":{
+ "type":"list",
+ "member":{"shape":"AggregatedVariablesImpactExplanation"}
+ },
"ListOfEvaluatedExternalModels":{
"type":"list",
"member":{"shape":"EvaluatedExternalModel"}
@@ -2536,7 +2594,8 @@
"type":"string",
"enum":[
"ONLINE_FRAUD_INSIGHTS",
- "TRANSACTION_FRAUD_INSIGHTS"
+ "TRANSACTION_FRAUD_INSIGHTS",
+ "ACCOUNT_TAKEOVER_INSIGHTS"
]
},
"ModelVersion":{
@@ -2567,7 +2626,8 @@
"trainingResult":{"shape":"TrainingResult"},
"lastUpdatedTime":{"shape":"time"},
"createdTime":{"shape":"time"},
- "arn":{"shape":"fraudDetectorArn"}
+ "arn":{"shape":"fraudDetectorArn"},
+ "trainingResultV2":{"shape":"TrainingResultV2"}
}
},
"ModelVersionEvaluation":{
@@ -2597,6 +2657,32 @@
"member":{"shape":"string"},
"min":1
},
+ "OFIMetricDataPoint":{
+ "type":"structure",
+ "members":{
+ "fpr":{"shape":"float"},
+ "precision":{"shape":"float"},
+ "tpr":{"shape":"float"},
+ "threshold":{"shape":"float"}
+ }
+ },
+ "OFIMetricDataPointsList":{
+ "type":"list",
+ "member":{"shape":"OFIMetricDataPoint"}
+ },
+ "OFIModelPerformance":{
+ "type":"structure",
+ "members":{
+ "auc":{"shape":"float"}
+ }
+ },
+ "OFITrainingMetricsValue":{
+ "type":"structure",
+ "members":{
+ "metricDataPoints":{"shape":"OFIMetricDataPointsList"},
+ "modelPerformance":{"shape":"OFIModelPerformance"}
+ }
+ },
"Outcome":{
"type":"structure",
"members":{
@@ -2620,7 +2706,8 @@
"PredictionExplanations":{
"type":"structure",
"members":{
- "variableImpactExplanations":{"shape":"listOfVariableImpactExplanations"}
+ "variableImpactExplanations":{"shape":"listOfVariableImpactExplanations"},
+ "aggregatedVariablesImpactExplanations":{"shape":"ListOfAggregatedVariablesImpactExplanations"}
}
},
"PredictionTimeRange":{
@@ -2848,6 +2935,32 @@
"members":{
}
},
+ "TFIMetricDataPoint":{
+ "type":"structure",
+ "members":{
+ "fpr":{"shape":"float"},
+ "precision":{"shape":"float"},
+ "tpr":{"shape":"float"},
+ "threshold":{"shape":"float"}
+ }
+ },
+ "TFIMetricDataPointsList":{
+ "type":"list",
+ "member":{"shape":"TFIMetricDataPoint"}
+ },
+ "TFIModelPerformance":{
+ "type":"structure",
+ "members":{
+ "auc":{"shape":"float"}
+ }
+ },
+ "TFITrainingMetricsValue":{
+ "type":"structure",
+ "members":{
+ "metricDataPoints":{"shape":"TFIMetricDataPointsList"},
+ "modelPerformance":{"shape":"TFIModelPerformance"}
+ }
+ },
"Tag":{
"type":"structure",
"required":[
@@ -2891,10 +3004,7 @@
},
"TrainingDataSchema":{
"type":"structure",
- "required":[
- "modelVariables",
- "labelSchema"
- ],
+ "required":["modelVariables"],
"members":{
"modelVariables":{"shape":"ListOfStrings"},
"labelSchema":{"shape":"LabelSchema"}
@@ -2914,6 +3024,14 @@
"metricDataPoints":{"shape":"metricDataPointsList"}
}
},
+ "TrainingMetricsV2":{
+ "type":"structure",
+ "members":{
+ "ofi":{"shape":"OFITrainingMetricsValue"},
+ "tfi":{"shape":"TFITrainingMetricsValue"},
+ "ati":{"shape":"ATITrainingMetricsValue"}
+ }
+ },
"TrainingResult":{
"type":"structure",
"members":{
@@ -2922,6 +3040,15 @@
"variableImportanceMetrics":{"shape":"VariableImportanceMetrics"}
}
},
+ "TrainingResultV2":{
+ "type":"structure",
+ "members":{
+ "dataValidationMetrics":{"shape":"DataValidationMetrics"},
+ "trainingMetricsV2":{"shape":"TrainingMetricsV2"},
+ "variableImportanceMetrics":{"shape":"VariableImportanceMetrics"},
+ "aggregatedVariablesImportanceMetrics":{"shape":"AggregatedVariablesImportanceMetrics"}
+ }
+ },
"UnlabeledEventsTreatment":{
"type":"string",
"enum":[
@@ -3312,7 +3439,7 @@
"labelMapper":{
"type":"map",
"key":{"shape":"string"},
- "value":{"shape":"NonEmptyListOfStrings"}
+ "value":{"shape":"ListOfStrings"}
},
"labelsMaxResults":{
"type":"integer",
@@ -3420,7 +3547,7 @@
},
"variableValue":{
"type":"string",
- "max":1024,
+ "max":8192,
"min":1,
"sensitive":true
},
diff --git a/apis/frauddetector/2019-11-15/docs-2.json b/apis/frauddetector/2019-11-15/docs-2.json
index dc323dce326..f46903a67ff 100644
--- a/apis/frauddetector/2019-11-15/docs-2.json
+++ b/apis/frauddetector/2019-11-15/docs-2.json
@@ -72,11 +72,53 @@
"UpdateVariable": "Updates a variable.
"
},
"shapes": {
+ "ATIMetricDataPoint": {
+ "base": " The Account Takeover Insights (ATI) model performance metrics data points.
",
+ "refs": {
+ "ATIMetricDataPointsList$member": null
+ }
+ },
+ "ATIMetricDataPointsList": {
+ "base": null,
+ "refs": {
+ "ATITrainingMetricsValue$metricDataPoints": " The model's performance metrics data points.
"
+ }
+ },
+ "ATIModelPerformance": {
+ "base": " The Account Takeover Insights (ATI) model performance score.
",
+ "refs": {
+ "ATITrainingMetricsValue$modelPerformance": " The model's overall performance scores.
"
+ }
+ },
+ "ATITrainingMetricsValue": {
+ "base": " The Account Takeover Insights (ATI) model training metric details.
",
+ "refs": {
+ "TrainingMetricsV2$ati": " The Account Takeover Insights (ATI) model training metric details.
"
+ }
+ },
"AccessDeniedException": {
"base": "An exception indicating Amazon Fraud Detector does not have the needed permissions. This can occur if you submit a request, such as PutExternalModel
, that specifies a role that is not in your account.
",
"refs": {
}
},
+ "AggregatedLogOddsMetric": {
+ "base": "The log odds metric details.
The Account Takeover Insights (ATI) model uses event variables from the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address
and user
.
",
+ "refs": {
+ "ListOfAggregatedLogOddsMetrics$member": null
+ }
+ },
+ "AggregatedVariablesImpactExplanation": {
+ "base": " The details of the impact of aggregated variables on the prediction score.
The Account Takeover Insights (ATI) model uses the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, the model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address
and user
.
",
+ "refs": {
+ "ListOfAggregatedVariablesImpactExplanations$member": null
+ }
+ },
+ "AggregatedVariablesImportanceMetrics": {
+ "base": "The details of the relative importance of the aggregated variables.
The Account Takeover Insights (ATI) model uses event variables from the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address
and user
.
",
+ "refs": {
+ "TrainingResultV2$aggregatedVariablesImportanceMetrics": " The variable importance metrics of the aggregated variables.
The Account Takeover Insights (ATI) model uses event variables from the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address
and user
.
"
+ }
+ },
"AsyncJobStatus": {
"base": null,
"refs": {
@@ -277,9 +319,10 @@
}
},
"DataValidationMetrics": {
- "base": "The model training validation messages.
",
+ "base": "The model training data validation metrics.
",
"refs": {
- "TrainingResult$dataValidationMetrics": "The validation metrics.
"
+ "TrainingResult$dataValidationMetrics": "The validation metrics.
",
+ "TrainingResultV2$dataValidationMetrics": null
}
},
"DeleteAuditHistory": {
@@ -921,6 +964,18 @@
"refs": {
}
},
+ "ListOfAggregatedLogOddsMetrics": {
+ "base": null,
+ "refs": {
+ "AggregatedVariablesImportanceMetrics$logOddsMetrics": " List of variables' metrics.
"
+ }
+ },
+ "ListOfAggregatedVariablesImpactExplanations": {
+ "base": null,
+ "refs": {
+ "PredictionExplanations$aggregatedVariablesImpactExplanations": " The details of the aggregated variables impact on the prediction score.
Account Takeover Insights (ATI) model uses event variables from the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address
and user
.
"
+ }
+ },
"ListOfEvaluatedExternalModels": {
"base": null,
"refs": {
@@ -986,6 +1041,8 @@
"ListOfStrings": {
"base": null,
"refs": {
+ "AggregatedLogOddsMetric$variableNames": " The names of all the variables.
",
+ "AggregatedVariablesImpactExplanation$eventVariableNames": " The names of all the event variables that were used to derive the aggregated variables.
",
"CreateDetectorVersionRequest$externalModelEndpoints": "The Amazon Sagemaker model endpoints to include in the detector version.
",
"EvaluatedRule$outcomes": " The rule outcome.
",
"EventType$eventVariables": "The event type event variables.
",
@@ -995,7 +1052,8 @@
"PutEventTypeRequest$labels": "The event type labels.
",
"RuleResult$outcomes": "The outcomes of the matched rule, based on the rule execution mode.
",
"TrainingDataSchema$modelVariables": "The training data schema variables.
",
- "UpdateDetectorVersionRequest$externalModelEndpoints": "The Amazon SageMaker model endpoints to include in the detector version.
"
+ "UpdateDetectorVersionRequest$externalModelEndpoints": "The Amazon SageMaker model endpoints to include in the detector version.
",
+ "labelMapper$value": null
}
},
"ListTagsForResourceRequest": {
@@ -1159,8 +1217,31 @@
"PutEventTypeRequest$eventVariables": "The event type variables.
",
"PutEventTypeRequest$entityTypes": "The entity type for the event type. Example entity types: customer, merchant, account.
",
"RuleDetail$outcomes": "The rule outcomes.
",
- "UpdateRuleVersionRequest$outcomes": "The outcomes.
",
- "labelMapper$value": null
+ "UpdateRuleVersionRequest$outcomes": "The outcomes.
"
+ }
+ },
+ "OFIMetricDataPoint": {
+ "base": " The Online Fraud Insights (OFI) model performance metrics data points.
",
+ "refs": {
+ "OFIMetricDataPointsList$member": null
+ }
+ },
+ "OFIMetricDataPointsList": {
+ "base": null,
+ "refs": {
+ "OFITrainingMetricsValue$metricDataPoints": " The model's performance metrics data points.
"
+ }
+ },
+ "OFIModelPerformance": {
+ "base": " The Online Fraud Insights (OFI) model performance score.
",
+ "refs": {
+ "OFITrainingMetricsValue$modelPerformance": " The model's overall performance score.
"
+ }
+ },
+ "OFITrainingMetricsValue": {
+ "base": " The Online Fraud Insights (OFI) model training metric details.
",
+ "refs": {
+ "TrainingMetricsV2$ofi": " The Online Fraud Insights (OFI) model training metric details.
"
}
},
"Outcome": {
@@ -1335,6 +1416,30 @@
"refs": {
}
},
+ "TFIMetricDataPoint": {
+ "base": " The performance metrics data points for Transaction Fraud Insights (TFI) model.
",
+ "refs": {
+ "TFIMetricDataPointsList$member": null
+ }
+ },
+ "TFIMetricDataPointsList": {
+ "base": null,
+ "refs": {
+ "TFITrainingMetricsValue$metricDataPoints": " The model's performance metrics data points.
"
+ }
+ },
+ "TFIModelPerformance": {
+ "base": " The Transaction Fraud Insights (TFI) model performance score.
",
+ "refs": {
+ "TFITrainingMetricsValue$modelPerformance": " The model performance score.
"
+ }
+ },
+ "TFITrainingMetricsValue": {
+ "base": " The Transaction Fraud Insights (TFI) model training metric details.
",
+ "refs": {
+ "TrainingMetricsV2$tfi": " The Transaction Fraud Insights (TFI) model training metric details.
"
+ }
+ },
"Tag": {
"base": "A key and value pair.
",
"refs": {
@@ -1384,12 +1489,24 @@
"TrainingResult$trainingMetrics": "The training metric details.
"
}
},
+ "TrainingMetricsV2": {
+ "base": " The training metrics details.
",
+ "refs": {
+ "TrainingResultV2$trainingMetricsV2": " The training metric details.
"
+ }
+ },
"TrainingResult": {
"base": "The training result details.
",
"refs": {
"ModelVersionDetail$trainingResult": "The training results.
"
}
},
+ "TrainingResultV2": {
+ "base": " The training result details.
",
+ "refs": {
+ "ModelVersionDetail$trainingResultV2": " The training result details. The details include the relative importance of the variables.
"
+ }
+ },
"UnlabeledEventsTreatment": {
"base": null,
"refs": {
@@ -1544,7 +1661,8 @@
"VariableImportanceMetrics": {
"base": "The variable importance metrics details.
",
"refs": {
- "TrainingResult$variableImportanceMetrics": "The variable importance metrics.
"
+ "TrainingResult$variableImportanceMetrics": "The variable importance metrics.
",
+ "TrainingResultV2$variableImportanceMetrics": null
}
},
"VariableList": {
@@ -1662,7 +1780,7 @@
"fileValidationMessageList": {
"base": null,
"refs": {
- "DataValidationMetrics$fileLevelMessages": "The file-specific model training validation messages.
"
+ "DataValidationMetrics$fileLevelMessages": "The file-specific model training data validation messages.
"
}
},
"filterString": {
@@ -1674,12 +1792,29 @@
"float": {
"base": null,
"refs": {
+ "ATIMetricDataPoint$cr": " The challenge rate. This indicates the percentage of login events that the model recommends to challenge such as one-time password, multi-factor authentication, and investigations.
",
+ "ATIMetricDataPoint$adr": " The anomaly discovery rate. This metric quantifies the percentage of anomalies that can be detected by the model at the selected score threshold. A lower score threshold increases the percentage of anomalies captured by the model, but would also require challenging a larger percentage of login events, leading to a higher customer friction.
",
+ "ATIMetricDataPoint$threshold": " The model's threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.
",
+ "ATIMetricDataPoint$atodr": " The account takeover discovery rate. This metric quantifies the percentage of account compromise events that can be detected by the model at the selected score threshold. This metric is only available if 50 or more entities with at-least one labeled account takeover event is present in the ingested dataset.
",
+ "ATIModelPerformance$asi": " The anomaly separation index (ASI) score. This metric summarizes the overall ability of the model to separate anomalous activities from the normal behavior. Depending on the business, a large fraction of these anomalous activities can be malicious and correspond to the account takeover attacks. A model with no separability power will have the lowest possible ASI score of 0.5, whereas the a model with a high separability power will have the highest possible ASI score of 1.0
",
+ "AggregatedLogOddsMetric$aggregatedVariablesImportance": " The relative importance of the variables in the list to the other event variable.
",
+ "AggregatedVariablesImpactExplanation$logOddsImpact": " The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 to +10, but range from -infinity to +infinity.
",
"LogOddsMetric$variableImportance": "The relative importance of the variable. For more information, see Model variable importance.
",
"MetricDataPoint$fpr": "The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.
",
"MetricDataPoint$precision": "The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.
",
"MetricDataPoint$tpr": "The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.
",
"MetricDataPoint$threshold": "The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.
",
"ModelPredictionMap$value": null,
+ "OFIMetricDataPoint$fpr": " The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.
",
+ "OFIMetricDataPoint$precision": " The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.
",
+ "OFIMetricDataPoint$tpr": " The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.
",
+ "OFIMetricDataPoint$threshold": " The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.
",
+ "OFIModelPerformance$auc": " The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds.
",
+ "TFIMetricDataPoint$fpr": " The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.
",
+ "TFIMetricDataPoint$precision": " The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.
",
+ "TFIMetricDataPoint$tpr": " The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.
",
+ "TFIMetricDataPoint$threshold": " The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.
",
+ "TFIModelPerformance$auc": " The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds.
",
"TrainingMetrics$auc": "The area under the curve. This summarizes true positive rate (TPR) and false positive rate (FPR) across all possible model score thresholds. A model with no predictive power has an AUC of 0.5, whereas a perfect model has a score of 1.0.
",
"VariableImpactExplanation$logOddsImpact": " The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 to +10, but range from - infinity to + infinity.
"
}
@@ -1950,6 +2085,7 @@
"base": null,
"refs": {
"AccessDeniedException$message": null,
+ "AggregatedVariablesImpactExplanation$relativeImpact": " The relative impact of the aggregated variables in terms of magnitude on the prediction scores.
",
"BatchCreateVariableError$name": "The name.
",
"BatchCreateVariableError$message": "The error message.
",
"BatchGetVariableError$name": "The error name.
",
diff --git a/apis/iotsitewise/2019-12-02/api-2.json b/apis/iotsitewise/2019-12-02/api-2.json
index e26ba18b982..72ecb1779b7 100644
--- a/apis/iotsitewise/2019-12-02/api-2.json
+++ b/apis/iotsitewise/2019-12-02/api-2.json
@@ -205,6 +205,26 @@
],
"endpoint":{"hostPrefix":"api."}
},
+ "CreateBulkImportJob":{
+ "name":"CreateBulkImportJob",
+ "http":{
+ "method":"POST",
+ "requestUri":"/jobs",
+ "responseCode":202
+ },
+ "input":{"shape":"CreateBulkImportJobRequest"},
+ "output":{"shape":"CreateBulkImportJobResponse"},
+ "errors":[
+ {"shape":"InvalidRequestException"},
+ {"shape":"ResourceAlreadyExistsException"},
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalFailureException"},
+ {"shape":"ThrottlingException"},
+ {"shape":"LimitExceededException"},
+ {"shape":"ConflictingOperationException"}
+ ],
+ "endpoint":{"hostPrefix":"data."}
+ },
"CreateDashboard":{
"name":"CreateDashboard",
"http":{
@@ -478,6 +498,22 @@
],
"endpoint":{"hostPrefix":"api."}
},
+ "DescribeBulkImportJob":{
+ "name":"DescribeBulkImportJob",
+ "http":{
+ "method":"GET",
+ "requestUri":"/jobs/{jobId}"
+ },
+ "input":{"shape":"DescribeBulkImportJobRequest"},
+ "output":{"shape":"DescribeBulkImportJobResponse"},
+ "errors":[
+ {"shape":"InvalidRequestException"},
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalFailureException"},
+ {"shape":"ThrottlingException"}
+ ],
+ "endpoint":{"hostPrefix":"data."}
+ },
"DescribeDashboard":{
"name":"DescribeDashboard",
"http":{
@@ -805,6 +841,22 @@
],
"endpoint":{"hostPrefix":"api."}
},
+ "ListBulkImportJobs":{
+ "name":"ListBulkImportJobs",
+ "http":{
+ "method":"GET",
+ "requestUri":"/jobs"
+ },
+ "input":{"shape":"ListBulkImportJobsRequest"},
+ "output":{"shape":"ListBulkImportJobsResponse"},
+ "errors":[
+ {"shape":"InvalidRequestException"},
+ {"shape":"InternalFailureException"},
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"ThrottlingException"}
+ ],
+ "endpoint":{"hostPrefix":"data."}
+ },
"ListDashboards":{
"name":"ListDashboards",
"http":{
@@ -2120,6 +2172,11 @@
"errorEntries":{"shape":"BatchPutAssetPropertyErrorEntries"}
}
},
+ "Bucket":{
+ "type":"string",
+ "max":63,
+ "min":3
+ },
"CapabilityConfiguration":{
"type":"string",
"max":104857600,
@@ -2146,6 +2203,23 @@
"min":36,
"pattern":"\\S{36,64}"
},
+ "ColumnName":{
+ "type":"string",
+ "enum":[
+ "ALIAS",
+ "ASSET_ID",
+ "PROPERTY_ID",
+ "DATA_TYPE",
+ "TIMESTAMP_SECONDS",
+ "TIMESTAMP_NANO_OFFSET",
+ "QUALITY",
+ "VALUE"
+ ]
+ },
+ "ColumnNames":{
+ "type":"list",
+ "member":{"shape":"ColumnName"}
+ },
"CompositeModelProperty":{
"type":"structure",
"required":[
@@ -2301,6 +2375,36 @@
"assetStatus":{"shape":"AssetStatus"}
}
},
+ "CreateBulkImportJobRequest":{
+ "type":"structure",
+ "required":[
+ "jobName",
+ "jobRoleArn",
+ "files",
+ "errorReportLocation",
+ "jobConfiguration"
+ ],
+ "members":{
+ "jobName":{"shape":"Name"},
+ "jobRoleArn":{"shape":"ARN"},
+ "files":{"shape":"Files"},
+ "errorReportLocation":{"shape":"ErrorReportLocation"},
+ "jobConfiguration":{"shape":"JobConfiguration"}
+ }
+ },
+ "CreateBulkImportJobResponse":{
+ "type":"structure",
+ "required":[
+ "jobId",
+ "jobName",
+ "jobStatus"
+ ],
+ "members":{
+ "jobId":{"shape":"ID"},
+ "jobName":{"shape":"Name"},
+ "jobStatus":{"shape":"JobStatus"}
+ }
+ },
"CreateDashboardRequest":{
"type":"structure",
"required":[
@@ -2422,6 +2526,12 @@
"projectArn":{"shape":"ARN"}
}
},
+ "Csv":{
+ "type":"structure",
+ "members":{
+ "columnNames":{"shape":"ColumnNames"}
+ }
+ },
"CustomerManagedS3Storage":{
"type":"structure",
"required":[
@@ -2777,6 +2887,42 @@
"assetDescription":{"shape":"Description"}
}
},
+ "DescribeBulkImportJobRequest":{
+ "type":"structure",
+ "required":["jobId"],
+ "members":{
+ "jobId":{
+ "shape":"ID",
+ "location":"uri",
+ "locationName":"jobId"
+ }
+ }
+ },
+ "DescribeBulkImportJobResponse":{
+ "type":"structure",
+ "required":[
+ "jobId",
+ "jobName",
+ "jobStatus",
+ "jobRoleArn",
+ "files",
+ "errorReportLocation",
+ "jobConfiguration",
+ "jobCreationDate",
+ "jobLastUpdateDate"
+ ],
+ "members":{
+ "jobId":{"shape":"ID"},
+ "jobName":{"shape":"Name"},
+ "jobStatus":{"shape":"JobStatus"},
+ "jobRoleArn":{"shape":"ARN"},
+ "files":{"shape":"Files"},
+ "errorReportLocation":{"shape":"ErrorReportLocation"},
+ "jobConfiguration":{"shape":"JobConfiguration"},
+ "jobCreationDate":{"shape":"Timestamp"},
+ "jobLastUpdateDate":{"shape":"Timestamp"}
+ }
+ },
"DescribeDashboardRequest":{
"type":"structure",
"required":["dashboardId"],
@@ -3161,6 +3307,17 @@
}
},
"ErrorMessage":{"type":"string"},
+ "ErrorReportLocation":{
+ "type":"structure",
+ "required":[
+ "bucket",
+ "prefix"
+ ],
+ "members":{
+ "bucket":{"shape":"Bucket"},
+ "prefix":{"shape":"String"}
+ }
+ },
"ExceptionMessage":{"type":"string"},
"Expression":{
"type":"string",
@@ -3182,6 +3339,28 @@
"type":"list",
"member":{"shape":"ExpressionVariable"}
},
+ "File":{
+ "type":"structure",
+ "required":[
+ "bucket",
+ "key"
+ ],
+ "members":{
+ "bucket":{"shape":"Bucket"},
+ "key":{"shape":"String"},
+ "versionId":{"shape":"String"}
+ }
+ },
+ "FileFormat":{
+ "type":"structure",
+ "members":{
+ "csv":{"shape":"Csv"}
+ }
+ },
+ "Files":{
+ "type":"list",
+ "member":{"shape":"File"}
+ },
"ForwardingConfig":{
"type":"structure",
"required":["state"],
@@ -3644,6 +3823,41 @@
"error":{"httpStatusCode":400},
"exception":true
},
+ "JobConfiguration":{
+ "type":"structure",
+ "required":["fileFormat"],
+ "members":{
+ "fileFormat":{"shape":"FileFormat"}
+ }
+ },
+ "JobStatus":{
+ "type":"string",
+ "enum":[
+ "PENDING",
+ "CANCELLED",
+ "RUNNING",
+ "COMPLETED",
+ "FAILED",
+ "COMPLETED_WITH_FAILURES"
+ ]
+ },
+ "JobSummaries":{
+ "type":"list",
+ "member":{"shape":"JobSummary"}
+ },
+ "JobSummary":{
+ "type":"structure",
+ "required":[
+ "id",
+ "name",
+ "status"
+ ],
+ "members":{
+ "id":{"shape":"ID"},
+ "name":{"shape":"Name"},
+ "status":{"shape":"JobStatus"}
+ }
+ },
"KmsKeyId":{
"type":"string",
"max":2048,
@@ -3845,6 +4059,46 @@
"nextToken":{"shape":"NextToken"}
}
},
+ "ListBulkImportJobsFilter":{
+ "type":"string",
+ "enum":[
+ "ALL",
+ "PENDING",
+ "RUNNING",
+ "CANCELLED",
+ "FAILED",
+ "COMPLETED_WITH_FAILURES",
+ "COMPLETED"
+ ]
+ },
+ "ListBulkImportJobsRequest":{
+ "type":"structure",
+ "members":{
+ "nextToken":{
+ "shape":"NextToken",
+ "location":"querystring",
+ "locationName":"nextToken"
+ },
+ "maxResults":{
+ "shape":"MaxResults",
+ "location":"querystring",
+ "locationName":"maxResults"
+ },
+ "filter":{
+ "shape":"ListBulkImportJobsFilter",
+ "location":"querystring",
+ "locationName":"filter"
+ }
+ }
+ },
+ "ListBulkImportJobsResponse":{
+ "type":"structure",
+ "required":["jobSummaries"],
+ "members":{
+ "jobSummaries":{"shape":"JobSummaries"},
+ "nextToken":{"shape":"NextToken"}
+ }
+ },
"ListDashboardsRequest":{
"type":"structure",
"required":["projectId"],
@@ -4478,6 +4732,7 @@
"MULTI_LAYER_STORAGE"
]
},
+ "String":{"type":"string"},
"TagKey":{
"type":"string",
"max":128,
diff --git a/apis/iotsitewise/2019-12-02/docs-2.json b/apis/iotsitewise/2019-12-02/docs-2.json
index 005fdbaa566..d046d162e24 100644
--- a/apis/iotsitewise/2019-12-02/docs-2.json
+++ b/apis/iotsitewise/2019-12-02/docs-2.json
@@ -13,6 +13,7 @@
"CreateAccessPolicy": "Creates an access policy that grants the specified identity (Amazon Web Services SSO user, Amazon Web Services SSO group, or IAM user) access to the specified IoT SiteWise Monitor portal or project resource.
",
"CreateAsset": "Creates an asset from an existing asset model. For more information, see Creating assets in the IoT SiteWise User Guide.
",
"CreateAssetModel": "Creates an asset model from specified property and hierarchy definitions. You create assets from asset models. With asset models, you can easily create assets of the same type that have standardized definitions. Each asset created from a model inherits the asset model's property and hierarchy definitions. For more information, see Defining asset models in the IoT SiteWise User Guide.
",
+ "CreateBulkImportJob": " This API operation is in preview release for IoT SiteWise and is subject to change. We recommend that you use this operation only with test data, and not in production environments.
Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see Create a bulk import job (CLI) in the IoT SiteWise User Guide.
You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. For more information about how to configure storage settings, see PutStorageConfiguration.
",
"CreateDashboard": "Creates a dashboard in an IoT SiteWise Monitor project.
",
"CreateGateway": "Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to IoT SiteWise. For more information, see Ingesting data using a gateway in the IoT SiteWise User Guide.
",
"CreatePortal": "Creates a portal, which can contain projects and dashboards. IoT SiteWise Monitor uses Amazon Web Services SSO or IAM to authenticate portal users and manage user permissions.
Before you can sign in to a new portal, you must add at least one identity to that portal. For more information, see Adding or removing portal administrators in the IoT SiteWise User Guide.
",
@@ -29,6 +30,7 @@
"DescribeAsset": "Retrieves information about an asset.
",
"DescribeAssetModel": "Retrieves information about an asset model.
",
"DescribeAssetProperty": "Retrieves information about an asset property.
When you call this operation for an attribute property, this response includes the default attribute value that you define in the asset model. If you update the default value in the model, this operation's response includes the new default value.
This operation doesn't return the value of the asset property. To get the value of an asset property, use GetAssetPropertyValue.
",
+ "DescribeBulkImportJob": " This API operation is in preview release for IoT SiteWise and is subject to change. We recommend that you use this operation only with test data, and not in production environments.
Retrieves information about a bulk import job request. For more information, see Describe a bulk import job (CLI) in the IoT SiteWise User Guide.
",
"DescribeDashboard": "Retrieves information about a dashboard.
",
"DescribeDefaultEncryptionConfiguration": "Retrieves information about the default encryption configuration for the Amazon Web Services account in the default or specified Region. For more information, see Key management in the IoT SiteWise User Guide.
",
"DescribeGateway": "Retrieves information about a gateway.
",
@@ -49,6 +51,7 @@
"ListAssetRelationships": "Retrieves a paginated list of asset relationships for an asset. You can use this operation to identify an asset's root asset and all associated assets between that asset and its root.
",
"ListAssets": "Retrieves a paginated list of asset summaries.
You can use this operation to do the following:
You can't use this operation to list all assets. To retrieve summaries for all of your assets, use ListAssetModels to get all of your asset model IDs. Then, use ListAssets to get all assets for each asset model.
",
"ListAssociatedAssets": "Retrieves a paginated list of associated assets.
You can use this operation to do the following:
",
+ "ListBulkImportJobs": " This API operation is in preview release for IoT SiteWise and is subject to change. We recommend that you use this operation only with test data, and not in production environments.
Retrieves a paginated list of bulk import job requests. For more information, see List bulk import jobs (CLI) in the IoT SiteWise User Guide.
",
"ListDashboards": "Retrieves a paginated list of dashboards for an IoT SiteWise Monitor project.
",
"ListGateways": "Retrieves a paginated list of gateways.
",
"ListPortals": "Retrieves a paginated list of IoT SiteWise Monitor portals.
",
@@ -83,6 +86,7 @@
"CreateAccessPolicyResponse$accessPolicyArn": "The ARN of the access policy, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:access-policy/${AccessPolicyId}
",
"CreateAssetModelResponse$assetModelArn": "The ARN of the asset model, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}
",
"CreateAssetResponse$assetArn": "The ARN of the asset, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}
",
+ "CreateBulkImportJobRequest$jobRoleArn": "The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.
",
"CreateDashboardResponse$dashboardArn": "The ARN of the dashboard, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}
",
"CreateGatewayResponse$gatewayArn": "The ARN of the gateway, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}
",
"CreatePortalRequest$roleArn": "The ARN of a service role that allows the portal's users to access your IoT SiteWise resources on your behalf. For more information, see Using service roles for IoT SiteWise Monitor in the IoT SiteWise User Guide.
",
@@ -93,6 +97,7 @@
"DescribeAccessPolicyResponse$accessPolicyArn": "The ARN of the access policy, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:access-policy/${AccessPolicyId}
",
"DescribeAssetModelResponse$assetModelArn": "The ARN of the asset model, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}
",
"DescribeAssetResponse$assetArn": "The ARN of the asset, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}
",
+ "DescribeBulkImportJobResponse$jobRoleArn": "The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.
",
"DescribeDashboardResponse$dashboardArn": "The ARN of the dashboard, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}
",
"DescribeDefaultEncryptionConfigurationResponse$kmsKeyArn": "The key ARN of the customer managed key used for KMS encryption if you use KMS_BASED_ENCRYPTION
.
",
"DescribeGatewayResponse$gatewayArn": "The ARN of the gateway, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}
",
@@ -777,6 +782,13 @@
"refs": {
}
},
+ "Bucket": {
+ "base": null,
+ "refs": {
+ "ErrorReportLocation$bucket": "The name of the Amazon S3 bucket to which errors associated with the bulk import job are sent.
",
+ "File$bucket": "The name of the Amazon S3 bucket from which data is imported.
"
+ }
+ },
"CapabilityConfiguration": {
"base": null,
"refs": {
@@ -833,6 +845,18 @@
"UpdateProjectRequest$clientToken": "A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.
"
}
},
+ "ColumnName": {
+ "base": null,
+ "refs": {
+ "ColumnNames$member": null
+ }
+ },
+ "ColumnNames": {
+ "base": null,
+ "refs": {
+ "Csv$columnNames": "The column names specified in the .csv file.
"
+ }
+ },
"CompositeModelProperty": {
"base": "Contains information about a composite model property on an asset.
",
"refs": {
@@ -908,6 +932,16 @@
"refs": {
}
},
+ "CreateBulkImportJobRequest": {
+ "base": null,
+ "refs": {
+ }
+ },
+ "CreateBulkImportJobResponse": {
+ "base": null,
+ "refs": {
+ }
+ },
"CreateDashboardRequest": {
"base": null,
"refs": {
@@ -948,6 +982,12 @@
"refs": {
}
},
+ "Csv": {
+ "base": "A .csv file.
",
+ "refs": {
+ "FileFormat$csv": "The .csv file format.
"
+ }
+ },
"CustomerManagedS3Storage": {
"base": "Contains information about a customer managed Amazon S3 bucket.
",
"refs": {
@@ -1090,6 +1130,16 @@
"refs": {
}
},
+ "DescribeBulkImportJobRequest": {
+ "base": null,
+ "refs": {
+ }
+ },
+ "DescribeBulkImportJobResponse": {
+ "base": null,
+ "refs": {
+ }
+ },
"DescribeDashboardRequest": {
"base": null,
"refs": {
@@ -1323,6 +1373,13 @@
"UnauthorizedException$message": null
}
},
+ "ErrorReportLocation": {
+ "base": "The Amazon S3 destination where errors associated with the job creation request are saved.
",
+ "refs": {
+ "CreateBulkImportJobRequest$errorReportLocation": "The Amazon S3 destination where errors associated with the job creation request are saved.
",
+ "DescribeBulkImportJobResponse$errorReportLocation": "The Amazon S3 destination where errors associated with the job creation request are saved.
"
+ }
+ },
"ExceptionMessage": {
"base": null,
"refs": {
@@ -1349,6 +1406,25 @@
"Transform$variables": "The list of variables used in the expression.
"
}
},
+ "File": {
+ "base": "The file in Amazon S3 where your data is saved.
",
+ "refs": {
+ "Files$member": null
+ }
+ },
+ "FileFormat": {
+ "base": "The file format of the data.
",
+ "refs": {
+ "JobConfiguration$fileFormat": "The file format of the data in Amazon S3.
"
+ }
+ },
+ "Files": {
+ "base": null,
+ "refs": {
+ "CreateBulkImportJobRequest$files": "The files in the specified Amazon S3 bucket that contain your data.
",
+ "DescribeBulkImportJobResponse$files": "The files in the specified Amazon S3 bucket that contain your data.
"
+ }
+ },
"ForwardingConfig": {
"base": "The forwarding configuration for a given property.
",
"refs": {
@@ -1501,6 +1577,7 @@
"CreateAssetModelResponse$assetModelId": "The ID of the asset model. You can use this ID when you call other IoT SiteWise APIs.
",
"CreateAssetRequest$assetModelId": "The ID of the asset model from which to create the asset.
",
"CreateAssetResponse$assetId": "The ID of the asset. This ID uniquely identifies the asset within IoT SiteWise and can be used with other IoT SiteWise APIs.
",
+ "CreateBulkImportJobResponse$jobId": "The ID of the job.
",
"CreateDashboardRequest$projectId": "The ID of the project in which to create the dashboard.
",
"CreateDashboardResponse$dashboardId": "The ID of the dashboard.
",
"CreateGatewayResponse$gatewayId": "The ID of the gateway device. You can use this ID when you call other IoT SiteWise APIs.
",
@@ -1528,6 +1605,8 @@
"DescribeAssetRequest$assetId": "The ID of the asset.
",
"DescribeAssetResponse$assetId": "The ID of the asset.
",
"DescribeAssetResponse$assetModelId": "The ID of the asset model that was used to create the asset.
",
+ "DescribeBulkImportJobRequest$jobId": "The ID of the job.
",
+ "DescribeBulkImportJobResponse$jobId": "The ID of the job.
",
"DescribeDashboardRequest$dashboardId": "The ID of the dashboard.
",
"DescribeDashboardResponse$dashboardId": "The ID of the dashboard.
",
"DescribeDashboardResponse$projectId": "The ID of the project that the dashboard is in.
",
@@ -1561,6 +1640,7 @@
"IDs$member": null,
"Image$id": "The ID of an existing image. Specify this parameter to keep an existing image.
",
"ImageLocation$id": "The ID of the image.
",
+ "JobSummary$id": "The ID of the job.
",
"ListAccessPoliciesRequest$resourceId": "The ID of the resource. This parameter is required if you specify resourceType
.
",
"ListAssetRelationshipsRequest$assetId": "The ID of the asset.
",
"ListAssetsRequest$assetModelId": "The ID of the asset model by which to filter the list of assets. This parameter is required if you choose ALL
for filter
.
",
@@ -1698,6 +1778,33 @@
"refs": {
}
},
+ "JobConfiguration": {
+ "base": "Contains the configuration information of a job, such as the file format used to save data in Amazon S3.
",
+ "refs": {
+ "CreateBulkImportJobRequest$jobConfiguration": "Contains the configuration information of a job, such as the file format used to save data in Amazon S3.
",
+ "DescribeBulkImportJobResponse$jobConfiguration": "Contains the configuration information of a job, such as the file format used to save data in Amazon S3.
"
+ }
+ },
+ "JobStatus": {
+ "base": null,
+ "refs": {
+ "CreateBulkImportJobResponse$jobStatus": "The status of the bulk import job can be one of following values.
-
PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.
-
CANCELLED
– The bulk import job has been canceled.
-
RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.
-
COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.
-
FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3. You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
-
COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors. You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
",
+ "DescribeBulkImportJobResponse$jobStatus": "The status of the bulk import job can be one of following values.
-
PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.
-
CANCELLED
– The bulk import job has been canceled.
-
RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.
-
COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.
-
FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3. You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
-
COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors. You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
",
+ "JobSummary$status": "The status of the bulk import job can be one of following values.
-
PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.
-
CANCELLED
– The bulk import job has been canceled.
-
RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.
-
COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.
-
FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3. You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
-
COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors. You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
"
+ }
+ },
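Because PENDING and RUNNING are the only non-terminal values in the JobStatus enum above, a simple poll on DescribeBulkImportJob can wait a job out. This is a sketch only; the job ID and poll interval are assumptions.

```python
import time

import boto3

sitewise = boto3.client("iotsitewise", region_name="us-east-1")

# Terminal values taken from the JobStatus enum added in this diff.
TERMINAL = {"CANCELLED", "COMPLETED", "FAILED", "COMPLETED_WITH_FAILURES"}

def wait_for_bulk_import(job_id: str, poll_seconds: int = 30) -> str:
    """Poll DescribeBulkImportJob until the job reaches a terminal jobStatus."""
    while True:
        status = sitewise.describe_bulk_import_job(jobId=job_id)["jobStatus"]
        if status in TERMINAL:
            return status
        time.sleep(poll_seconds)
```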
+ "JobSummaries": {
+ "base": null,
+ "refs": {
+ "ListBulkImportJobsResponse$jobSummaries": "One or more job summaries to list.
"
+ }
+ },
+ "JobSummary": {
+ "base": "Contains a job summary information.
",
+ "refs": {
+ "JobSummaries$member": null
+ }
+ },
"KmsKeyId": {
"base": null,
"refs": {
@@ -1765,6 +1872,22 @@
"refs": {
}
},
+ "ListBulkImportJobsFilter": {
+ "base": null,
+ "refs": {
+ "ListBulkImportJobsRequest$filter": "You can use a filter to select the bulk import jobs that you want to retrieve.
"
+ }
+ },
+ "ListBulkImportJobsRequest": {
+ "base": null,
+ "refs": {
+ }
+ },
+ "ListBulkImportJobsResponse": {
+ "base": null,
+ "refs": {
+ }
+ },
"ListDashboardsRequest": {
"base": null,
"refs": {
@@ -1877,6 +2000,7 @@
"ListAssetRelationshipsRequest$maxResults": "The maximum number of results to return for each paginated request.
",
"ListAssetsRequest$maxResults": "The maximum number of results to return for each paginated request.
Default: 50
",
"ListAssociatedAssetsRequest$maxResults": "The maximum number of results to return for each paginated request.
Default: 50
",
+ "ListBulkImportJobsRequest$maxResults": "The maximum number of results to return for each paginated request.
",
"ListDashboardsRequest$maxResults": "The maximum number of results to return for each paginated request.
Default: 50
",
"ListGatewaysRequest$maxResults": "The maximum number of results to return for each paginated request.
Default: 50
",
"ListPortalsRequest$maxResults": "The maximum number of results to return for each paginated request.
Default: 50
",
@@ -1966,6 +2090,8 @@
"CompositeModelProperty$type": "The type of the composite model that defines this property.
",
"CreateAssetModelRequest$assetModelName": "A unique, friendly name for the asset model.
",
"CreateAssetRequest$assetName": "A unique, friendly name for the asset.
",
+ "CreateBulkImportJobRequest$jobName": "The unique name that helps identify the job request.
",
+ "CreateBulkImportJobResponse$jobName": "The unique name that helps identify the job request.
",
"CreateDashboardRequest$dashboardName": "A friendly name for the dashboard.
",
"CreateGatewayRequest$gatewayName": "A unique, friendly name for the gateway.
",
"CreatePortalRequest$portalName": "A friendly name for the portal.
",
@@ -1974,12 +2100,14 @@
"DescribeAssetModelResponse$assetModelName": "The name of the asset model.
",
"DescribeAssetPropertyResponse$assetName": "The name of the asset.
",
"DescribeAssetResponse$assetName": "The name of the asset.
",
+ "DescribeBulkImportJobResponse$jobName": "The unique name that helps identify the job request.
",
"DescribeDashboardResponse$dashboardName": "The name of the dashboard.
",
"DescribeGatewayResponse$gatewayName": "The name of the gateway.
",
"DescribePortalResponse$portalName": "The name of the portal.
",
"DescribeProjectResponse$projectName": "The name of the project.
",
"DescribeTimeSeriesResponse$dataTypeSpec": "The data type of the structure for this time series. This parameter is required for time series that have the STRUCT
data type.
The options for this parameter depend on the type of the composite model in which you created the asset property that is associated with your time series. Use AWS/ALARM_STATE
for alarm state in alarm composite models.
",
"GatewaySummary$gatewayName": "The name of the asset.
",
+ "JobSummary$name": "The unique name that helps identify the job request.
",
"PortalSummary$name": "The name of the portal.
",
"ProjectSummary$name": "The name of the project.
",
"Property$name": "The name of the property.
",
@@ -2017,6 +2145,8 @@
"ListAssetsResponse$nextToken": "The token for the next set of results, or null if there are no additional results.
",
"ListAssociatedAssetsRequest$nextToken": "The token to be used for the next set of paginated results.
",
"ListAssociatedAssetsResponse$nextToken": "The token for the next set of results, or null if there are no additional results.
",
+ "ListBulkImportJobsRequest$nextToken": "The token to be used for the next set of paginated results.
",
+ "ListBulkImportJobsResponse$nextToken": "The token for the next set of results, or null if there are no additional results.
",
"ListDashboardsRequest$nextToken": "The token to be used for the next set of paginated results.
",
"ListDashboardsResponse$nextToken": "The token for the next set of results, or null if there are no additional results.
",
"ListGatewaysRequest$nextToken": "The token to be used for the next set of paginated results.
",
@@ -2345,6 +2475,14 @@
"PutStorageConfigurationResponse$storageType": "The storage tier that you specified for your data. The storageType
parameter can be one of the following values:
-
SITEWISE_DEFAULT_STORAGE
– IoT SiteWise saves your data into the hot tier. The hot tier is a service-managed database.
-
MULTI_LAYER_STORAGE
– IoT SiteWise saves your data in both the cold tier and the hot tier. The cold tier is a customer-managed Amazon S3 bucket.
"
}
},
+ "String": {
+ "base": null,
+ "refs": {
+ "ErrorReportLocation$prefix": "Amazon S3 uses the prefix as a folder name to organize data in the bucket. Each Amazon S3 object has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/). For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide.
",
+ "File$key": "The key of the Amazon S3 object that contains your data. Each object has a key that is a unique identifier. Each object has exactly one key.
",
+ "File$versionId": "The version ID to identify a specific version of the Amazon S3 object that contains your data.
"
+ }
+ },
"TagKey": {
"base": null,
"refs": {
@@ -2464,6 +2602,8 @@
"DescribeAssetModelResponse$assetModelLastUpdateDate": "The date the asset model was last updated, in Unix epoch time.
",
"DescribeAssetResponse$assetCreationDate": "The date the asset was created, in Unix epoch time.
",
"DescribeAssetResponse$assetLastUpdateDate": "The date the asset was last updated, in Unix epoch time.
",
+ "DescribeBulkImportJobResponse$jobCreationDate": "The date the job was created, in Unix epoch TIME.
",
+ "DescribeBulkImportJobResponse$jobLastUpdateDate": "The date the job was last updated, in Unix epoch time.
",
"DescribeDashboardResponse$dashboardCreationDate": "The date the dashboard was created, in Unix epoch time.
",
"DescribeDashboardResponse$dashboardLastUpdateDate": "The date the dashboard was last updated, in Unix epoch time.
",
"DescribeGatewayResponse$creationDate": "The date the gateway was created, in Unix epoch time.
",
diff --git a/apis/iotsitewise/2019-12-02/paginators-1.json b/apis/iotsitewise/2019-12-02/paginators-1.json
index f136f3af528..56e894096b4 100644
--- a/apis/iotsitewise/2019-12-02/paginators-1.json
+++ b/apis/iotsitewise/2019-12-02/paginators-1.json
@@ -62,6 +62,12 @@
"limit_key": "maxResults",
"result_key": "assetSummaries"
},
+ "ListBulkImportJobs": {
+ "input_token": "nextToken",
+ "output_token": "nextToken",
+ "limit_key": "maxResults",
+ "result_key": "jobSummaries"
+ },
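This paginator entry is what lets the generated SDKs page the operation automatically; in boto3, for example, it surfaces as a standard paginator. A sketch, with the region and filter value as assumptions:

```python
import boto3

sitewise = boto3.client("iotsitewise", region_name="us-east-1")

# The input_token/output_token/limit_key/result_key mapping above drives this.
paginator = sitewise.get_paginator("list_bulk_import_jobs")
for page in paginator.paginate(filter="ALL"):
    for job in page["jobSummaries"]:
        print(job["id"], job["status"])
```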
"ListDashboards": {
"input_token": "nextToken",
"output_token": "nextToken",
diff --git a/apis/kendra/2019-02-03/api-2.json b/apis/kendra/2019-02-03/api-2.json
index 309aa33276f..59ae6823832 100644
--- a/apis/kendra/2019-02-03/api-2.json
+++ b/apis/kendra/2019-02-03/api-2.json
@@ -4572,9 +4572,17 @@
"FieldMappings":{"shape":"DataSourceToIndexFieldMappingList"},
"DocumentTitleFieldName":{"shape":"DataSourceFieldName"},
"DisableLocalGroups":{"shape":"Boolean"},
- "SslCertificateS3Path":{"shape":"S3Path"}
+ "SslCertificateS3Path":{"shape":"S3Path"},
+ "AuthenticationType":{"shape":"SharePointOnlineAuthenticationType"}
}
},
+ "SharePointOnlineAuthenticationType":{
+ "type":"string",
+ "enum":[
+ "HTTP_BASIC",
+ "OAUTH2"
+ ]
+ },
"SharePointUrlList":{
"type":"list",
"member":{"shape":"Url"},
diff --git a/apis/kendra/2019-02-03/docs-2.json b/apis/kendra/2019-02-03/docs-2.json
index 3b93d2fb69e..53a2c63b7f3 100644
--- a/apis/kendra/2019-02-03/docs-2.json
+++ b/apis/kendra/2019-02-03/docs-2.json
@@ -8,7 +8,7 @@
"BatchGetDocumentStatus": "Returns the indexing status for one or more documents submitted with the BatchPutDocument API.
When you use the BatchPutDocument
API, documents are indexed asynchronously. You can use the BatchGetDocumentStatus
API to get the current status of a list of documents so that you can determine if they have been successfully indexed.
You can also use the BatchGetDocumentStatus
API to check the status of the BatchDeleteDocument API. When a document is deleted from the index, Amazon Kendra returns NOT_FOUND
as the status.
",
"BatchPutDocument": "Adds one or more documents to an index.
The BatchPutDocument
API enables you to ingest inline documents or a set of documents stored in an Amazon S3 bucket. Use this API to ingest your text and unstructured text into an index, add custom attributes to the documents, and to attach an access control list to the documents added to the index.
The documents are indexed asynchronously. You can see the progress of the batch using Amazon Web Services CloudWatch. Any error messages related to processing the batch are sent to your Amazon Web Services CloudWatch log.
For an example of ingesting inline documents using Python and Java SDKs, see Adding files directly to an index.
",
"ClearQuerySuggestions": "Clears existing query suggestions from an index.
This deletes existing suggestions only, not the queries in the query log. After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. If you do not see any new suggestions, then please allow Amazon Kendra to collect enough queries to learn new suggestions.
ClearQuerySuggestions
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
",
- "CreateAccessControlConfiguration": "Creates an access configuration for your documents. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
You can use this to re-configure your existing document level access control without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. One of these users leaves the company or switches to a team that should be blocked from access to top-secret documents. Your documents in your index still give this user access to top-secret documents due to the user having access at the time your documents were indexed. You can create a specific access control configuration for this user with deny access. You can later update the access control configuration to allow access in the case the user returns to the company and re-joins the 'top-secret' team. You can re-configure access control for your documents circumstances change.
To apply your access control configuration to certain documents, you call the BatchPutDocument API with the AccessControlConfigurationId
included in the Document object. If you use an S3 bucket as a data source, you update the .metadata.json
with the AccessControlConfigurationId
and synchronize your data source. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument
API.
",
+ "CreateAccessControlConfiguration": "Creates an access configuration for your documents. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
You can use this to re-configure your existing document level access control without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. One of these users leaves the company or switches to a team that should be blocked from accessing top-secret documents. The user still has access to top-secret documents because the user had access when your documents were previously indexed. You can create a specific access control configuration for the user with deny access. You can later update the access control configuration to allow access if the user returns to the company and re-joins the 'top-secret' team. You can re-configure access control for your documents as circumstances change.
To apply your access control configuration to certain documents, you call the BatchPutDocument API with the AccessControlConfigurationId
included in the Document object. If you use an S3 bucket as a data source, you update the .metadata.json
with the AccessControlConfigurationId
and synchronize your data source. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument
API.
",
"CreateDataSource": "Creates a data source that you want to use with an Amazon Kendra index.
You specify a name, data source connector type and description for your data source. You also specify configuration information for the data source connector.
CreateDataSource
is a synchronous operation. The operation returns 200 if the data source was successfully created. Otherwise, an exception is raised.
Amazon S3 and custom data sources are the only supported data sources in the Amazon Web Services GovCloud (US-West) region.
For an example of creating an index and data source using the Python SDK, see Getting started with Python SDK. For an example of creating an index and data source using the Java SDK, see Getting started with Java SDK.
",
"CreateExperience": "Creates an Amazon Kendra experience such as a search application. For more information on creating a search application experience, including using the Python and Java SDKs, see Building a search experience with no code.
",
"CreateFaq": "Creates an new set of frequently asked question (FAQ) questions and answers.
Adding FAQs to an index is an asynchronous operation.
For an example of adding an FAQ to an index using Python and Java SDKs, see Using your FAQ file.
",
@@ -55,7 +55,7 @@
"SubmitFeedback": "Enables you to provide feedback to Amazon Kendra to improve the performance of your index.
SubmitFeedback
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
",
"TagResource": "Adds the specified tag to the specified index, FAQ, or data source resource. If the tag already exists, the existing value is replaced with the new value.
",
"UntagResource": "Removes a tag from an index, FAQ, or a data source.
",
- "UpdateAccessControlConfiguration": "Updates an access control configuration for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
You can update an access control configuration you created without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. You created an 'allow' access control configuration for one user who recently joined the 'top-secret' team, switching from a team with 'deny' access to top-secret documents. However, the user suddenly returns to their previous team and should no longer have access to top secret documents. You can update the access control configuration to re-configure access control for your documents as circumstances change.
You call the BatchPutDocument API to apply the updated access control configuration, with the AccessControlConfigurationId
included in the Document object. If you use an S3 bucket as a data source, you synchronize your data source to apply the the AccessControlConfigurationId
in the .metadata.json
file. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument
API.
",
+ "UpdateAccessControlConfiguration": "Updates an access control configuration for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
You can update an access control configuration you created without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. You created an 'allow' access control configuration for one user who recently joined the 'top-secret' team, switching from a team with 'deny' access to top-secret documents. However, the user suddenly returns to their previous team and should no longer have access to top secret documents. You can update the access control configuration to re-configure access control for your documents as circumstances change.
You call the BatchPutDocument API to apply the updated access control configuration, with the AccessControlConfigurationId
included in the Document object. If you use an S3 bucket as a data source, you synchronize your data source to apply the AccessControlConfigurationId
in the .metadata.json
file. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument
API.
",
"UpdateDataSource": "Updates an existing Amazon Kendra data source.
",
"UpdateExperience": "Updates your Amazon Kendra experience such as a search application. For more information on creating a search application experience, see Building a search experience with no code.
",
"UpdateIndex": "Updates an existing Amazon Kendra index.
",
@@ -2977,7 +2977,7 @@
"QuipConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs that are required to connect to your Quip. The secret must contain a JSON structure with the following keys:
",
"SalesforceConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Managersecret that contains the key/value pairs required to connect to your Salesforce instance. The secret must contain a JSON structure with the following keys:
-
authenticationUrl - The OAUTH endpoint that Amazon Kendra connects to get an OAUTH token.
-
consumerKey - The application public key generated when you created your Salesforce application.
-
consumerSecret - The application private key generated when you created your Salesforce application.
-
password - The password associated with the user logging in to the Salesforce instance.
-
securityToken - The token associated with the user account logging in to the Salesforce instance.
-
username - The user name of the user logging in to the Salesforce instance.
",
"ServiceNowConfiguration$SecretArn": "The Amazon Resource Name (ARN) of the Secrets Manager secret that contains the user name and password required to connect to the ServiceNow instance. You can also provide OAuth authentication credentials of user name, password, client ID, and client secret. For more information, see Authentication for a ServiceNow data source.
",
- "SharePointConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the SharePoint instance. If you use SharePoint Server, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source.
",
+ "SharePointConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the SharePoint instance. If you use SharePoint Server, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source.
You can also provide OAuth authentication credentials of user name, password, client ID, and client secret. For more information, see Authentication for a SharePoint data source.
",
"SlackConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your Slack workspace team. The secret must contain a JSON structure with the following keys:
"
}
},
@@ -3065,6 +3065,12 @@
"DataSourceConfiguration$SharePointConfiguration": "Provides the configuration information to connect to Microsoft SharePoint as your data source.
"
}
},
+ "SharePointOnlineAuthenticationType": {
+ "base": null,
+ "refs": {
+ "SharePointConfiguration$AuthenticationType": "Whether you want to connect to SharePoint using basic authentication of user name and password, or OAuth authentication of user name, password, client ID, and client secret. You can use OAuth authentication for SharePoint Online.
"
+ }
+ },
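To exercise the new field, an UpdateDataSource call can set AuthenticationType inside SharePointConfiguration. The sketch below shows only the fields relevant to this diff plus the minimum SharePoint fields; the IDs, URL, and secret ARN are placeholders.

```python
import boto3

kendra = boto3.client("kendra", region_name="us-east-1")

kendra.update_data_source(
    Id="my-data-source-id",  # placeholder
    IndexId="my-index-id",   # placeholder
    Configuration={
        "SharePointConfiguration": {
            "SharePointVersion": "SHAREPOINT_ONLINE",
            "Urls": ["https://example.sharepoint.com/sites/mysite"],
            "SecretArn": "arn:aws:secretsmanager:us-east-1:123456789012:secret:sp-oauth",
            # New in this diff: HTTP_BASIC or OAUTH2.
            "AuthenticationType": "OAUTH2",
        }
    },
)
```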
"SharePointUrlList": {
"base": null,
"refs": {
@@ -3231,8 +3237,8 @@
"JiraStatus$member": null,
"JsonTokenTypeConfiguration$UserNameAttributeField": "The user name attribute field.
",
"JsonTokenTypeConfiguration$GroupAttributeField": "The group attribute field.
",
- "ListAccessControlConfigurationsRequest$NextToken": "If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of access control configurations.
",
- "ListAccessControlConfigurationsResponse$NextToken": "If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of access control configurations.
",
+ "ListAccessControlConfigurationsRequest$NextToken": "If the previous response was incomplete (because there's more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of access control configurations.
",
+ "ListAccessControlConfigurationsResponse$NextToken": "If the response is truncated, Amazon Kendra returns this token, which you can use in the subsequent request to retrieve the next set of access control configurations.
",
"PrivateChannelFilter$member": null,
"Project$member": null,
"PublicChannelFilter$member": null,
diff --git a/apis/monitoring/2010-08-01/api-2.json b/apis/monitoring/2010-08-01/api-2.json
index 7da31990144..54382095239 100644
--- a/apis/monitoring/2010-08-01/api-2.json
+++ b/apis/monitoring/2010-08-01/api-2.json
@@ -583,6 +583,19 @@
"min":1
},
"ActionsEnabled":{"type":"boolean"},
+ "ActionsSuppressedBy":{
+ "type":"string",
+ "enum":[
+ "WaitPeriod",
+ "ExtensionPeriod",
+ "Alarm"
+ ]
+ },
+ "ActionsSuppressedReason":{
+ "type":"string",
+ "max":1024,
+ "min":0
+ },
"AlarmArn":{
"type":"string",
"max":1600,
@@ -750,7 +763,13 @@
"StateReason":{"shape":"StateReason"},
"StateReasonData":{"shape":"StateReasonData"},
"StateUpdatedTimestamp":{"shape":"Timestamp"},
- "StateValue":{"shape":"StateValue"}
+ "StateValue":{"shape":"StateValue"},
+ "StateTransitionedTimestamp":{"shape":"Timestamp"},
+ "ActionsSuppressedBy":{"shape":"ActionsSuppressedBy"},
+ "ActionsSuppressedReason":{"shape":"ActionsSuppressedReason"},
+ "ActionsSuppressor":{"shape":"AlarmArn"},
+ "ActionsSuppressorWaitPeriod":{"shape":"SuppressorPeriod"},
+ "ActionsSuppressorExtensionPeriod":{"shape":"SuppressorPeriod"}
},
"xmlOrder":[
"ActionsEnabled",
@@ -765,7 +784,13 @@
"StateReason",
"StateReasonData",
"StateUpdatedTimestamp",
- "StateValue"
+ "StateValue",
+ "StateTransitionedTimestamp",
+ "ActionsSuppressedBy",
+ "ActionsSuppressedReason",
+ "ActionsSuppressor",
+ "ActionsSuppressorWaitPeriod",
+ "ActionsSuppressorExtensionPeriod"
]
},
"CompositeAlarms":{
@@ -1966,7 +1991,10 @@
"AlarmRule":{"shape":"AlarmRule"},
"InsufficientDataActions":{"shape":"ResourceList"},
"OKActions":{"shape":"ResourceList"},
- "Tags":{"shape":"TagList"}
+ "Tags":{"shape":"TagList"},
+ "ActionsSuppressor":{"shape":"AlarmArn"},
+ "ActionsSuppressorWaitPeriod":{"shape":"SuppressorPeriod"},
+ "ActionsSuppressorExtensionPeriod":{"shape":"SuppressorPeriod"}
}
},
"PutDashboardInput":{
@@ -2278,6 +2306,7 @@
"type":"integer",
"min":1
},
+ "SuppressorPeriod":{"type":"integer"},
"Tag":{
"type":"structure",
"required":[
diff --git a/apis/monitoring/2010-08-01/docs-2.json b/apis/monitoring/2010-08-01/docs-2.json
index ef2157f1847..1347be0880f 100644
--- a/apis/monitoring/2010-08-01/docs-2.json
+++ b/apis/monitoring/2010-08-01/docs-2.json
@@ -61,11 +61,25 @@
"PutMetricAlarmInput$ActionsEnabled": "Indicates whether actions should be executed during any changes to the alarm state. The default is TRUE
.
"
}
},
+ "ActionsSuppressedBy": {
+ "base": null,
+ "refs": {
+ "CompositeAlarm$ActionsSuppressedBy": " When the value is ALARM
, it means that the actions are suppressed because the suppressor alarm is in the ALARM
state. When the value is WaitPeriod
, it means that the actions are suppressed because the composite alarm is waiting for the suppressor alarm to go into the ALARM
state. The maximum waiting time is as specified in ActionsSuppressorWaitPeriod
. After this time, the composite alarm performs its actions. When the value is ExtensionPeriod
, it means that the actions are suppressed because the composite alarm is waiting after the suppressor alarm went out of the ALARM
state. The maximum waiting time is as specified in ActionsSuppressorExtensionPeriod
. After this time, the composite alarm performs its actions.
"
+ }
+ },
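On the read side, these fields come back on CompositeAlarm from DescribeAlarms. A sketch, assuming a composite alarm already exists in the account:

```python
import boto3

cloudwatch = boto3.client("cloudwatch", region_name="us-east-1")

resp = cloudwatch.describe_alarms(AlarmTypes=["CompositeAlarm"])
for alarm in resp["CompositeAlarms"]:
    # ActionsSuppressedBy is WaitPeriod, ExtensionPeriod, or Alarm (absent when
    # nothing is suppressed); ActionsSuppressedReason captures the why.
    print(
        alarm["AlarmName"],
        alarm.get("ActionsSuppressedBy"),
        alarm.get("ActionsSuppressedReason"),
        alarm.get("StateTransitionedTimestamp"),
    )
```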
+ "ActionsSuppressedReason": {
+ "base": null,
+ "refs": {
+ "CompositeAlarm$ActionsSuppressedReason": " Captures the reason for action suppression.
"
+ }
+ },
"AlarmArn": {
"base": null,
"refs": {
"CompositeAlarm$AlarmArn": "The Amazon Resource Name (ARN) of the alarm.
",
- "MetricAlarm$AlarmArn": "The Amazon Resource Name (ARN) of the alarm.
"
+ "CompositeAlarm$ActionsSuppressor": " Actions will be suppressed if the suppressor alarm is in the ALARM
state. ActionsSuppressor
can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm.
",
+ "MetricAlarm$AlarmArn": "The Amazon Resource Name (ARN) of the alarm.
",
+ "PutCompositeAlarmInput$ActionsSuppressor": " Actions will be suppressed if the suppressor alarm is in the ALARM
state. ActionsSuppressor
can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm.
"
}
},
"AlarmDescription": {
@@ -1568,6 +1582,15 @@
"MetricDatum$StorageResolution": "Valid values are 1 and 60. Setting this to 1 specifies this metric as a high-resolution metric, so that CloudWatch stores the metric with sub-minute resolution down to one second. Setting this to 60 specifies this metric as a regular-resolution metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution is available only for custom metrics. For more information about high-resolution metrics, see High-Resolution Metrics in the Amazon CloudWatch User Guide.
This field is optional, if you do not specify it the default of 60 is used.
"
}
},
+ "SuppressorPeriod": {
+ "base": null,
+ "refs": {
+ "CompositeAlarm$ActionsSuppressorWaitPeriod": " The maximum time in seconds that the composite alarm waits for the suppressor alarm to go into the ALARM
state. After this time, the composite alarm performs its actions.
WaitPeriod
is required only when ActionsSuppressor
is specified.
",
+ "CompositeAlarm$ActionsSuppressorExtensionPeriod": " The maximum time in seconds that the composite alarm waits after suppressor alarm goes out of the ALARM
state. After this time, the composite alarm performs its actions.
ExtensionPeriod
is required only when ActionsSuppressor
is specified.
",
+ "PutCompositeAlarmInput$ActionsSuppressorWaitPeriod": " The maximum time in seconds that the composite alarm waits for the suppressor alarm to go into the ALARM
state. After this time, the composite alarm performs its actions.
WaitPeriod
is required only when ActionsSuppressor
is specified.
",
+ "PutCompositeAlarmInput$ActionsSuppressorExtensionPeriod": " The maximum time in seconds that the composite alarm waits after suppressor alarm goes out of the ALARM
state. After this time, the composite alarm performs its actions.
ExtensionPeriod
is required only when ActionsSuppressor
is specified.
"
+ }
+ },
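Putting the new inputs together, a hedged PutCompositeAlarm sketch; the alarm names, rule, SNS topic, and periods are placeholders.

```python
import boto3

cloudwatch = boto3.client("cloudwatch", region_name="us-east-1")

cloudwatch.put_composite_alarm(
    AlarmName="service-health",
    AlarmRule='ALARM("cpu-high") OR ALARM("error-rate-high")',
    AlarmActions=["arn:aws:sns:us-east-1:123456789012:oncall"],  # placeholder topic
    # New in this diff: suppress actions while the maintenance alarm is in ALARM.
    ActionsSuppressor="maintenance-window-alarm",  # AlarmName or ARN
    ActionsSuppressorWaitPeriod=120,        # seconds to wait for the suppressor to fire
    ActionsSuppressorExtensionPeriod=180,   # seconds to keep suppressing after it clears
)
```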
"Tag": {
"base": "A key-value pair associated with a CloudWatch resource.
",
"refs": {
@@ -1626,7 +1649,8 @@
"refs": {
"AlarmHistoryItem$Timestamp": "The time stamp for the alarm history item.
",
"CompositeAlarm$AlarmConfigurationUpdatedTimestamp": "The time stamp of the last update to the alarm configuration.
",
- "CompositeAlarm$StateUpdatedTimestamp": "The time stamp of the last update to the alarm state.
",
+ "CompositeAlarm$StateUpdatedTimestamp": "Tracks the timestamp of any state update, even if StateValue
doesn't change.
",
+ "CompositeAlarm$StateTransitionedTimestamp": " The timestamp of the last change to the alarm's StateValue
.
",
"Datapoint$Timestamp": "The time stamp used for the data point.
",
"DescribeAlarmHistoryInput$StartDate": "The starting date to retrieve alarm history.
",
"DescribeAlarmHistoryInput$EndDate": "The ending date to retrieve alarm history.
",
diff --git a/apis/network-firewall/2020-11-12/api-2.json b/apis/network-firewall/2020-11-12/api-2.json
index f5779aff4be..fbf73da1883 100644
--- a/apis/network-firewall/2020-11-12/api-2.json
+++ b/apis/network-firewall/2020-11-12/api-2.json
@@ -606,12 +606,32 @@
"member":{"shape":"AzSubnet"}
},
"Boolean":{"type":"boolean"},
+ "CIDRCount":{
+ "type":"integer",
+ "max":1000000,
+ "min":0
+ },
+ "CIDRSummary":{
+ "type":"structure",
+ "members":{
+ "AvailableCIDRCount":{"shape":"CIDRCount"},
+ "UtilizedCIDRCount":{"shape":"CIDRCount"},
+ "IPSetReferences":{"shape":"IPSetMetadataMap"}
+ }
+ },
+ "CapacityUsageSummary":{
+ "type":"structure",
+ "members":{
+ "CIDRs":{"shape":"CIDRSummary"}
+ }
+ },
"CollectionMember_String":{"type":"string"},
"ConfigurationSyncState":{
"type":"string",
"enum":[
"PENDING",
- "IN_SYNC"
+ "IN_SYNC",
+ "CAPACITY_CONSTRAINED"
]
},
"CreateFirewallPolicyRequest":{
@@ -1027,7 +1047,8 @@
"members":{
"Status":{"shape":"FirewallStatusValue"},
"ConfigurationSyncStateSummary":{"shape":"ConfigurationSyncState"},
- "SyncStates":{"shape":"SyncStates"}
+ "SyncStates":{"shape":"SyncStates"},
+ "CapacityUsageSummary":{"shape":"CapacityUsageSummary"}
}
},
"FirewallStatusValue":{
@@ -1091,6 +1112,35 @@
"Definition":{"shape":"VariableDefinitionList"}
}
},
+ "IPSetArn":{"type":"string"},
+ "IPSetMetadata":{
+ "type":"structure",
+ "members":{
+ "ResolvedCIDRCount":{"shape":"CIDRCount"}
+ }
+ },
+ "IPSetMetadataMap":{
+ "type":"map",
+ "key":{"shape":"IPSetArn"},
+ "value":{"shape":"IPSetMetadata"}
+ },
+ "IPSetReference":{
+ "type":"structure",
+ "members":{
+ "ReferenceArn":{"shape":"ResourceArn"}
+ }
+ },
+ "IPSetReferenceMap":{
+ "type":"map",
+ "key":{"shape":"IPSetReferenceName"},
+ "value":{"shape":"IPSetReference"}
+ },
+ "IPSetReferenceName":{
+ "type":"string",
+ "max":32,
+ "min":1,
+ "pattern":"^[A-Za-z][A-Za-z0-9_]*$"
+ },
"IPSets":{
"type":"map",
"key":{"shape":"RuleVariableName"},
@@ -1314,7 +1364,8 @@
"type":"string",
"enum":[
"PENDING",
- "IN_SYNC"
+ "IN_SYNC",
+ "CAPACITY_CONSTRAINED"
]
},
"PolicyString":{
@@ -1397,6 +1448,12 @@
"members":{
}
},
+ "ReferenceSets":{
+ "type":"structure",
+ "members":{
+ "IPSetReferences":{"shape":"IPSetReferenceMap"}
+ }
+ },
"ResourceArn":{
"type":"string",
"max":256,
@@ -1467,6 +1524,7 @@
"required":["RulesSource"],
"members":{
"RuleVariables":{"shape":"RuleVariables"},
+ "ReferenceSets":{"shape":"ReferenceSets"},
"RulesSource":{"shape":"RulesSource"},
"StatefulRuleOptions":{"shape":"StatefulRuleOptions"}
}
diff --git a/apis/network-firewall/2020-11-12/docs-2.json b/apis/network-firewall/2020-11-12/docs-2.json
index 4a5669f5d03..4d99b2e6af9 100644
--- a/apis/network-firewall/2020-11-12/docs-2.json
+++ b/apis/network-firewall/2020-11-12/docs-2.json
@@ -138,6 +138,26 @@
"UpdateSubnetChangeProtectionResponse$SubnetChangeProtection": "A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE
.
"
}
},
+ "CIDRCount": {
+ "base": null,
+ "refs": {
+ "CIDRSummary$AvailableCIDRCount": "The number of CIDR blocks available for use by the IP set references in a firewall.
",
+ "CIDRSummary$UtilizedCIDRCount": "The number of CIDR blocks used by the IP set references in a firewall.
",
+ "IPSetMetadata$ResolvedCIDRCount": "Describes the total number of CIDR blocks currently in use by the IP set references in a firewall. To determine how many CIDR blocks are available for you to use in a firewall, you can call AvailableCIDRCount
.
"
+ }
+ },
+ "CIDRSummary": {
+ "base": "Summarizes the CIDR blocks used by the IP set references in a firewall. Network Firewall calculates the number of CIDRs by taking an aggregated count of all CIDRs used by the IP sets you are referencing.
",
+ "refs": {
+ "CapacityUsageSummary$CIDRs": "Describes the capacity usage of the CIDR blocks used by the IP set references in a firewall.
"
+ }
+ },
+ "CapacityUsageSummary": {
+ "base": "The capacity usage summary of the resources used by the ReferenceSets in a firewall.
",
+ "refs": {
+ "FirewallStatus$CapacityUsageSummary": "Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calclulates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.
"
+ }
+ },
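The summary surfaces through DescribeFirewall's FirewallStatus. A sketch, assuming an existing firewall name:

```python
import boto3

nfw = boto3.client("network-firewall", region_name="us-east-1")

status = nfw.describe_firewall(FirewallName="my-firewall")["FirewallStatus"]
cidrs = status.get("CapacityUsageSummary", {}).get("CIDRs", {})
print("available:", cidrs.get("AvailableCIDRCount"))
print("utilized:", cidrs.get("UtilizedCIDRCount"))
for arn, meta in cidrs.get("IPSetReferences", {}).items():
    print(arn, "resolves to", meta.get("ResolvedCIDRCount"), "CIDR blocks")
```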
"CollectionMember_String": {
"base": null,
"refs": {
@@ -490,6 +510,42 @@
"IPSets$value": null
}
},
+ "IPSetArn": {
+ "base": null,
+ "refs": {
+ "IPSetMetadataMap$key": null
+ }
+ },
+ "IPSetMetadata": {
+ "base": "General information about the IP set.
",
+ "refs": {
+ "IPSetMetadataMap$value": null
+ }
+ },
+ "IPSetMetadataMap": {
+ "base": null,
+ "refs": {
+ "CIDRSummary$IPSetReferences": "The list of the IP set references used by a firewall.
"
+ }
+ },
+ "IPSetReference": {
+ "base": "Configures one or more IP set references for a Suricata-compatible rule group. This is used in CreateRuleGroup or UpdateRuleGroup. An IP set reference is a rule variable that references a resource that you create and manage in another Amazon Web Services service, such as an Amazon VPC prefix list. Network Firewall IP set references enable you to dynamically update the contents of your rules. When you create, update, or delete the IP set you are referencing in your rule, Network Firewall automatically updates the rule's content with the changes. For more information about IP set references in Network Firewall, see Using IP set references in the Network Firewall Developer Guide.
Network Firewall currently supports only Amazon VPC prefix lists as IP set references.
",
+ "refs": {
+ "IPSetReferenceMap$value": null
+ }
+ },
+ "IPSetReferenceMap": {
+ "base": null,
+ "refs": {
+ "ReferenceSets$IPSetReferences": "The list of IP set references.
"
+ }
+ },
+ "IPSetReferenceName": {
+ "base": null,
+ "refs": {
+ "IPSetReferenceMap$key": null
+ }
+ },
"IPSets": {
"base": null,
"refs": {
@@ -768,6 +824,12 @@
"refs": {
}
},
+ "ReferenceSets": {
+ "base": "Contains a set of IP set references.
",
+ "refs": {
+ "RuleGroup$ReferenceSets": "The list of a rule group's reference sets.
"
+ }
+ },
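A hedged sketch of attaching an IP set reference to a stateful rule group. The prefix-list ARN and capacity are placeholders, and the @NAME reference syntax in the Suricata rule string is an assumption based on the Network Firewall developer guide rather than anything in this diff.

```python
import boto3

nfw = boto3.client("network-firewall", region_name="us-east-1")

nfw.create_rule_group(
    RuleGroupName="beta-allowlist",
    Type="STATEFUL",
    Capacity=100,  # placeholder capacity
    RuleGroup={
        "ReferenceSets": {
            "IPSetReferences": {
                # Key must match IPSetReferenceName: ^[A-Za-z][A-Za-z0-9_]*$
                "BETA_IPS": {
                    "ReferenceArn": "arn:aws:ec2:us-east-1:123456789012:prefix-list/pl-0123456789abcdef0"
                }
            }
        },
        "RulesSource": {
            # Assumed syntax: the reference is used as @BETA_IPS inside rules.
            "RulesString": "pass ip @BETA_IPS any -> any any (sid:1;)"
        },
    },
)
```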
"ResourceArn": {
"base": null,
"refs": {
@@ -797,6 +859,7 @@
"FirewallMetadata$FirewallArn": "The Amazon Resource Name (ARN) of the firewall.
",
"FirewallPolicyMetadata$Arn": "The Amazon Resource Name (ARN) of the firewall policy.
",
"FirewallPolicyResponse$FirewallPolicyArn": "The Amazon Resource Name (ARN) of the firewall policy.
If this response is for a create request that had DryRun
set to TRUE
, then this ARN is a placeholder that isn't attached to a valid resource.
",
+ "IPSetReference$ReferenceArn": "The Amazon Resource Name (ARN) of the resource that you are referencing in your rule group.
",
"ListTagsForResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource.
",
"PutResourcePolicyRequest$ResourceArn": "The Amazon Resource Name (ARN) of the account that you want to share rule groups and firewall policies with.
",
"RuleGroupMetadata$Arn": "The Amazon Resource Name (ARN) of the rule group.
",
diff --git a/apis/rds/2014-10-31/docs-2.json b/apis/rds/2014-10-31/docs-2.json
index b71f11e0f03..54ce53b7d01 100644
--- a/apis/rds/2014-10-31/docs-2.json
+++ b/apis/rds/2014-10-31/docs-2.json
@@ -8,28 +8,28 @@
"AddTagsToResource": "Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.
For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.
",
"ApplyPendingMaintenanceAction": "Applies a pending maintenance action to a resource (for example, to a DB instance).
",
"AuthorizeDBSecurityGroupIngress": "Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).
You can't authorize ingress from an EC2 security group in one Amazon Web Services Region to an Amazon RDS DB instance in another. You can't authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.
For an overview of CIDR ranges, go to the Wikipedia Tutorial.
",
- "BacktrackDBCluster": "Backtracks a DB cluster to a specific time, without creating a new DB cluster.
For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon Aurora User Guide.
This action only applies to Aurora MySQL DB clusters.
",
+ "BacktrackDBCluster": "Backtracks a DB cluster to a specific time, without creating a new DB cluster.
For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon Aurora User Guide.
This action applies only to Aurora MySQL DB clusters.
",
"CancelExportTask": "Cancels an export task in progress that is exporting a snapshot to Amazon S3. Any data that has already been written to the S3 bucket isn't removed.
",
"CopyDBClusterParameterGroup": "Copies the specified DB cluster parameter group.
",
- "CopyDBClusterSnapshot": "Copies a snapshot of a DB cluster.
To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot
action is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the following values:
-
KmsKeyId
- The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region.
-
PreSignedUrl
- A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot
action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.
The pre-signed URL request must contain the following parameter values:
-
KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
-
DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
-
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.
-
TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.
-
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier
in the pre-signed URL.
To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier
while that DB cluster snapshot is in \"copying\" status.
For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
+ "CopyDBClusterSnapshot": "Copies a snapshot of a DB cluster.
To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot
operation is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the following values:
-
KmsKeyId
- The Amazon Web Services Key Management Service (Amazon Web Services KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region.
-
TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.
-
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier
in the presigned URL.
To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier
while that DB cluster snapshot is in \"copying\" status.
For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
"CopyDBParameterGroup": "Copies the specified DB parameter group.
",
- "CopyDBSnapshot": "Copies the specified DB snapshot. The source DB snapshot must be in the available
state.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region where you call the CopyDBSnapshot
action is the destination Amazon Web Services Region for the DB snapshot copy.
This command doesn't apply to RDS Custom.
For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.
",
+ "CopyDBSnapshot": "Copies the specified DB snapshot. The source DB snapshot must be in the available
state.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region where you call the CopyDBSnapshot
operation is the destination Amazon Web Services Region for the DB snapshot copy.
This command doesn't apply to RDS Custom.
For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.
",
"CopyOptionGroup": "Copies the specified option group.
",
"CreateCustomDBEngineVersion": "Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database engine and specific AMI. The supported engines are the following:
Amazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software. The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.
When you create a custom engine version, you specify the files in a JSON document called a CEV manifest. This document describes installation .zip files stored in Amazon S3. RDS Custom creates your CEV from the installation files that you provided. This service model is called Bring Your Own Media (BYOM).
Creation takes approximately two hours. If creation fails, RDS Custom issues RDS-EVENT-0196
with the message Creation failed for custom engine version
, and includes details about the failure. For example, the event prints missing files.
After you create the CEV, it is available for use. You can create multiple CEVs, and create multiple RDS Custom instances from any CEV. You can also change the status of a CEV to make it available or inactive.
The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the CreateCustomDbEngineVersion
event aren't logged. However, you might see calls from the API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for the CreateCustomDbEngineVersion
event.
For more information, see Creating a CEV in the Amazon RDS User Guide.
",
- "CreateDBCluster": "Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.
You can use the ReplicationSourceIdentifier
parameter to create an Amazon Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by ReplicationSourceIdentifier
is encrypted, also specify the PreSignedUrl
parameter.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
- "CreateDBClusterEndpoint": "Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.
This action only applies to Aurora DB clusters.
",
- "CreateDBClusterParameterGroup": "Creates a new DB cluster parameter group.
Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.
A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup
. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster
.
When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.
When you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.
After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database
parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters
action to verify that your DB cluster parameter group has been created or modified.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
+ "CreateDBCluster": "Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.
You can use the ReplicationSourceIdentifier
parameter to create an Amazon Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or PostgreSQL DB instance.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
+ "CreateDBClusterEndpoint": "Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.
This action applies only to Aurora DB clusters.
",
+ "CreateDBClusterParameterGroup": "Creates a new DB cluster parameter group.
Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.
A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup
. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster
.
When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.
When you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.
After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database
parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters
operation to verify that your DB cluster parameter group has been created or modified.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
"CreateDBClusterSnapshot": "Creates a snapshot of a DB cluster.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
- "CreateDBInstance": "Creates a new DB instance.
",
- "CreateDBInstanceReadReplica": "Creates a new DB instance that acts as a read replica for an existing source DB instance. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read Replicas in the Amazon RDS User Guide.
Amazon Aurora doesn't support this action. Call the CreateDBInstance
action to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified.
Your source DB instance must have backup retention enabled.
",
+ "CreateDBInstance": "Creates a new DB instance.
The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster. For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance to the cluster.
For more information about creating an RDS DB instance, see Creating an Amazon RDS DB instance in the Amazon RDS User Guide.
For more information about creating a DB instance in an Aurora DB cluster, see Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.
",
+ "CreateDBInstanceReadReplica": "Creates a new DB instance that acts as a read replica for an existing source DB instance. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read Replicas in the Amazon RDS User Guide.
Amazon Aurora doesn't support this operation. Call the CreateDBInstance
operation to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified.
Your source DB instance must have backup retention enabled.
",
"CreateDBParameterGroup": "Creates a new DB parameter group.
A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup
. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance
. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.
This command doesn't apply to RDS Custom.
After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database
parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.
",
"CreateDBProxy": "Creates a new DB proxy.
",
"CreateDBProxyEndpoint": "Creates a DBProxyEndpoint
. Only applies to proxies that are associated with Aurora DB clusters. You can use DB proxy endpoints to specify read/write or read-only access to the DB cluster. You can also use DB proxy endpoints to access a DB proxy through a different VPC than the proxy's default VPC.
",
"CreateDBSecurityGroup": "Creates a new DB security group. DB security groups control access to a DB instance.
A DB security group controls access to EC2-Classic DB instances that are not in a VPC.
",
"CreateDBSnapshot": "Creates a snapshot of a DB instance. The source DB instance must be in the available
or storage-optimization
state.
",
"CreateDBSubnetGroup": "Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the Amazon Web Services Region.
",
- "CreateEventSubscription": "Creates an RDS event notification subscription. This action requires a topic Amazon Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
You can specify the type of source (SourceType
) that you want to be notified of and provide a list of RDS sources (SourceIds
) that triggers the events. You can also provide a list of event categories (EventCategories
) for events that you want to be notified of. For example, you can specify SourceType
= db-instance
, SourceIds
= mydbinstance1
, mydbinstance2
and EventCategories
= Availability
, Backup
.
If you specify both the SourceType
and SourceIds
, such as SourceType
= db-instance
and SourceIds
= myDBInstance1
, you are notified of all the db-instance
events for the specified source. If you specify a SourceType
but do not specify SourceIds
, you receive notice of the events for that source type for all your RDS sources. If you don't specify either the SourceType or the SourceIds
, you are notified of events generated from all RDS sources belonging to your customer account.
RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.
",
- "CreateGlobalCluster": "Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This action only applies to Aurora DB clusters.
",
+ "CreateEventSubscription": "Creates an RDS event notification subscription. This operation requires a topic Amazon Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
You can specify the type of source (SourceType
) that you want to be notified of and provide a list of RDS sources (SourceIds
) that triggers the events. You can also provide a list of event categories (EventCategories
) for events that you want to be notified of. For example, you can specify SourceType
= db-instance
, SourceIds
= mydbinstance1
, mydbinstance2
and EventCategories
= Availability
, Backup
.
If you specify both the SourceType
and SourceIds
, such as SourceType
= db-instance
and SourceIds
= myDBInstance1
, you are notified of all the db-instance
events for the specified source. If you specify a SourceType
but do not specify SourceIds
, you receive notice of the events for that source type for all your RDS sources. If you don't specify either the SourceType or the SourceIds
, you are notified of events generated from all RDS sources belonging to your customer account.
RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.
",
+ "CreateGlobalCluster": "Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This action applies only to Aurora DB clusters.
",
"CreateOptionGroup": "Creates a new option group. You can create up to 20 option groups.
This command doesn't apply to RDS Custom.
",
"DeleteCustomDBEngineVersion": "Deletes a custom engine version. To run this command, make sure you meet the following prerequisites:
-
The CEV must not be the default for RDS Custom. If it is, change the default before running this command.
-
The CEV must not be associated with an RDS Custom DB instance, RDS Custom instance snapshot, or automated backup of your RDS Custom instance.
Typically, deletion takes a few minutes.
The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the DeleteCustomDbEngineVersion
event aren't logged. However, you might see calls from the API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for the DeleteCustomDbEngineVersion
event.
For more information, see Deleting a CEV in the Amazon RDS User Guide.
",
"DeleteDBCluster": "The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
@@ -75,7 +75,7 @@
"DescribeEngineDefaultParameters": "Returns the default engine and system parameter information for the specified database engine.
",
"DescribeEventCategories": "Displays a list of categories for all event source types, or, if specified, for a specified source type. You can also see this list in the \"Amazon RDS event categories and event messages\" section of the Amazon RDS User Guide or the Amazon Aurora User Guide .
",
"DescribeEventSubscriptions": "Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName
, SNSTopicARN
, CustomerID
, SourceType
, SourceID
, CreationTime
, and Status
.
If you specify a SubscriptionName
, lists the description for that subscription.
",
- "DescribeEvents": "Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be obtained by providing the name as a parameter.
By default, RDS returns events that were generated in the past hour.
",
+ "DescribeEvents": "Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be obtained by providing the name as a parameter.
For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora events in the Amazon Aurora User Guide.
By default, RDS returns events that were generated in the past hour.
",
"DescribeExportTasks": "Returns information about a snapshot export to Amazon S3. This API operation supports pagination.
",
"DescribeGlobalClusters": "Returns information about Aurora global database clusters. This API supports pagination.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
",
"DescribeOptionGroupOptions": "Describes all available options.
",
@@ -87,7 +87,7 @@
"DescribeSourceRegions": "Returns a list of the source Amazon Web Services Regions where the current Amazon Web Services Region can create a read replica, copy a DB snapshot from, or replicate automated backups from. This API action supports pagination.
",
"DescribeValidDBInstanceModifications": "You can call DescribeValidDBInstanceModifications
to learn what modifications you can make to your DB instance. You can use this information when you call ModifyDBInstance
.
This command doesn't apply to RDS Custom.
",
"DownloadDBLogFilePortion": "Downloads all or a portion of the specified log file, up to 1 MB in size.
This command doesn't apply to RDS Custom.
",
- "FailoverDBCluster": "Forces a failover for a DB cluster.
For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).
For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).
An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby DB instance when the primary DB instance fails.
To simulate a failure of a primary instance for testing, you can force a failover. Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
+ "FailoverDBCluster": "Forces a failover for a DB cluster.
For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).
For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).
An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby DB instance when the primary DB instance fails.
To simulate a failure of a primary instance for testing, you can force a failover. Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
"FailoverGlobalCluster": "Initiates the failover process for an Aurora global database (GlobalCluster).
A failover for an Aurora global database promotes one of the secondary read-only DB clusters to be the primary DB cluster and demotes the primary DB cluster to being a secondary (read-only) DB cluster. In other words, the roles of the current primary DB cluster and the selected (target) DB cluster are switched. The selected secondary DB cluster assumes full read/write capabilities for the Aurora global database.
For more information about failing over an Amazon Aurora global database, see Managed planned failover for Amazon Aurora global databases in the Amazon Aurora User Guide.
This action applies to GlobalCluster (Aurora global databases) only. Use this action only on healthy Aurora global databases with running Aurora DB clusters and no Region-wide outages, to test disaster recovery scenarios or to reconfigure your Aurora global database topology.
",
"ListTagsForResource": "Lists all tags on an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.
",
"ModifyCertificates": "Override the system-default Secure Sockets Layer/Transport Layer Security (SSL/TLS) certificate for Amazon RDS for new DB instances, or remove the override.
By using this operation, you can specify an RDS-approved SSL/TLS certificate for new DB instances that is different from the default certificate provided by RDS. You can also use this operation to remove the override, so that new DB instances use the default certificate provided by RDS.
You might need to override the default certificate in the following situations:
-
You already migrated your applications to support the latest certificate authority (CA) certificate, but the new CA certificate is not yet the RDS default CA certificate for the specified Amazon Web Services Region.
-
RDS has already moved to a new default CA certificate for the specified Amazon Web Services Region, but you are still in the process of supporting the new CA certificate. In this case, you temporarily need additional time to finish your application changes.
For more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS Certificate in the Amazon RDS User Guide.
For more information about rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the Amazon Aurora User Guide.
",
@@ -95,15 +95,15 @@
"ModifyCustomDBEngineVersion": "Modifies the status of a custom engine version (CEV). You can find CEVs to modify by calling DescribeDBEngineVersions
.
The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the ModifyCustomDbEngineVersion
event aren't logged. However, you might see calls from the API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for the ModifyCustomDbEngineVersion
event.
For more information, see Modifying CEV status in the Amazon RDS User Guide.
",
"ModifyDBCluster": "Modify the settings for an Amazon Aurora DB cluster or a Multi-AZ DB cluster. You can change one or more settings by specifying these parameters and the new values in the request.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
"ModifyDBClusterEndpoint": "Modifies the properties of an endpoint in an Amazon Aurora DB cluster.
This action only applies to Aurora DB clusters.
",
- "ModifyDBClusterParameterGroup": "Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName
, ParameterValue
, and ApplyMethod
. A maximum of 20 parameters can be modified in a single request.
After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database
parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters
action to verify that your DB cluster parameter group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora applies the update immediately. The cluster restart might interrupt your workload. In that case, your application must reopen any connections and retry any transactions that were active when the parameter changes took effect.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
- "ModifyDBClusterSnapshotAttribute": "Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.
To share a manual DB cluster snapshot with other Amazon Web Services accounts, specify restore
as the AttributeName
and use the ValuesToAdd
parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB cluster snapshot. Use the value all
to make the manual DB cluster snapshot public, which means that it can be copied or restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB cluster snapshots that contain private information that you don't want available to all Amazon Web Services accounts.
If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd
parameter. You can't use all
as a value for that parameter in this case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are returned as values for the restore
attribute.
",
+ "ModifyDBClusterParameterGroup": "Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName
, ParameterValue
, and ApplyMethod
. A maximum of 20 parameters can be modified in a single request.
After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database
parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters
operation to verify that your DB cluster parameter group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora applies the update immediately. The cluster restart might interrupt your workload. In that case, your application must reopen any connections and retry any transactions that were active when the parameter changes took effect.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
+ "ModifyDBClusterSnapshotAttribute": "Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.
To share a manual DB cluster snapshot with other Amazon Web Services accounts, specify restore
as the AttributeName
and use the ValuesToAdd
parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB cluster snapshot. Use the value all
to make the manual DB cluster snapshot public, which means that it can be copied or restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB cluster snapshots that contain private information that you don't want available to all Amazon Web Services accounts.
If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd
parameter. You can't use all
as a value for that parameter in this case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are returned as values for the restore
attribute.
",
"ModifyDBInstance": "Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications
before you call ModifyDBInstance
.
",
"ModifyDBParameterGroup": "Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName
, ParameterValue
, and ApplyMethod
. A maximum of 20 parameters can be modified in a single request.
After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database
parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.
",
"ModifyDBProxy": "Changes the settings for an existing DB proxy.
",
"ModifyDBProxyEndpoint": "Changes the settings for an existing DB proxy endpoint.
",
"ModifyDBProxyTargetGroup": "Modifies the properties of a DBProxyTargetGroup
.
",
"ModifyDBSnapshot": "Updates a manual DB snapshot with a new engine version. The snapshot can be encrypted or unencrypted, but not shared or public.
Amazon RDS supports upgrading DB snapshots for MySQL, PostgreSQL, and Oracle. This command doesn't apply to RDS Custom.
",
- "ModifyDBSnapshotAttribute": "Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.
To share a manual DB snapshot with other Amazon Web Services accounts, specify restore
as the AttributeName
and use the ValuesToAdd
parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot. Uses the value all
to make the manual DB snapshot public, which means it can be copied or restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB snapshots that contain private information that you don't want available to all Amazon Web Services accounts.
If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd
parameter. You can't use all
as a value for that parameter in this case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as values for the restore
attribute.
",
+ "ModifyDBSnapshotAttribute": "Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.
To share a manual DB snapshot with other Amazon Web Services accounts, specify restore
as the AttributeName
and use the ValuesToAdd
parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot. Use the value all
to make the manual DB snapshot public, which means it can be copied or restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB snapshots that contain private information that you don't want available to all Amazon Web Services accounts.
If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd
parameter. You can't use all
as a value for that parameter in this case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as values for the restore
attribute.
",
"ModifyDBSubnetGroup": "Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the Amazon Web Services Region.
",
"ModifyEventSubscription": "Modifies an existing RDS event notification subscription. You can't modify the source identifiers using this call. To change source identifiers for a subscription, use the AddSourceIdentifierToSubscription
and RemoveSourceIdentifierFromSubscription
calls.
You can see a list of the event categories for a given source type (SourceType
) in Events in the Amazon RDS User Guide or by using the DescribeEventCategories
operation.
",
"ModifyGlobalCluster": "Modify a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
",
@@ -112,7 +112,7 @@
"PromoteReadReplicaDBCluster": "Promotes a read replica DB cluster to a standalone DB cluster.
",
"PurchaseReservedDBInstancesOffering": "Purchases a reserved DB instance offering.
",
"RebootDBCluster": "You might need to reboot your DB cluster, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB cluster parameter group associated with the DB cluster, reboot the DB cluster for the changes to take effect.
Rebooting a DB cluster restarts the database engine service. Rebooting a DB cluster results in a momentary outage, during which the DB cluster status is set to rebooting.
Use this operation only for a non-Aurora Multi-AZ DB cluster.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
- "RebootDBInstance": "You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.
Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.
For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.
This command doesn't apply to RDS Custom.
",
+ "RebootDBInstance": "You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.
Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.
For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.
This command doesn't apply to RDS Custom.
If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster
operation.
",
"RegisterDBProxyTargets": "Associate one or more DBProxyTarget
data structures with a DBProxyTargetGroup
.
",
"RemoveFromGlobalCluster": "Detaches an Aurora secondary cluster from an Aurora global database cluster. The cluster becomes a standalone cluster with read-write capability instead of being read-only and receiving data from a primary cluster in a different Region.
This action only applies to Aurora DB clusters.
",
"RemoveRoleFromDBCluster": "Removes the asssociation of an Amazon Web Services Identity and Access Management (IAM) role from a DB cluster.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
",
@@ -136,7 +136,7 @@
"StopActivityStream": "Stops a database activity stream that was started using the Amazon Web Services console, the start-activity-stream
CLI command, or the StartActivityStream
action.
For more information, see Database Activity Streams in the Amazon Aurora User Guide.
",
"StopDBCluster": "Stops an Amazon Aurora DB cluster. When you stop a DB cluster, Aurora retains the DB cluster's metadata, including its endpoints and DB parameter groups. Aurora also retains the transaction logs so you can do a point-in-time restore if necessary.
For more information, see Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
",
"StopDBInstance": "Stops an Amazon RDS DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary.
For more information, see Stopping an Amazon RDS DB Instance Temporarily in the Amazon RDS User Guide.
This command doesn't apply to RDS Custom, Aurora MySQL, or Aurora PostgreSQL. For Aurora clusters, use StopDBCluster
instead.
",
- "StopDBInstanceAutomatedBackupsReplication": "Stops automated backup replication for a DB instance.
This command doesn't apply to RDS Custom.
For more information, see Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.
"
+ "StopDBInstanceAutomatedBackupsReplication": "Stops automated backup replication for a DB instance.
This command doesn't apply to RDS Custom, Aurora MySQL, or Aurora PostgreSQL.
For more information, see Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.
"
},
"shapes": {
"AccountAttributesMessage": {
@@ -419,7 +419,7 @@
"CopyDBClusterSnapshotMessage$CopyTags": "A value that indicates whether to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot. By default, tags are not copied.
",
"CopyDBSnapshotMessage$CopyTags": "A value that indicates whether to copy all tags from the source DB snapshot to the target DB snapshot. By default, tags are not copied.
",
"CreateDBClusterMessage$StorageEncrypted": "A value that indicates whether the DB cluster is encrypted.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
- "CreateDBClusterMessage$EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
For more information, see IAM Database Authentication in the Amazon Aurora User Guide..
Valid for: Aurora DB clusters only
",
+ "CreateDBClusterMessage$EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
For more information, see IAM Database Authentication in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters only
",
"CreateDBClusterMessage$DeletionProtection": "A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
"CreateDBClusterMessage$EnableHttpEndpoint": "A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled.
When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor.
For more information, see Using the Data API for Aurora Serverless v1 in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters only
",
"CreateDBClusterMessage$CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
@@ -427,12 +427,12 @@
"CreateDBClusterMessage$PubliclyAccessible": "A value that indicates whether the DB cluster is publicly accessible.
When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.
When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.
Default: The default behavior varies depending on whether DBSubnetGroupName
is specified.
If DBSubnetGroupName
isn't specified, and PubliclyAccessible
isn't specified, the following applies:
-
If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.
-
If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.
If DBSubnetGroupName
is specified, and PubliclyAccessible
isn't specified, the following applies:
-
If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.
-
If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.
Valid for: Multi-AZ DB clusters only
",
"CreateDBClusterMessage$AutoMinorVersionUpgrade": "A value that indicates whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.
Valid for: Multi-AZ DB clusters only
",
"CreateDBClusterMessage$EnablePerformanceInsights": "A value that indicates whether to turn on Performance Insights for the DB cluster.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
Valid for: Multi-AZ DB clusters only
",
- "CreateDBInstanceMessage$MultiAZ": "A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone
parameter if the DB instance is a Multi-AZ deployment.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceMessage$MultiAZ": "A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone
parameter if the DB instance is a Multi-AZ deployment.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster.
",
"CreateDBInstanceMessage$AutoMinorVersionUpgrade": "A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.
If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade
to false
.
",
"CreateDBInstanceMessage$PubliclyAccessible": "A value that indicates whether the DB instance is publicly accessible.
When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.
When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.
Default: The default behavior varies depending on whether DBSubnetGroupName
is specified.
If DBSubnetGroupName
isn't specified, and PubliclyAccessible
isn't specified, the following applies:
-
If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private.
-
If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public.
If DBSubnetGroupName
is specified, and PubliclyAccessible
isn't specified, the following applies:
-
If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private.
-
If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public.
",
"CreateDBInstanceMessage$StorageEncrypted": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.
For RDS Custom instances, either set this parameter to true
or leave it unset. If you set this parameter to false
, RDS reports an error.
Amazon Aurora
Not applicable. The encryption for DB instances is managed by the DB cluster.
",
"CreateDBInstanceMessage$CopyTagsToSnapshot": "A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
Amazon Aurora
Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.
",
- "CreateDBInstanceMessage$EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora, mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.
For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
",
+ "CreateDBInstanceMessage$EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.
",
"CreateDBInstanceMessage$EnablePerformanceInsights": "A value that indicates whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
",
"CreateDBInstanceMessage$DeletionProtection": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.
Amazon Aurora
Not applicable. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster
. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.
",
"CreateDBInstanceMessage$EnableCustomerOwnedIp": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.
A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.
For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.
For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.
",
@@ -481,7 +481,7 @@
"ModifyDBInstanceMessage$CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
Amazon Aurora
Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster
.
",
"ModifyDBInstanceMessage$PubliclyAccessible": "A value that indicates whether the DB instance is publicly accessible.
When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB instance doesn't permit it.
When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.
PubliclyAccessible
only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible
must be enabled for it to be publicly accessible.
Changes to the PubliclyAccessible
parameter are applied immediately regardless of the value of the ApplyImmediately
parameter.
",
"ModifyDBInstanceMessage$EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
This setting doesn't apply to Amazon Aurora. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.
For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
",
- "ModifyDBInstanceMessage$EnablePerformanceInsights": "A value that indicates whether to enable Performance Insights for the DB instance.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide..
This setting doesn't apply to RDS Custom.
",
+ "ModifyDBInstanceMessage$EnablePerformanceInsights": "A value that indicates whether to enable Performance Insights for the DB instance.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
",
"ModifyDBInstanceMessage$UseDefaultProcessorFeatures": "A value that indicates whether the DB instance class of the DB instance uses its default processor features.
This setting doesn't apply to RDS Custom.
",
"ModifyDBInstanceMessage$DeletionProtection": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.
",
"ModifyDBInstanceMessage$CertificateRotationRestart": "A value that indicates whether the DB instance is restarted when you rotate your SSL/TLS certificate.
By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.
Set this parameter only if you are not using SSL/TLS to connect to the DB instance.
If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate:
This setting doesn't apply to RDS Custom.
",
@@ -523,7 +523,7 @@
"RestoreDBInstanceFromS3Message$StorageEncrypted": "A value that indicates whether the new DB instance is encrypted or not.
",
"RestoreDBInstanceFromS3Message$CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
",
"RestoreDBInstanceFromS3Message$EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
",
- "RestoreDBInstanceFromS3Message$EnablePerformanceInsights": "A value that indicates whether to enable Performance Insights for the DB instance.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide..
",
+ "RestoreDBInstanceFromS3Message$EnablePerformanceInsights": "A value that indicates whether to enable Performance Insights for the DB instance.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
",
"RestoreDBInstanceFromS3Message$UseDefaultProcessorFeatures": "A value that indicates whether the DB instance class of the DB instance uses its default processor features.
",
"RestoreDBInstanceFromS3Message$DeletionProtection": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.
",
"RestoreDBInstanceToPointInTimeMessage$MultiAZ": "A value that indicates whether the DB instance is a Multi-AZ deployment.
This setting doesn't apply to RDS Custom.
Constraint: You can't specify the AvailabilityZone
parameter if the DB instance is a Multi-AZ deployment.
",
@@ -1458,7 +1458,7 @@
"DBSecurityGroupNameList": {
"base": null,
"refs": {
- "CreateDBInstanceMessage$DBSecurityGroups": "A list of DB security groups to associate with this DB instance.
Default: The default DB security group for the database engine.
",
+ "CreateDBInstanceMessage$DBSecurityGroups": "A list of DB security groups to associate with this DB instance.
This setting applies to the legacy EC2-Classic platform, which is no longer used to create new DB instances. Use the VpcSecurityGroupIds
setting instead.
",
"ModifyDBInstanceMessage$DBSecurityGroups": "A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied as soon as possible.
This setting doesn't apply to RDS Custom.
Constraints:
",
"OptionConfiguration$DBSecurityGroupMemberships": "A list of DBSecurityGroupMembership name strings used for this option.
",
"RestoreDBInstanceFromS3Message$DBSecurityGroups": "A list of DB security groups to associate with this DB instance.
Default: The default DB security group for the database engine.
"
@@ -2076,7 +2076,7 @@
"EngineFamily": {
"base": null,
"refs": {
- "CreateDBProxyRequest$EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.
"
+ "CreateDBProxyRequest$EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify MYSQL
. For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL
.
"
}
},
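To make the MYSQL/POSTGRESQL mapping concrete, a boto3 sketch of CreateDBProxy under assumed inputs (the proxy name, role ARN, secret ARN, and subnet IDs are all placeholders):

import boto3

rds = boto3.client("rds")

# EngineFamily is MYSQL for Aurora MySQL, RDS for MariaDB, and RDS for
# MySQL; it would be POSTGRESQL for the PostgreSQL engines.
rds.create_db_proxy(
    DBProxyName="my-proxy",
    EngineFamily="MYSQL",
    RoleArn="arn:aws:iam::123456789012:role/my-proxy-role",  # placeholder
    Auth=[{
        "AuthScheme": "SECRETS",
        "SecretArn": "arn:aws:secretsmanager:us-east-1:123456789012:secret:mydb-creds",  # placeholder
        "IAMAuth": "DISABLED",
    }],
    VpcSubnetIds=["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"],  # placeholders
    RequireTLS=True,
)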
"EngineModeList": {
@@ -2089,7 +2089,7 @@
}
},
"Event": {
- "base": "This data type is used as a response element in the DescribeEvents
action.
",
+ "base": "This data type is used as a response element in the DescribeEvents action.
",
"refs": {
"EventList$member": null
}
@@ -2106,7 +2106,7 @@
}
},
"EventCategoriesMap": {
- "base": "Contains the results of a successful invocation of the DescribeEventCategories
operation.
",
+ "base": "Contains the results of a successful invocation of the DescribeEventCategories operation.
",
"refs": {
"EventCategoriesMapList$member": null
}
@@ -2427,19 +2427,19 @@
"CreateDBClusterMessage$AllocatedStorage": "The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.
This setting is required to create a Multi-AZ DB cluster.
Valid for: Multi-AZ DB clusters only
",
"CreateDBClusterMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.
For information about valid Iops
values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.
This setting is required to create a Multi-AZ DB cluster.
Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster.
Valid for: Multi-AZ DB clusters only
",
"CreateDBClusterMessage$MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0.
If MonitoringRoleArn
is specified, also set MonitoringInterval
to a value other than 0.
Valid Values: 0, 1, 5, 10, 15, 30, 60
Valid for: Multi-AZ DB clusters only
",
- "CreateDBClusterMessage$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
Valid for: Multi-AZ DB clusters only
",
+ "CreateDBClusterMessage$PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
For example, the following values are valid:
-
93 (3 months * 31)
-
341 (11 months * 31)
-
589 (19 months * 31)
-
731
If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
Valid for: Multi-AZ DB clusters only
",
"CreateDBInstanceMessage$AllocatedStorage": "The amount of storage in gibibytes (GiB) to allocate for the DB instance.
Type: Integer
Amazon Aurora
Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.
Amazon RDS Custom
Constraints to the amount of storage for each storage type are the following:
-
General Purpose (SSD) storage (gp2): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.
-
Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.
MySQL
Constraints to the amount of storage for each storage type are the following:
-
General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.
-
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
-
Magnetic storage (standard): Must be an integer from 5 to 3072.
MariaDB
Constraints to the amount of storage for each storage type are the following:
-
General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.
-
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
-
Magnetic storage (standard): Must be an integer from 5 to 3072.
PostgreSQL
Constraints to the amount of storage for each storage type are the following:
-
General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.
-
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
-
Magnetic storage (standard): Must be an integer from 5 to 3072.
Oracle
Constraints to the amount of storage for each storage type are the following:
-
General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.
-
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
-
Magnetic storage (standard): Must be an integer from 10 to 3072.
SQL Server
Constraints to the amount of storage for each storage type are the following:
-
General Purpose (SSD) storage (gp2):
-
Provisioned IOPS storage (io1):
-
Magnetic storage (standard):
",
- "CreateDBInstanceMessage$BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.
Amazon Aurora
Not applicable. The retention period for automated backups is managed by the DB cluster.
Default: 1
Constraints:
-
Must be a value from 0 to 35
-
Can't be set to 0 if the DB instance is a source to read replicas
-
Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance
",
+ "CreateDBInstanceMessage$BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.
Amazon Aurora
Not applicable. The retention period for automated backups is managed by the DB cluster.
Default: 1
Constraints:
-
Must be a value from 0 to 35
-
Can't be set to 0 if the DB instance is a source to read replicas
-
Can't be set to 0 for an RDS Custom for Oracle DB instance
",
"CreateDBInstanceMessage$Port": "The port number on which the database accepts connections.
MySQL
Default: 3306
Valid values: 1150-65535
Type: Integer
MariaDB
Default: 3306
Valid values: 1150-65535
Type: Integer
PostgreSQL
Default: 5432
Valid values: 1150-65535
Type: Integer
Oracle
Default: 1521
Valid values: 1150-65535
SQL Server
Default: 1433
Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156.
Amazon Aurora
Default: 3306
Valid values: 1150-65535
Type: Integer
",
- "CreateDBInstanceMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops
values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.
Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 of the storage amount for the DB instance.
",
+ "CreateDBInstanceMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops
values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.
Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 of the storage amount for the DB instance.
Amazon Aurora
Not applicable. Storage is managed by the DB cluster.
",
"CreateDBInstanceMessage$MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.
If MonitoringRoleArn
is specified, then you must set MonitoringInterval
to a value other than 0.
This setting doesn't apply to RDS Custom.
Valid Values: 0, 1, 5, 10, 15, 30, 60
",
"CreateDBInstanceMessage$PromotionTier": "A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.
This setting doesn't apply to RDS Custom.
Default: 1
Valid Values: 0 - 15
",
- "CreateDBInstanceMessage$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
This setting doesn't apply to RDS Custom.
",
- "CreateDBInstanceMessage$MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.
For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceMessage$PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
For example, the following values are valid:
-
93 (3 months * 31)
-
341 (11 months * 31)
-
589 (19 months * 31)
-
731
If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceMessage$MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.
For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable. Storage is managed by the DB cluster.
",
"CreateDBInstanceReadReplicaMessage$Port": "The port number that the DB instance uses for connections.
Default: Inherits from the source DB instance
Valid Values: 1150-65535
",
"CreateDBInstanceReadReplicaMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.
",
"CreateDBInstanceReadReplicaMessage$MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the read replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.
If MonitoringRoleArn
is specified, then you must also set MonitoringInterval
to a value other than 0.
This setting doesn't apply to RDS Custom.
Valid Values: 0, 1, 5, 10, 15, 30, 60
",
- "CreateDBInstanceReadReplicaMessage$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceReadReplicaMessage$PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
For example, the following values are valid:
-
93 (3 months * 31)
-
341 (11 months * 31)
-
589 (19 months * 31)
-
731
If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
This setting doesn't apply to RDS Custom.
",
"CreateDBInstanceReadReplicaMessage$MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.
For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.
",
"CreateDBProxyRequest$IdleClientTimeout": "The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database.
",
"DBCluster$AllocatedStorage": "For all database engines except Amazon Aurora, AllocatedStorage
specifies the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage
always returns 1, because Aurora DB cluster storage size isn't fixed, but instead automatically adjusts as needed.
",
@@ -2448,7 +2448,7 @@
"DBCluster$Capacity": "The current capacity of an Aurora Serverless v1 DB cluster. The capacity is 0 (zero) when the cluster is paused.
For more information about Aurora Serverless v1, see Using Amazon Aurora Serverless v1 in the Amazon Aurora User Guide.
",
"DBCluster$Iops": "The Provisioned IOPS (I/O operations per second) value.
This setting is only for non-Aurora Multi-AZ DB clusters.
",
"DBCluster$MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster.
This setting is only for non-Aurora Multi-AZ DB clusters.
",
- "DBCluster$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
This setting is only for non-Aurora Multi-AZ DB clusters.
",
+ "DBCluster$PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
For example, the following values are valid:
-
93 (3 months * 31)
-
341 (11 months * 31)
-
589 (19 months * 31)
-
731
This setting is only for non-Aurora Multi-AZ DB clusters.
",
"DBClusterCapacityInfo$PendingCapacity": "A value that specifies the capacity that the DB cluster scales to next.
",
"DBClusterCapacityInfo$CurrentCapacity": "The current capacity of the DB cluster.
",
"DBClusterCapacityInfo$SecondsBeforeTimeout": "The number of seconds before a call to ModifyCurrentDBClusterCapacity
times out.
",
@@ -2456,7 +2456,7 @@
"DBInstance$Iops": "Specifies the Provisioned IOPS (I/O operations per second) value.
",
"DBInstance$MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
",
"DBInstance$PromotionTier": "A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.
",
- "DBInstance$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
",
+ "DBInstance$PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
For example, the following values are valid:
-
93 (3 months * 31)
-
341 (11 months * 31)
-
589 (19 months * 31)
-
731
",
"DBInstance$MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.
",
"DBInstanceAutomatedBackup$Iops": "The IOPS (I/O operations per second) value for the automated backup.
",
"DBInstanceAutomatedBackup$BackupRetentionPeriod": "The retention period for the automated backups.
",
@@ -2485,7 +2485,7 @@
"DescribeGlobalClustersMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
",
"DescribeOptionGroupOptionsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
",
"DescribeOptionGroupsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
",
- "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
",
+ "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 10000.
",
"DescribePendingMaintenanceActionsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
",
"DescribeReservedDBInstancesMessage$MaxRecords": "The maximum number of records to include in the response. If more than the MaxRecords
value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
",
"DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "The maximum number of records to include in the response. If more than the MaxRecords
value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
",
@@ -2497,14 +2497,14 @@
"ModifyDBClusterMessage$AllocatedStorage": "The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.
Type: Integer
Valid for: Multi-AZ DB clusters only
",
"ModifyDBClusterMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.
For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.
Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster.
Valid for: Multi-AZ DB clusters only
",
"ModifyDBClusterMessage$MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0.
If MonitoringRoleArn
is specified, also set MonitoringInterval
to a value other than 0.
Valid Values: 0, 1, 5, 10, 15, 30, 60
Valid for: Multi-AZ DB clusters only
",
- "ModifyDBClusterMessage$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
Valid for: Multi-AZ DB clusters only
",
+ "ModifyDBClusterMessage$PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
For example, the following values are valid:
-
93 (3 months * 31)
-
341 (11 months * 31)
-
589 (19 months * 31)
-
731
If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
Valid for: Multi-AZ DB clusters only
",
"ModifyDBInstanceMessage$AllocatedStorage": "The new amount of storage in gibibytes (GiB) to allocate for the DB instance.
For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.
For the valid values for allocated storage for each engine, see CreateDBInstance
.
",
- "ModifyDBInstanceMessage$BackupRetentionPeriod": "The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.
Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.
These changes are applied during the next maintenance window unless the ApplyImmediately
parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.
Amazon Aurora
Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster
.
Default: Uses existing setting
Constraints:
- It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to read replicas. It can't be set to 0 or 35 for an RDS Custom for Oracle DB instance.
- It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or later.
- It can be specified for a PostgreSQL read replica only if the source is running PostgreSQL 9.3.5.
",
+ "ModifyDBInstanceMessage$BackupRetentionPeriod": "The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.
Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.
These changes are applied during the next maintenance window unless the ApplyImmediately
parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.
Amazon Aurora
Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster
.
Default: Uses existing setting
Constraints:
- It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to read replicas. It can't be set to 0 for an RDS Custom for Oracle DB instance.
- It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or later.
- It can be specified for a PostgreSQL read replica only if the source is running PostgreSQL 9.3.5.
",
"ModifyDBInstanceMessage$Iops": "The new Provisioned IOPS (I/O operations per second) value for the RDS instance.
Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.
If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.
Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.
Default: Uses existing setting
",
"ModifyDBInstanceMessage$MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0, which is the default.
If MonitoringRoleArn
is specified, set MonitoringInterval
to a value other than 0.
This setting doesn't apply to RDS Custom.
Valid Values: 0, 1, 5, 10, 15, 30, 60
",
"ModifyDBInstanceMessage$DBPortNumber": "The port number on which the database accepts connections.
The value of the DBPortNumber
parameter must not match any of the port values specified for options in the option group for the DB instance.
If you change the DBPortNumber
value, your database restarts regardless of the value of the ApplyImmediately
parameter.
This setting doesn't apply to RDS Custom.
MySQL
Default: 3306
Valid values: 1150-65535
MariaDB
Default: 3306
Valid values: 1150-65535
PostgreSQL
Default: 5432
Valid values: 1150-65535
Type: Integer
Oracle
Default: 1521
Valid values: 1150-65535
SQL Server
Default: 1433
Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156.
Amazon Aurora
Default: 3306
Valid values: 1150-65535
",
"ModifyDBInstanceMessage$PromotionTier": "A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.
This setting doesn't apply to RDS Custom.
Default: 1
Valid Values: 0 - 15
",
- "ModifyDBInstanceMessage$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
This setting doesn't apply to RDS Custom.
",
+ "ModifyDBInstanceMessage$PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
For example, the following values are valid:
-
93 (3 months * 31)
-
341 (11 months * 31)
-
589 (19 months * 31)
-
731
If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
This setting doesn't apply to RDS Custom.
",
"ModifyDBInstanceMessage$MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.
For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
",
"ModifyDBInstanceMessage$ResumeFullAutomationModeMinutes": "The number of minutes to pause the automation. When the time period ends, RDS Custom resumes full automation. The minimum value is 60
(default). The maximum value is 1,440
.
",
"ModifyDBProxyRequest$IdleClientTimeout": "The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database.
",
@@ -2535,7 +2535,7 @@
"RestoreDBInstanceFromS3Message$Port": "The port number on which the database accepts connections.
Type: Integer
Valid Values: 1150
-65535
Default: 3306
",
"RestoreDBInstanceFromS3Message$Iops": "The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.
",
"RestoreDBInstanceFromS3Message$MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0.
If MonitoringRoleArn
is specified, then you must also set MonitoringInterval
to a value other than 0.
Valid Values: 0, 1, 5, 10, 15, 30, 60
Default: 0
",
- "RestoreDBInstanceFromS3Message$PerformanceInsightsRetentionPeriod": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
",
+ "RestoreDBInstanceFromS3Message$PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
For example, the following values are valid:
-
93 (3 months * 31)
-
341 (11 months * 31)
-
589 (19 months * 31)
-
731
If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
",
"RestoreDBInstanceFromS3Message$MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.
For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.
",
"RestoreDBInstanceToPointInTimeMessage$Port": "The port number on which the database accepts connections.
Constraints: Value must be 1150-65535
Default: The same port as the original DB instance.
",
"RestoreDBInstanceToPointInTimeMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
SQL Server
Setting the IOPS value for the SQL Server database engine isn't supported.
",
@@ -2544,7 +2544,7 @@
"ScalingConfiguration$MaxCapacity": "The maximum capacity for an Aurora DB cluster in serverless
DB engine mode.
For Aurora MySQL, valid capacity values are 1
, 2
, 4
, 8
, 16
, 32
, 64
, 128
, and 256
.
For Aurora PostgreSQL, valid capacity values are 2
, 4
, 8
, 16
, 32
, 64
, 192
, and 384
.
The maximum capacity must be greater than or equal to the minimum capacity.
",
"ScalingConfiguration$SecondsUntilAutoPause": "The time, in seconds, before an Aurora DB cluster in serverless
mode is paused.
Specify a value between 300 and 86,400 seconds.
",
"ScalingConfiguration$SecondsBeforeTimeout": "The amount of time, in seconds, that Aurora Serverless v1 tries to find a scaling point to perform seamless scaling before enforcing the timeout action. The default is 300.
Specify a value between 60 and 600 seconds.
",
- "ScalingConfigurationInfo$MinCapacity": "The maximum capacity for the Aurora DB cluster in serverless
DB engine mode.
",
+ "ScalingConfigurationInfo$MinCapacity": "The minimum capacity for an Aurora DB cluster in serverless
DB engine mode.
",
"ScalingConfigurationInfo$MaxCapacity": "The maximum capacity for an Aurora DB cluster in serverless
DB engine mode.
",
"ScalingConfigurationInfo$SecondsUntilAutoPause": "The remaining amount of time, in seconds, before the Aurora DB cluster in serverless
mode is paused. A DB cluster can be paused only when it's idle (it has no connections).
",
"ScalingConfigurationInfo$SecondsBeforeTimeout": "The number of seconds before scaling times out. What happens when an attempted scaling action times out is determined by the TimeoutAction
setting.
",
@@ -2712,8 +2712,8 @@
"PendingCloudwatchLogsExports$LogTypesToEnable": "Log types that are in the process of being deactivated. After they are deactivated, these log types aren't exported to CloudWatch Logs.
",
"PendingCloudwatchLogsExports$LogTypesToDisable": "Log types that are in the process of being enabled. After they are enabled, these log types are exported to CloudWatch Logs.
",
"RestoreDBClusterFromS3Message$EnableCloudwatchLogsExports": "The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
Aurora PostgreSQL
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
",
- "RestoreDBClusterFromSnapshotMessage$EnableCloudwatchLogsExports": "The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used.
RDS for MySQL
Possible values are error
, general
, and slowquery
.
RDS for PostgreSQL
Possible values are postgresql
and upgrade
.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
Aurora PostgreSQL
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
- "RestoreDBClusterToPointInTimeMessage$EnableCloudwatchLogsExports": "The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.
RDS for MySQL
Possible values are error
, general
, and slowquery
.
RDS for PostgreSQL
Possible values are postgresql
and upgrade
.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
Aurora PostgreSQL
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
+ "RestoreDBClusterFromSnapshotMessage$EnableCloudwatchLogsExports": "The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used.
RDS for MySQL
Possible values are error
, general
, and slowquery
.
RDS for PostgreSQL
Possible values are postgresql
and upgrade
.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
Aurora PostgreSQL
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
+ "RestoreDBClusterToPointInTimeMessage$EnableCloudwatchLogsExports": "The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.
RDS for MySQL
Possible values are error
, general
, and slowquery
.
RDS for PostgreSQL
Possible values are postgresql
and upgrade
.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
Aurora PostgreSQL
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
"RestoreDBInstanceFromDBSnapshotMessage$EnableCloudwatchLogsExports": "The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
",
"RestoreDBInstanceFromS3Message$EnableCloudwatchLogsExports": "The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
",
"RestoreDBInstanceToPointInTimeMessage$EnableCloudwatchLogsExports": "The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
"
@@ -3164,7 +3164,7 @@
"ProcessorFeatureList": {
"base": null,
"refs": {
- "CreateDBInstanceMessage$ProcessorFeatures": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceMessage$ProcessorFeatures": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable.
",
"CreateDBInstanceReadReplicaMessage$ProcessorFeatures": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
This setting doesn't apply to RDS Custom.
",
"DBInstance$ProcessorFeatures": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
",
"DBSnapshot$ProcessorFeatures": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance when the DB snapshot was created.
",
@@ -3721,14 +3721,14 @@
"CopyDBClusterSnapshotMessage$SourceDBClusterSnapshotIdentifier": "The identifier of the DB cluster snapshot to copy. This parameter isn't case-sensitive.
You can't copy an encrypted, shared DB cluster snapshot from one Amazon Web Services Region to another.
Constraints:
- Must specify a valid system snapshot in the \"available\" state.
- If the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB snapshot identifier.
- If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB cluster snapshot ARN. For more information, go to Copying Snapshots Across Amazon Web Services Regions in the Amazon Aurora User Guide.
Example: my-cluster-snapshot1
",
"CopyDBClusterSnapshotMessage$TargetDBClusterSnapshotIdentifier": "The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter isn't case-sensitive.
Constraints:
- Must contain from 1 to 63 letters, numbers, or hyphens.
- First character must be a letter.
- Can't end with a hyphen or contain two consecutive hyphens.
Example: my-cluster-snapshot2
",
"CopyDBClusterSnapshotMessage$KmsKeyId": "The Amazon Web Services KMS key identifier for an encrypted DB cluster snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the Amazon Web Services KMS key.
If you copy an encrypted DB cluster snapshot from your Amazon Web Services account, you can specify a value for KmsKeyId
to encrypt the copy with a new KMS key. If you don't specify a value for KmsKeyId
, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.
If you copy an encrypted DB cluster snapshot that is shared from another Amazon Web Services account, then you must specify a value for KmsKeyId
.
To copy an encrypted DB cluster snapshot to another Amazon Web Services Region, you must set KmsKeyId
to the Amazon Web Services KMS key identifier you want to use to encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region. KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region.
If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId
parameter, an error is returned.
",
- "CopyDBClusterSnapshotMessage$PreSignedUrl": "The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot
API action in the Amazon Web Services Region that contains the source DB cluster snapshot to copy. The PreSignedUrl
parameter must be used when copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify PreSignedUrl
when you are copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.
The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:
- KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
- DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
- SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.
",
+ "CopyDBClusterSnapshotMessage$PreSignedUrl": "When you are copying a DB cluster snapshot from one Amazon Web Services GovCloud (US) Region to another, the URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot
API operation in the Amazon Web Services Region that contains the source DB cluster snapshot to copy. Use the PreSignedUrl
parameter when copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify PreSignedUrl
when copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other Amazon Web Services Regions.
The presigned URL must be a valid request for the CopyDBClusterSnapshot
API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request must contain the following parameter values:
- KmsKeyId - The KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL.
- DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
- SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.
",
"CopyDBParameterGroupMessage$SourceDBParameterGroupIdentifier": "The identifier or ARN for the source DB parameter group. For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.
Constraints:
",
"CopyDBParameterGroupMessage$TargetDBParameterGroupIdentifier": "The identifier for the copied DB parameter group.
Constraints:
- Can't be null, empty, or blank
- Must contain from 1 to 255 letters, numbers, or hyphens
- First character must be a letter
- Can't end with a hyphen or contain two consecutive hyphens
Example: my-db-parameter-group
",
"CopyDBParameterGroupMessage$TargetDBParameterGroupDescription": "A description for the copied DB parameter group.
",
- "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "The identifier for the source DB snapshot.
If the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB snapshot identifier. For example, you might specify rds:mysql-instance1-snapshot-20130805
.
If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB snapshot ARN. For example, you might specify arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
.
If you are copying from a shared manual DB snapshot, this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.
If you are copying an encrypted snapshot this parameter must be in the ARN format for the source Amazon Web Services Region, and must match the SourceDBSnapshotIdentifier
in the PreSignedUrl
parameter.
Constraints:
Example: rds:mydb-2012-04-02-00-01
Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
",
+ "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "The identifier for the source DB snapshot.
If the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB snapshot identifier. For example, you might specify rds:mysql-instance1-snapshot-20130805
.
If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB snapshot ARN. For example, you might specify arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
.
If you are copying from a shared manual DB snapshot, this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.
If you are copying an encrypted snapshot, this parameter must be in the ARN format for the source Amazon Web Services Region.
Constraints:
Example: rds:mydb-2012-04-02-00-01
Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
",
"CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "The identifier for the copy of the snapshot.
Constraints:
- Can't be null, empty, or blank
- Must contain from 1 to 255 letters, numbers, or hyphens
- First character must be a letter
- Can't end with a hyphen or contain two consecutive hyphens
Example: my-db-snapshot
",
"CopyDBSnapshotMessage$KmsKeyId": "The Amazon Web Services KMS key identifier for an encrypted DB snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.
If you copy an encrypted DB snapshot from your Amazon Web Services account, you can specify a value for this parameter to encrypt the copy with a new KMS key. If you don't specify a value for this parameter, then the copy of the DB snapshot is encrypted with the same Amazon Web Services KMS key as the source DB snapshot.
If you copy an encrypted DB snapshot that is shared from another Amazon Web Services account, then you must specify a value for this parameter.
If you specify this parameter when you copy an unencrypted snapshot, the copy is encrypted.
If you copy an encrypted snapshot to a different Amazon Web Services Region, then you must specify an Amazon Web Services KMS key identifier for the destination Amazon Web Services Region. KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region.
",
- "CopyDBSnapshotMessage$PreSignedUrl": "The URL that contains a Signature Version 4 signed request for the CopyDBSnapshot
API action in the source Amazon Web Services Region that contains the source DB snapshot to copy.
You must specify this parameter when you copy an encrypted DB snapshot from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl
when you are copying an encrypted DB snapshot in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CopyDBSnapshot
API action that can be executed in the source Amazon Web Services Region that contains the encrypted DB snapshot to be copied. The presigned URL request must contain the following parameter values:
- DestinationRegion - The Amazon Web Services Region that the encrypted DB snapshot is copied to. This Amazon Web Services Region is the same one where the CopyDBSnapshot
action is called that contains this presigned URL.
For example, if you copy an encrypted DB snapshot from the us-west-2 Amazon Web Services Region to the us-east-1 Amazon Web Services Region, then you call the CopyDBSnapshot
action in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CopyDBSnapshot
action in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion
in the presigned URL must be set to the us-east-1 Amazon Web Services Region.
- KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBSnapshot
action that is called in the destination Amazon Web Services Region, and the action contained in the presigned URL.
- SourceDBSnapshotIdentifier - The DB snapshot identifier for the encrypted snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBSnapshotIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.
",
+ "CopyDBSnapshotMessage$PreSignedUrl": "When you are copying a snapshot from one Amazon Web Services GovCloud (US) Region to another, the URL that contains a Signature Version 4 signed request for the CopyDBSnapshot
API operation in the source Amazon Web Services Region that contains the source DB snapshot to copy.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other Amazon Web Services Regions.
You must specify this parameter when you copy an encrypted DB snapshot from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl
when you are copying an encrypted DB snapshot in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CopyDBSnapshot
API operation that can run in the source Amazon Web Services Region that contains the encrypted DB snapshot to copy. The presigned URL request must contain the following parameter values:
-
DestinationRegion
- The Amazon Web Services Region that the encrypted DB snapshot is copied to. This Amazon Web Services Region is the same one where the CopyDBSnapshot
operation is called that contains this presigned URL.
For example, if you copy an encrypted DB snapshot from the us-west-2 Amazon Web Services Region to the us-east-1 Amazon Web Services Region, then you call the CopyDBSnapshot
operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CopyDBSnapshot
operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion
in the presigned URL must be set to the us-east-1 Amazon Web Services Region.
-
KmsKeyId
- The KMS key identifier for the KMS key to use to encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBSnapshot
operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL.
-
SourceDBSnapshotIdentifier
- The DB snapshot identifier for the encrypted snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBSnapshotIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.
",
"CopyDBSnapshotMessage$OptionGroupName": "The name of an option group to associate with the copy of the snapshot.
Specify this option if you are copying a snapshot from one Amazon Web Services Region to another, and your DB instance uses a nondefault option group. If your source DB instance uses Transparent Data Encryption for Oracle or Microsoft SQL Server, you must specify this option when copying across Amazon Web Services Regions. For more information, see Option group considerations in the Amazon RDS User Guide.
",
"CopyDBSnapshotMessage$TargetCustomAvailabilityZone": "The external custom Availability Zone (CAZ) identifier for the target CAZ.
Example: rds-caz-aiqhTgQv
.
",
"CopyOptionGroupMessage$SourceOptionGroupIdentifier": "The identifier for the source option group.
Constraints:
",
@@ -3751,8 +3751,8 @@
"CreateDBClusterMessage$PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
Format: ddd:hh24:mi-ddd:hh24:mi
The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.
Constraints: Minimum 30-minute window.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
"CreateDBClusterMessage$ReplicationSourceIdentifier": "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.
Valid for: Aurora DB clusters only
",
"CreateDBClusterMessage$KmsKeyId": "The Amazon Web Services KMS key identifier for an encrypted DB cluster.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.
When a KMS key isn't specified in KmsKeyId
:
-
If ReplicationSourceIdentifier
identifies an encrypted source, then Amazon RDS will use the KMS key used to encrypt the source. Otherwise, Amazon RDS will use your default KMS key.
-
If the StorageEncrypted
parameter is enabled and ReplicationSourceIdentifier
isn't specified, then Amazon RDS will use your default KMS key.
There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
If you create a read replica of an encrypted DB cluster in another Amazon Web Services Region, you must set KmsKeyId
to a KMS key identifier that is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the read replica in that Amazon Web Services Region.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
",
- "CreateDBClusterMessage$PreSignedUrl": "A URL that contains a Signature Version 4 signed request for the CreateDBCluster
action to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl
only when you are performing cross-Region replication from an encrypted DB cluster.
The pre-signed URL must be a valid request for the CreateDBCluster
API action that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster to be copied.
The pre-signed URL request must contain the following parameter values:
-
KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster
action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
-
DestinationRegion
- The name of the Amazon Web Services Region that Aurora read replica will be created in.
-
ReplicationSourceIdentifier
- The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier
would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.
Valid for: Aurora DB clusters only
",
- "CreateDBClusterMessage$EngineMode": "The DB engine mode of the DB cluster, either provisioned
, serverless
, parallelquery
, global
, or multimaster
.
The parallelquery
engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, and version 2.09 and higher 2.x versions.
The global
engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, and global
engine mode isn't required for any 2.x versions.
The multimaster
engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.
For Aurora PostgreSQL, the global
engine mode isn't required, and both the parallelquery
and the multimaster
engine modes currently aren't supported.
Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:
Valid for: Aurora DB clusters only
",
+ "CreateDBClusterMessage$PreSignedUrl": "When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, an URL that contains a Signature Version 4 signed request for the CreateDBCluster
operation to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl
only when you are performing cross-Region replication from an encrypted DB cluster.
The presigned URL must be a valid request for the CreateDBCluster
API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster to copy.
The presigned URL request must contain the following parameter values:
-
KmsKeyId
- The KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster
operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL.
-
DestinationRegion
- The name of the Amazon Web Services Region that the Aurora read replica will be created in.
-
ReplicationSourceIdentifier
- The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.
Valid for: Aurora DB clusters only
",
+ "CreateDBClusterMessage$EngineMode": "The DB engine mode of the DB cluster, either provisioned
, serverless
, parallelquery
, global
, or multimaster
.
The parallelquery
engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, and version 2.09 and higher 2.x versions.
The global
engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, and global
engine mode isn't required for any 2.x versions.
The multimaster
engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.
The serverless
engine mode only applies for Aurora Serverless v1 DB clusters.
For Aurora PostgreSQL, the global
engine mode isn't required, and both the parallelquery
and the multimaster
engine modes currently aren't supported.
Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:
Valid for: Aurora DB clusters only
",
"CreateDBClusterMessage$GlobalClusterIdentifier": "The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.
Valid for: Aurora DB clusters only
",
"CreateDBClusterMessage$Domain": "The Active Directory directory ID to create the DB cluster in.
For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.
For more information, see Kerberos authentication in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters only
",
"CreateDBClusterMessage$DomainIAMRoleName": "Specify the name of the IAM role to be used when making API calls to the Directory Service.
Valid for: Aurora DB clusters only
",
@@ -3767,7 +3767,7 @@
"CreateDBClusterSnapshotMessage$DBClusterIdentifier": "The identifier of the DB cluster to create a snapshot for. This parameter isn't case-sensitive.
Constraints:
Example: my-cluster1
",
"CreateDBInstanceMessage$DBName": "The meaning of this parameter differs according to the database engine you use.
MySQL
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
-
Must contain 1 to 64 letters or numbers.
-
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
-
Can't be a word reserved by the specified database engine
MariaDB
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
-
Must contain 1 to 64 letters or numbers.
-
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
-
Can't be a word reserved by the specified database engine
PostgreSQL
The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres
is created in the DB instance.
Constraints:
-
Must contain 1 to 63 letters, numbers, or underscores.
-
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
-
Can't be a word reserved by the specified database engine
Oracle
The Oracle System ID (SID) of the created DB instance. If you specify null
, the default value ORCL
is used. You can't specify the string NULL, or any other reserved word, for DBName
.
Default: ORCL
Constraints:
Amazon RDS Custom for Oracle
The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL
.
Default: ORCL
Constraints:
-
It must contain 1 to 8 alphanumeric characters.
-
It must contain a letter.
-
It can't be a word reserved by the database engine.
Amazon RDS Custom for SQL Server
Not applicable. Must be null.
SQL Server
Not applicable. Must be null.
Amazon Aurora MySQL
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.
Constraints:
Amazon Aurora PostgreSQL
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres
is created in the DB cluster.
Constraints:
-
It must contain 1 to 63 alphanumeric characters.
-
It must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0 to 9).
-
It can't be a word reserved by the database engine.
",
"CreateDBInstanceMessage$DBInstanceIdentifier": "The DB instance identifier. This parameter is stored as a lowercase string.
Constraints:
-
Must contain from 1 to 63 letters, numbers, or hyphens.
-
First character must be a letter.
-
Can't end with a hyphen or contain two consecutive hyphens.
Example: mydbinstance
",
- "CreateDBInstanceMessage$DBInstanceClass": "The compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
",
+ "CreateDBInstanceMessage$DBInstanceClass": "The compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide.
",
"CreateDBInstanceMessage$Engine": "The name of the database engine to be used for this instance.
Not every database engine is available for every Amazon Web Services Region.
Valid Values:
-
aurora
(for MySQL 5.6-compatible Aurora)
-
aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)
-
aurora-postgresql
-
custom-oracle-ee (for RDS Custom for Oracle instances)
-
custom-sqlserver-ee (for RDS Custom for SQL Server instances)
-
custom-sqlserver-se (for RDS Custom for SQL Server instances)
-
custom-sqlserver-web (for RDS Custom for SQL Server instances)
-
mariadb
-
mysql
-
oracle-ee
-
oracle-ee-cdb
-
oracle-se2
-
oracle-se2-cdb
-
postgres
-
sqlserver-ee
-
sqlserver-se
-
sqlserver-ex
-
sqlserver-web
",
"CreateDBInstanceMessage$MasterUsername": "The name for the master user.
Amazon Aurora
Not applicable. The name for the master user is managed by the DB cluster.
Amazon RDS
Constraints:
-
Required.
-
Must be 1 to 16 letters, numbers, or underscores.
-
First character must be a letter.
-
Can't be a reserved word for the chosen database engine.
",
"CreateDBInstanceMessage$MasterUserPassword": "The password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".
Amazon Aurora
Not applicable. The password for the master user is managed by the DB cluster.
MariaDB
Constraints: Must contain from 8 to 41 characters.
Microsoft SQL Server
Constraints: Must contain from 8 to 128 characters.
MySQL
Constraints: Must contain from 8 to 41 characters.
Oracle
Constraints: Must contain from 8 to 30 characters.
PostgreSQL
Constraints: Must contain from 8 to 128 characters.
",
@@ -3776,19 +3776,19 @@
"CreateDBInstanceMessage$PreferredMaintenanceWindow": "The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window.
Format: ddd:hh24:mi-ddd:hh24:mi
The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.
Constraints: Minimum 30-minute window.
",
"CreateDBInstanceMessage$DBParameterGroupName": "The name of the DB parameter group to associate with this DB instance. If you do not specify a value, then the default DB parameter group for the specified DB engine and version is used.
This setting doesn't apply to RDS Custom.
Constraints:
-
Must be 1 to 255 letters, numbers, or hyphens.
-
First character must be a letter
-
Can't end with a hyphen or contain two consecutive hyphens
",
"CreateDBInstanceMessage$PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod
parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User Guide.
Amazon Aurora
Not applicable. The daily time range for creating automated backups is managed by the DB cluster.
Constraints:
-
Must be in the format hh24:mi-hh24:mi
.
-
Must be in Universal Coordinated Time (UTC).
-
Must not conflict with the preferred maintenance window.
-
Must be at least 30 minutes.
",
- "CreateDBInstanceMessage$EngineVersion": "The version number of the database engine to use.
For a list of valid engine versions, use the DescribeDBEngineVersions
action.
The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
Amazon Aurora
Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.
Amazon RDS Custom for Oracle
A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string
. An example identifier is 19.my_cev1
. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.
Amazon RDS Custom for SQL Server
See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.
MariaDB
For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.
Microsoft SQL Server
For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.
MySQL
For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.
Oracle
For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.
PostgreSQL
For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.
",
- "CreateDBInstanceMessage$LicenseModel": "License model information for this DB instance.
Valid values: license-included
| bring-your-own-license
| general-public-license
This setting doesn't apply to RDS Custom.
",
- "CreateDBInstanceMessage$OptionGroupName": "A value that indicates that the DB instance should be associated with the specified option group.
Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance after it is associated with a DB instance.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceMessage$EngineVersion": "The version number of the database engine to use.
For a list of valid engine versions, use the DescribeDBEngineVersions
operation.
The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
Amazon Aurora
Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.
Amazon RDS Custom for Oracle
A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string
. An example identifier is 19.my_cev1
. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.
Amazon RDS Custom for SQL Server
See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.
MariaDB
For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.
Microsoft SQL Server
For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.
MySQL
For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.
Oracle
For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.
PostgreSQL
For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.
",
+ "CreateDBInstanceMessage$LicenseModel": "License model information for this DB instance.
Valid values: license-included
| bring-your-own-license
| general-public-license
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable.
",
+ "CreateDBInstanceMessage$OptionGroupName": "A value that indicates that the DB instance should be associated with the specified option group.
Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance after it is associated with a DB instance.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable.
",
"CreateDBInstanceMessage$CharacterSetName": "For supported engines, this value indicates that the DB instance should be associated with the specified CharacterSet
.
This setting doesn't apply to RDS Custom. However, if you need to change the character set, you can change it on the database itself.
Amazon Aurora
Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster
.
",
"CreateDBInstanceMessage$NcharCharacterSetName": "The name of the NCHAR character set for the Oracle DB instance.
This parameter doesn't apply to RDS Custom.
",
"CreateDBInstanceMessage$DBClusterIdentifier": "The identifier of the DB cluster that the instance will belong to.
This setting doesn't apply to RDS Custom.
",
- "CreateDBInstanceMessage$StorageType": "Specifies the storage type to be associated with the DB instance.
Valid values: standard | gp2 | io1
If you specify io1
, you must also include a value for the Iops
parameter.
Default: io1
if the Iops
parameter is specified, otherwise gp2
",
- "CreateDBInstanceMessage$TdeCredentialArn": "The ARN from the key store with which to associate the instance for TDE encryption.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceMessage$StorageType": "Specifies the storage type to be associated with the DB instance.
Valid values: standard | gp2 | io1
If you specify io1
, you must also include a value for the Iops
parameter.
Default: io1
if the Iops
parameter is specified, otherwise gp2
Amazon Aurora
Not applicable. Storage is managed by the DB cluster.
",
+ "CreateDBInstanceMessage$TdeCredentialArn": "The ARN from the key store with which to associate the instance for TDE encryption.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable.
",
"CreateDBInstanceMessage$TdeCredentialPassword": "The password for the given ARN from the key store in order to access the device.
This setting doesn't apply to RDS Custom.
",
"CreateDBInstanceMessage$KmsKeyId": "The Amazon Web Services KMS key identifier for an encrypted DB instance.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.
Amazon Aurora
Not applicable. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster
.
If StorageEncrypted
is enabled, and you do not specify a value for the KmsKeyId
parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
Amazon RDS Custom
A KMS key is required for RDS Custom instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted
, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key.
",
- "CreateDBInstanceMessage$Domain": "The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.
For more information, see Kerberos Authentication in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceMessage$Domain": "The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.
For more information, see Kerberos Authentication in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable. The domain is managed by the DB cluster.
",
"CreateDBInstanceMessage$MonitoringRoleArn": "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess
. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide.
If MonitoringInterval
is set to a value other than 0, then you must supply a MonitoringRoleArn
value.
This setting doesn't apply to RDS Custom.
",
- "CreateDBInstanceMessage$DomainIAMRoleName": "Specify the name of the IAM role to be used when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceMessage$DomainIAMRoleName": "Specify the name of the IAM role to be used when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable. The domain is managed by the DB cluster.
",
"CreateDBInstanceMessage$Timezone": "The time zone of the DB instance. The time zone parameter is currently supported only by Microsoft SQL Server.
",
"CreateDBInstanceMessage$PerformanceInsightsKMSKeyId": "The Amazon Web Services KMS key identifier for encryption of Performance Insights data.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.
If you do not specify a value for PerformanceInsightsKMSKeyId
, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
This setting doesn't apply to RDS Custom.
",
"CreateDBInstanceMessage$CustomIamInstanceProfile": "The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements:
-
The profile must exist in your account.
-
The profile must have an IAM role that Amazon EC2 has permissions to assume.
-
The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom
.
For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide.
This setting is required for RDS Custom.
",
@@ -3799,12 +3799,12 @@
"CreateDBInstanceReadReplicaMessage$DBInstanceClass": "The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
Default: Inherits from the source DB instance.
",
"CreateDBInstanceReadReplicaMessage$AvailabilityZone": "The Availability Zone (AZ) where the read replica will be created.
Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.
Example: us-east-1d
",
"CreateDBInstanceReadReplicaMessage$OptionGroupName": "The option group the DB instance is associated with. If omitted, the option group associated with the source instance is used.
For SQL Server, you must use the option group associated with the source instance.
This setting doesn't apply to RDS Custom.
",
- "CreateDBInstanceReadReplicaMessage$DBParameterGroupName": "The name of the DB parameter group to associate with this DB instance.
If you do not specify a value for DBParameterGroupName
, then Amazon RDS uses the DBParameterGroup
of source DB instance for a same Region read replica, or the default DBParameterGroup
for the specified DB engine for a cross-Region read replica.
Specifying a parameter group for this operation is only supported for Oracle DB instances. It isn't supported for RDS Custom.
Constraints:
-
Must be 1 to 255 letters, numbers, or hyphens.
-
First character must be a letter
-
Can't end with a hyphen or contain two consecutive hyphens
",
+ "CreateDBInstanceReadReplicaMessage$DBParameterGroupName": "The name of the DB parameter group to associate with this DB instance.
If you do not specify a value for DBParameterGroupName
, then Amazon RDS uses the DBParameterGroup
of source DB instance for a same Region read replica, or the default DBParameterGroup
for the specified DB engine for a cross-Region read replica.
Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. It isn't supported for RDS Custom.
Constraints:
-
Must be 1 to 255 letters, numbers, or hyphens.
-
First character must be a letter
-
Can't end with a hyphen or contain two consecutive hyphens
",
"CreateDBInstanceReadReplicaMessage$DBSubnetGroupName": "Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC.
Constraints:
-
Can only be specified if the source DB instance identifier specifies a DB instance in another Amazon Web Services Region.
-
If supplied, must match the name of an existing DBSubnetGroup.
-
The specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running.
-
All read replicas in one Amazon Web Services Region that are created from the same source DB instance must either:
Example: mydbsubnetgroup
",
"CreateDBInstanceReadReplicaMessage$StorageType": "Specifies the storage type to be associated with the read replica.
Valid values: standard | gp2 | io1
If you specify io1
, you must also include a value for the Iops
parameter.
Default: io1
if the Iops
parameter is specified, otherwise gp2
",
"CreateDBInstanceReadReplicaMessage$MonitoringRoleArn": "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess
. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.
If MonitoringInterval
is set to a value other than 0, then you must supply a MonitoringRoleArn
value.
This setting doesn't apply to RDS Custom.
",
"CreateDBInstanceReadReplicaMessage$KmsKeyId": "The Amazon Web Services KMS key identifier for an encrypted read replica.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.
If you create an encrypted read replica in the same Amazon Web Services Region as the source DB instance, then do not specify a value for this parameter. A read replica in the same Amazon Web Services Region is always encrypted with the same KMS key as the source DB instance.
If you create an encrypted read replica in a different Amazon Web Services Region, then you must specify a KMS key identifier for the destination Amazon Web Services Region. KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region.
You can't create an encrypted read replica from an unencrypted DB instance.
This setting doesn't apply to RDS Custom, which uses the same KMS key as the primary replica.
",
- "CreateDBInstanceReadReplicaMessage$PreSignedUrl": "The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica
API action in the source Amazon Web Services Region that contains the source DB instance.
You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl
when you are creating an encrypted read replica in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CreateDBInstanceReadReplica
API action that can be executed in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:
-
DestinationRegion
- The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica
action is called that contains this presigned URL.
For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica
action in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica
action in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion
in the presigned URL must be set to the us-east-1 Amazon Web Services Region.
-
KmsKeyId
- The Amazon Web Services KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica
action that is called in the destination Amazon Web Services Region, and the action contained in the presigned URL.
-
SourceDBInstanceIdentifier
- The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a presigned URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.
SourceRegion
isn't supported for SQL Server, because SQL Server on Amazon RDS doesn't support cross-Region read replicas.
This setting doesn't apply to RDS Custom.
",
+ "CreateDBInstanceReadReplicaMessage$PreSignedUrl": "When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica
API operation in the source Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.
You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl
when you are creating an encrypted read replica in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CreateDBInstanceReadReplica
API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:
-
DestinationRegion
- The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica
operation is called that contains this presigned URL.
For example, if you create an encrypted read replica in the us-east-1 Amazon Web Services Region, from a source DB instance in the us-west-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica
operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica
operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion
in the presigned URL must be set to the us-east-1 Amazon Web Services Region.
-
KmsKeyId
- The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica
operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL.
-
SourceDBInstanceIdentifier
- The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.
SourceRegion
isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas.
This setting doesn't apply to RDS Custom.
",
"CreateDBInstanceReadReplicaMessage$PerformanceInsightsKMSKeyId": "The Amazon Web Services KMS key identifier for encryption of Performance Insights data.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.
If you do not specify a value for PerformanceInsightsKMSKeyId
, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
This setting doesn't apply to RDS Custom.
",
"CreateDBInstanceReadReplicaMessage$Domain": "The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.
For more information, see Kerberos Authentication in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
",
"CreateDBInstanceReadReplicaMessage$DomainIAMRoleName": "Specify the name of the IAM role to be used when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom.
",
@@ -3828,7 +3828,7 @@
"CreateGlobalClusterMessage$SourceDBClusterIdentifier": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database. This parameter is optional.
",
"CreateGlobalClusterMessage$Engine": "The name of the database engine to be used for this DB cluster.
",
"CreateGlobalClusterMessage$EngineVersion": "The engine version of the Aurora global database.
",
- "CreateGlobalClusterMessage$DatabaseName": "The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.
",
+ "CreateGlobalClusterMessage$DatabaseName": "The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.
",
"CreateOptionGroupMessage$OptionGroupName": "Specifies the name of the option group to be created.
Constraints:
-
Must be 1 to 255 letters, numbers, or hyphens
-
First character must be a letter
-
Can't end with a hyphen or contain two consecutive hyphens
Example: myoptiongroup
",
"CreateOptionGroupMessage$EngineName": "Specifies the name of the engine that this option group should be associated with.
Valid Values:
-
mariadb
-
mysql
-
oracle-ee
-
oracle-ee-cdb
-
oracle-se2
-
oracle-se2-cdb
-
postgres
-
sqlserver-ee
-
sqlserver-se
-
sqlserver-ex
-
sqlserver-web
",
"CreateOptionGroupMessage$MajorEngineVersion": "Specifies the major version of the engine that this option group should be associated with.
",
@@ -3988,7 +3988,7 @@
"DBParameterGroupsMessage$Marker": "An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
",
"DBProxy$DBProxyName": "The identifier for the proxy. This name must be unique for all proxies owned by your Amazon Web Services account in the specified Amazon Web Services Region.
",
"DBProxy$DBProxyArn": "The Amazon Resource Name (ARN) for the proxy.
",
- "DBProxy$EngineFamily": "The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.
",
+ "DBProxy$EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. MYSQL
supports Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases. POSTGRESQL
supports Aurora PostgreSQL and RDS for PostgreSQL databases.
",
"DBProxy$VpcId": "Provides the VPC ID of the DB proxy.
",
"DBProxy$RoleArn": "The Amazon Resource Name (ARN) for the IAM role that the proxy uses to access Amazon Secrets Manager.
",
"DBProxy$Endpoint": "The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application.
",
@@ -4118,8 +4118,8 @@
"DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "The name of the DB security group to return details for.
",
"DescribeDBSecurityGroupsMessage$Marker": "An optional pagination token provided by a previous DescribeDBSecurityGroups
request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
",
"DescribeDBSnapshotAttributesMessage$DBSnapshotIdentifier": "The identifier for the DB snapshot to describe the attributes for.
",
- "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "The ID of the DB instance to retrieve the list of DB snapshots for. This parameter can't be used in conjunction with DBSnapshotIdentifier
. This parameter isn't case-sensitive.
Constraints:
",
- "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "A specific DB snapshot identifier to describe. This parameter can't be used in conjunction with DBInstanceIdentifier
. This value is stored as a lowercase string.
Constraints:
-
If supplied, must match the identifier of an existing DBSnapshot.
-
If this identifier is for an automated snapshot, the SnapshotType
parameter must also be specified.
",
+ "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "The ID of the DB instance to retrieve the list of DB snapshots for. This parameter isn't case-sensitive.
Constraints:
",
+ "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "A specific DB snapshot identifier to describe. This value is stored as a lowercase string.
Constraints:
-
If supplied, must match the identifier of an existing DBSnapshot.
-
If this identifier is for an automated snapshot, the SnapshotType
parameter must also be specified.
",
"DescribeDBSnapshotsMessage$SnapshotType": "The type of snapshots to be returned. You can specify one of the following values:
-
automated
- Return all DB snapshots that have been automatically taken by Amazon RDS for my Amazon Web Services account.
-
manual
- Return all DB snapshots that have been taken by my Amazon Web Services account.
-
shared
- Return all manual DB snapshots that have been shared to my Amazon Web Services account.
-
public
- Return all DB snapshots that have been marked as public.
-
awsbackup
- Return the DB snapshots managed by the Amazon Web Services Backup service.
For information about Amazon Web Services Backup, see the Amazon Web Services Backup Developer Guide.
The awsbackup
type does not apply to Aurora.
If you don't specify a SnapshotType
value, then both automated and manual snapshots are returned. Shared and public DB snapshots are not included in the returned results by default. You can include shared snapshots with these results by enabling the IncludeShared
parameter. You can include public snapshots with these results by enabling the IncludePublic
parameter.
The IncludeShared
and IncludePublic
parameters don't apply for SnapshotType
values of manual
or automated
. The IncludePublic
parameter doesn't apply when SnapshotType
is set to shared
. The IncludeShared
parameter doesn't apply when SnapshotType
is set to public
.
",
"DescribeDBSnapshotsMessage$Marker": "An optional pagination token provided by a previous DescribeDBSnapshots
request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
",
"DescribeDBSnapshotsMessage$DbiResourceId": "A specific DB resource ID to describe.
",
@@ -4258,11 +4258,11 @@
"ModifyDBClusterMessage$PerformanceInsightsKMSKeyId": "The Amazon Web Services KMS key identifier for encryption of Performance Insights data.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.
If you don't specify a value for PerformanceInsightsKMSKeyId
, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
Valid for: Multi-AZ DB clusters only
",
"ModifyDBClusterParameterGroupMessage$DBClusterParameterGroupName": "The name of the DB cluster parameter group to modify.
",
"ModifyDBClusterSnapshotAttributeMessage$DBClusterSnapshotIdentifier": "The identifier for the DB cluster snapshot to modify the attributes for.
",
- "ModifyDBClusterSnapshotAttributeMessage$AttributeName": "The name of the DB cluster snapshot attribute to modify.
To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB cluster snapshot, set this value to restore
.
To view the list of attributes available to modify, use the DescribeDBClusterSnapshotAttributes API action.
",
+ "ModifyDBClusterSnapshotAttributeMessage$AttributeName": "The name of the DB cluster snapshot attribute to modify.
To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB cluster snapshot, set this value to restore
.
To view the list of attributes available to modify, use the DescribeDBClusterSnapshotAttributes API operation.
",
"ModifyDBInstanceMessage$DBInstanceIdentifier": "The DB instance identifier. This value is stored as a lowercase string.
Constraints:
",
- "ModifyDBInstanceMessage$DBInstanceClass": "The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately
is enabled for this request.
This setting doesn't apply to RDS Custom for Oracle.
Default: Uses existing setting
",
+ "ModifyDBInstanceMessage$DBInstanceClass": "The new compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide.
If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately
is enabled for this request.
This setting doesn't apply to RDS Custom for Oracle.
Default: Uses existing setting
",
"ModifyDBInstanceMessage$DBSubnetGroupName": "The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.
Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you enable ApplyImmediately
.
This parameter doesn't apply to RDS Custom.
Constraints: If supplied, must match the name of an existing DBSubnetGroup.
Example: mydbsubnetgroup
",
- "ModifyDBInstanceMessage$MasterUserPassword": "The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".
Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword
element exists in the PendingModifiedValues
element of the operation response.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster
.
Default: Uses existing setting
MariaDB
Constraints: Must contain from 8 to 41 characters.
Microsoft SQL Server
Constraints: Must contain from 8 to 128 characters.
MySQL
Constraints: Must contain from 8 to 41 characters.
Oracle
Constraints: Must contain from 8 to 30 characters.
PostgreSQL
Constraints: Must contain from 8 to 128 characters.
Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.
",
+ "ModifyDBInstanceMessage$MasterUserPassword": "The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".
Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword
element exists in the PendingModifiedValues
element of the operation response.
This setting doesn't apply to RDS Custom.
Amazon Aurora
Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster
.
Default: Uses existing setting
MariaDB
Constraints: Must contain from 8 to 41 characters.
Microsoft SQL Server
Constraints: Must contain from 8 to 128 characters.
MySQL
Constraints: Must contain from 8 to 41 characters.
Oracle
Constraints: Must contain from 8 to 30 characters.
PostgreSQL
Constraints: Must contain from 8 to 128 characters.
Amazon RDS API operations never return the password, so this operation provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.
",
"ModifyDBInstanceMessage$DBParameterGroupName": "The name of the DB parameter group to apply to the DB instance.
Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. In this case, the DB instance isn't rebooted automatically, and the parameter changes aren't applied during the next maintenance window. However, if you modify dynamic parameters in the newly associated DB parameter group, these changes are applied immediately without a reboot.
This setting doesn't apply to RDS Custom.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter group family as the DB instance.
",
"ModifyDBInstanceMessage$PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod
parameter. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User Guide.
Amazon Aurora
Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster
.
Constraints:
-
Must be in the format hh24:mi-hh24:mi
-
Must be in Universal Time Coordinated (UTC)
-
Must not conflict with the preferred maintenance window
-
Must be at least 30 minutes
",
"ModifyDBInstanceMessage$PreferredMaintenanceWindow": "The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.
For more information, see Amazon RDS Maintenance Window in the Amazon RDS User Guide.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
",
@@ -4283,11 +4283,11 @@
"ModifyDBProxyRequest$DBProxyName": "The identifier for the DBProxy
to modify.
",
"ModifyDBProxyRequest$NewDBProxyName": "The new identifier for the DBProxy
. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.
",
"ModifyDBProxyRequest$RoleArn": "The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access secrets in Amazon Web Services Secrets Manager.
",
- "ModifyDBProxyTargetGroupRequest$TargetGroupName": "The name of the new target group to assign to the proxy.
",
- "ModifyDBProxyTargetGroupRequest$DBProxyName": "The name of the new proxy to which to assign the target group.
",
+ "ModifyDBProxyTargetGroupRequest$TargetGroupName": "The name of the target group to modify.
",
+ "ModifyDBProxyTargetGroupRequest$DBProxyName": "The name of the proxy.
",
"ModifyDBProxyTargetGroupRequest$NewName": "The new name for the modified DBProxyTarget
. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.
",
"ModifyDBSnapshotAttributeMessage$DBSnapshotIdentifier": "The identifier for the DB snapshot to modify the attributes for.
",
- "ModifyDBSnapshotAttributeMessage$AttributeName": "The name of the DB snapshot attribute to modify.
To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, set this value to restore
.
To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes API action.
",
+ "ModifyDBSnapshotAttributeMessage$AttributeName": "The name of the DB snapshot attribute to modify.
To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, set this value to restore
.
To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes API operation.
",
"ModifyDBSnapshotMessage$DBSnapshotIdentifier": "The identifier of the DB snapshot to modify.
",
"ModifyDBSnapshotMessage$EngineVersion": "The engine version to upgrade the DB snapshot to.
The following are the database engines and engine versions that are available when you upgrade a DB snapshot.
MySQL
Oracle
-
12.1.0.2.v8
(supported for 12.1.0.1 DB snapshots)
-
11.2.0.4.v12
(supported for 11.2.0.2 DB snapshots)
-
11.2.0.4.v11
(supported for 11.2.0.3 DB snapshots)
PostgreSQL
For the list of engine versions that are available for upgrading a DB snapshot, see Upgrading the PostgreSQL DB Engine for Amazon RDS.
",
"ModifyDBSnapshotMessage$OptionGroupName": "The option group to identify with the upgraded DB snapshot.
You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more information, see Option group considerations in the Amazon RDS User Guide.
",
@@ -4415,8 +4415,8 @@
"RestoreDBClusterFromS3Message$DBClusterIdentifier": "The name of the DB cluster to create from the source data in the Amazon S3 bucket. This parameter isn't case-sensitive.
Constraints:
-
Must contain from 1 to 63 letters, numbers, or hyphens.
-
First character must be a letter.
-
Can't end with a hyphen or contain two consecutive hyphens.
Example: my-cluster1
",
"RestoreDBClusterFromS3Message$DBClusterParameterGroupName": "The name of the DB cluster parameter group to associate with the restored DB cluster. If this argument is omitted, default.aurora5.6
is used.
Constraints:
",
"RestoreDBClusterFromS3Message$DBSubnetGroupName": "A DB subnet group to associate with the restored DB cluster.
Constraints: If supplied, must match the name of an existing DBSubnetGroup.
Example: mydbsubnetgroup
",
- "RestoreDBClusterFromS3Message$Engine": "The name of the database engine to be used for this DB cluster.
Valid Values: aurora
(for MySQL 5.6-compatible Aurora), aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql
",
- "RestoreDBClusterFromS3Message$EngineVersion": "The version number of the database engine to use.
To list all of the available engine versions for aurora
(for MySQL 5.6-compatible Aurora), use the following command:
aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"
To list all of the available engine versions for aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:
aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"
To list all of the available engine versions for aurora-postgresql
, use the following command:
aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"
Aurora MySQL
Example: 5.6.10a
, 5.6.mysql_aurora.1.19.2
, 5.7.12
, 5.7.mysql_aurora.2.04.5
, 8.0.mysql_aurora.3.01.0
Aurora PostgreSQL
Example: 9.6.3
, 10.7
",
+ "RestoreDBClusterFromS3Message$Engine": "The name of the database engine to be used for this DB cluster.
Valid Values: aurora
(for MySQL 5.6-compatible Aurora) and aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)
",
+ "RestoreDBClusterFromS3Message$EngineVersion": "The version number of the database engine to use.
To list all of the available engine versions for aurora
(for MySQL 5.6-compatible Aurora), use the following command:
aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"
To list all of the available engine versions for aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:
aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"
Aurora MySQL
Example: 5.6.10a
, 5.6.mysql_aurora.1.19.2
, 5.7.mysql_aurora.2.07.1
, 8.0.mysql_aurora.3.02.0
",
"RestoreDBClusterFromS3Message$MasterUsername": "The name of the master user for the restored DB cluster.
Constraints:
-
Must be 1 to 16 letters or numbers.
-
First character must be a letter.
-
Can't be a reserved word for the chosen database engine.
",
"RestoreDBClusterFromS3Message$MasterUserPassword": "The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".
Constraints: Must contain from 8 to 41 characters.
",
"RestoreDBClusterFromS3Message$OptionGroupName": "A value that indicates that the restored DB cluster should be associated with the specified option group.
Permanent options can't be removed from an option group. An option group can't be removed from a DB cluster once it is associated with a DB cluster.
",
@@ -4537,7 +4537,7 @@
"StartDBClusterMessage$DBClusterIdentifier": "The DB cluster identifier of the Amazon Aurora DB cluster to be started. This parameter is stored as a lowercase string.
",
"StartDBInstanceAutomatedBackupsReplicationMessage$SourceDBInstanceArn": "The Amazon Resource Name (ARN) of the source DB instance for the replicated automated backups, for example, arn:aws:rds:us-west-2:123456789012:db:mydatabase
.
",
"StartDBInstanceAutomatedBackupsReplicationMessage$KmsKeyId": "The Amazon Web Services KMS key identifier for encryption of the replicated automated backups. The KMS key ID is the Amazon Resource Name (ARN) for the KMS encryption key in the destination Amazon Web Services Region, for example, arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE
.
",
- "StartDBInstanceAutomatedBackupsReplicationMessage$PreSignedUrl": "A URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication action to be called in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the StartDBInstanceAutomatedBackupsReplication API action that can be executed in the Amazon Web Services Region that contains the source DB instance.
",
+ "StartDBInstanceAutomatedBackupsReplicationMessage$PreSignedUrl": "In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication
operation to call in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the StartDBInstanceAutomatedBackupsReplication
API operation that can run in the Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other Amazon Web Services Regions.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.
",
"StartDBInstanceMessage$DBInstanceIdentifier": "The user-supplied instance identifier.
",
"StartExportTaskMessage$ExportTaskIdentifier": "A unique identifier for the snapshot export task. This ID isn't an identifier for the Amazon S3 bucket where the snapshot is to be exported to.
",
"StartExportTaskMessage$SourceArn": "The Amazon Resource Name (ARN) of the snapshot to export to Amazon S3.
",
@@ -4572,7 +4572,7 @@
"ValidStorageOptions$StorageType": "The valid storage types for your DB instance. For example, gp2, io1.
",
"VpcSecurityGroupIdList$member": null,
"VpcSecurityGroupMembership$VpcSecurityGroupId": "The name of the VPC security group.
",
- "VpcSecurityGroupMembership$Status": "The status of the VPC security group.
"
+ "VpcSecurityGroupMembership$Status": "The membership status of the VPC security group.
Currently, the only valid status is active
.
"
}
},
"String255": {
diff --git a/gems/aws-sdk-athena/CHANGELOG.md b/gems/aws-sdk-athena/CHANGELOG.md
index 32cf3ecc358..c9ad36d1b4e 100644
--- a/gems/aws-sdk-athena/CHANGELOG.md
+++ b/gems/aws-sdk-athena/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.56.0 (2022-07-21)
+------------------
+
+* Feature - This feature allows customers to retrieve runtime statistics for completed queries
+
1.55.0 (2022-07-14)
------------------
diff --git a/gems/aws-sdk-athena/VERSION b/gems/aws-sdk-athena/VERSION
index 094d6ad00ce..3ebf789f5a8 100644
--- a/gems/aws-sdk-athena/VERSION
+++ b/gems/aws-sdk-athena/VERSION
@@ -1 +1 @@
-1.55.0
+1.56.0
diff --git a/gems/aws-sdk-athena/lib/aws-sdk-athena.rb b/gems/aws-sdk-athena/lib/aws-sdk-athena.rb
index d028f9f3c4c..6099c8514d0 100644
--- a/gems/aws-sdk-athena/lib/aws-sdk-athena.rb
+++ b/gems/aws-sdk-athena/lib/aws-sdk-athena.rb
@@ -48,6 +48,6 @@
# @!group service
module Aws::Athena
- GEM_VERSION = '1.55.0'
+ GEM_VERSION = '1.56.0'
end
diff --git a/gems/aws-sdk-athena/lib/aws-sdk-athena/client.rb b/gems/aws-sdk-athena/lib/aws-sdk-athena/client.rb
index d10b39a895d..392a8137749 100644
--- a/gems/aws-sdk-athena/lib/aws-sdk-athena/client.rb
+++ b/gems/aws-sdk-athena/lib/aws-sdk-athena/client.rb
@@ -1175,6 +1175,60 @@ def get_query_results(params = {}, options = {})
req.send_request(options)
end
+ # Returns query execution runtime statistics related to a single
+ # execution of a query if you have access to the workgroup in which the
+ # query ran. Query execution runtime statistics are returned only
+ # when QueryExecutionStatus$State is in a SUCCEEDED or FAILED state.
+ #
+ # @option params [required, String] :query_execution_id
+ # The unique ID of the query execution.
+ #
+ # @return [Types::GetQueryRuntimeStatisticsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetQueryRuntimeStatisticsOutput#query_runtime_statistics #query_runtime_statistics} => Types::QueryRuntimeStatistics
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_query_runtime_statistics({
+ # query_execution_id: "QueryExecutionId", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.query_runtime_statistics.timeline.query_queue_time_in_millis #=> Integer
+ # resp.query_runtime_statistics.timeline.query_planning_time_in_millis #=> Integer
+ # resp.query_runtime_statistics.timeline.engine_execution_time_in_millis #=> Integer
+ # resp.query_runtime_statistics.timeline.service_processing_time_in_millis #=> Integer
+ # resp.query_runtime_statistics.timeline.total_execution_time_in_millis #=> Integer
+ # resp.query_runtime_statistics.rows.input_rows #=> Integer
+ # resp.query_runtime_statistics.rows.input_bytes #=> Integer
+ # resp.query_runtime_statistics.rows.output_bytes #=> Integer
+ # resp.query_runtime_statistics.rows.output_rows #=> Integer
+ # resp.query_runtime_statistics.output_stage.stage_id #=> Integer
+ # resp.query_runtime_statistics.output_stage.state #=> String
+ # resp.query_runtime_statistics.output_stage.output_bytes #=> Integer
+ # resp.query_runtime_statistics.output_stage.output_rows #=> Integer
+ # resp.query_runtime_statistics.output_stage.input_bytes #=> Integer
+ # resp.query_runtime_statistics.output_stage.input_rows #=> Integer
+ # resp.query_runtime_statistics.output_stage.execution_time #=> Integer
+ # resp.query_runtime_statistics.output_stage.query_stage_plan.name #=> String
+ # resp.query_runtime_statistics.output_stage.query_stage_plan.identifier #=> String
+ # resp.query_runtime_statistics.output_stage.query_stage_plan.children #=> Array
+ # resp.query_runtime_statistics.output_stage.query_stage_plan.children[0] #=> Types::QueryStagePlanNode
+ # resp.query_runtime_statistics.output_stage.query_stage_plan.remote_sources #=> Array
+ # resp.query_runtime_statistics.output_stage.query_stage_plan.remote_sources[0] #=> String
+ # resp.query_runtime_statistics.output_stage.sub_stages #=> Array
+ # resp.query_runtime_statistics.output_stage.sub_stages[0] #=> Types::QueryStage
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatistics AWS API Documentation
+ #
+ # @overload get_query_runtime_statistics(params = {})
+ # @param [Hash] params ({})
+ def get_query_runtime_statistics(params = {}, options = {})
+ req = build_request(:get_query_runtime_statistics, params)
+ req.send_request(options)
+ end
+
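A minimal usage sketch of the new operation (the query execution ID is a placeholder); statistics are available only once the query has reached the SUCCEEDED or FAILED state:

```ruby
require "aws-sdk-athena"

athena = Aws::Athena::Client.new(region: "us-east-1")

resp = athena.get_query_runtime_statistics(
  query_execution_id: "abc1234d-5efg-67hi-jklm-89n0op12qr34" # placeholder
)

# Break the total execution time down by phase.
timeline = resp.query_runtime_statistics.timeline
puts "queued:   #{timeline.query_queue_time_in_millis} ms"
puts "planning: #{timeline.query_planning_time_in_millis} ms"
puts "engine:   #{timeline.engine_execution_time_in_millis} ms"
puts "total:    #{timeline.total_execution_time_in_millis} ms"
```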
# Returns table metadata for the specified catalog, database, and table.
#
# @option params [required, String] :catalog_name
@@ -1372,6 +1426,8 @@ def list_databases(params = {}, options = {})
# * {Types::ListEngineVersionsOutput#engine_versions #engine_versions} => Array<Types::EngineVersion>
# * {Types::ListEngineVersionsOutput#next_token #next_token} => String
#
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
# @example Request syntax with placeholder values
#
# resp = client.list_engine_versions({
@@ -2120,7 +2176,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-athena'
- context[:gem_version] = '1.55.0'
+ context[:gem_version] = '1.56.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-athena/lib/aws-sdk-athena/client_api.rb b/gems/aws-sdk-athena/lib/aws-sdk-athena/client_api.rb
index 875e4343895..95c550ef41a 100644
--- a/gems/aws-sdk-athena/lib/aws-sdk-athena/client_api.rb
+++ b/gems/aws-sdk-athena/lib/aws-sdk-athena/client_api.rb
@@ -82,6 +82,8 @@ module ClientApi
GetQueryExecutionOutput = Shapes::StructureShape.new(name: 'GetQueryExecutionOutput')
GetQueryResultsInput = Shapes::StructureShape.new(name: 'GetQueryResultsInput')
GetQueryResultsOutput = Shapes::StructureShape.new(name: 'GetQueryResultsOutput')
+ GetQueryRuntimeStatisticsInput = Shapes::StructureShape.new(name: 'GetQueryRuntimeStatisticsInput')
+ GetQueryRuntimeStatisticsOutput = Shapes::StructureShape.new(name: 'GetQueryRuntimeStatisticsOutput')
GetTableMetadataInput = Shapes::StructureShape.new(name: 'GetTableMetadataInput')
GetTableMetadataOutput = Shapes::StructureShape.new(name: 'GetTableMetadataOutput')
GetWorkGroupInput = Shapes::StructureShape.new(name: 'GetWorkGroupInput')
@@ -142,6 +144,13 @@ module ClientApi
QueryExecutionState = Shapes::StringShape.new(name: 'QueryExecutionState')
QueryExecutionStatistics = Shapes::StructureShape.new(name: 'QueryExecutionStatistics')
QueryExecutionStatus = Shapes::StructureShape.new(name: 'QueryExecutionStatus')
+ QueryRuntimeStatistics = Shapes::StructureShape.new(name: 'QueryRuntimeStatistics')
+ QueryRuntimeStatisticsRows = Shapes::StructureShape.new(name: 'QueryRuntimeStatisticsRows')
+ QueryRuntimeStatisticsTimeline = Shapes::StructureShape.new(name: 'QueryRuntimeStatisticsTimeline')
+ QueryStage = Shapes::StructureShape.new(name: 'QueryStage')
+ QueryStagePlanNode = Shapes::StructureShape.new(name: 'QueryStagePlanNode')
+ QueryStagePlanNodes = Shapes::ListShape.new(name: 'QueryStagePlanNodes')
+ QueryStages = Shapes::ListShape.new(name: 'QueryStages')
QueryString = Shapes::StringShape.new(name: 'QueryString')
ResourceNotFoundException = Shapes::StructureShape.new(name: 'ResourceNotFoundException')
ResultConfiguration = Shapes::StructureShape.new(name: 'ResultConfiguration')
@@ -159,6 +168,7 @@ module ClientApi
StopQueryExecutionInput = Shapes::StructureShape.new(name: 'StopQueryExecutionInput')
StopQueryExecutionOutput = Shapes::StructureShape.new(name: 'StopQueryExecutionOutput')
String = Shapes::StringShape.new(name: 'String')
+ StringList = Shapes::ListShape.new(name: 'StringList')
TableMetadata = Shapes::StructureShape.new(name: 'TableMetadata')
TableMetadataList = Shapes::ListShape.new(name: 'TableMetadataList')
TableTypeString = Shapes::StringShape.new(name: 'TableTypeString')
@@ -387,6 +397,12 @@ module ClientApi
GetQueryResultsOutput.add_member(:next_token, Shapes::ShapeRef.new(shape: Token, location_name: "NextToken"))
GetQueryResultsOutput.struct_class = Types::GetQueryResultsOutput
+ GetQueryRuntimeStatisticsInput.add_member(:query_execution_id, Shapes::ShapeRef.new(shape: QueryExecutionId, required: true, location_name: "QueryExecutionId"))
+ GetQueryRuntimeStatisticsInput.struct_class = Types::GetQueryRuntimeStatisticsInput
+
+ GetQueryRuntimeStatisticsOutput.add_member(:query_runtime_statistics, Shapes::ShapeRef.new(shape: QueryRuntimeStatistics, location_name: "QueryRuntimeStatistics"))
+ GetQueryRuntimeStatisticsOutput.struct_class = Types::GetQueryRuntimeStatisticsOutput
+
GetTableMetadataInput.add_member(:catalog_name, Shapes::ShapeRef.new(shape: CatalogNameString, required: true, location_name: "CatalogName"))
GetTableMetadataInput.add_member(:database_name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "DatabaseName"))
GetTableMetadataInput.add_member(:table_name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "TableName"))
@@ -559,6 +575,45 @@ module ClientApi
QueryExecutionStatus.add_member(:athena_error, Shapes::ShapeRef.new(shape: AthenaError, location_name: "AthenaError"))
QueryExecutionStatus.struct_class = Types::QueryExecutionStatus
+ QueryRuntimeStatistics.add_member(:timeline, Shapes::ShapeRef.new(shape: QueryRuntimeStatisticsTimeline, location_name: "Timeline"))
+ QueryRuntimeStatistics.add_member(:rows, Shapes::ShapeRef.new(shape: QueryRuntimeStatisticsRows, location_name: "Rows"))
+ QueryRuntimeStatistics.add_member(:output_stage, Shapes::ShapeRef.new(shape: QueryStage, location_name: "OutputStage"))
+ QueryRuntimeStatistics.struct_class = Types::QueryRuntimeStatistics
+
+ QueryRuntimeStatisticsRows.add_member(:input_rows, Shapes::ShapeRef.new(shape: Long, location_name: "InputRows"))
+ QueryRuntimeStatisticsRows.add_member(:input_bytes, Shapes::ShapeRef.new(shape: Long, location_name: "InputBytes"))
+ QueryRuntimeStatisticsRows.add_member(:output_bytes, Shapes::ShapeRef.new(shape: Long, location_name: "OutputBytes"))
+ QueryRuntimeStatisticsRows.add_member(:output_rows, Shapes::ShapeRef.new(shape: Long, location_name: "OutputRows"))
+ QueryRuntimeStatisticsRows.struct_class = Types::QueryRuntimeStatisticsRows
+
+ QueryRuntimeStatisticsTimeline.add_member(:query_queue_time_in_millis, Shapes::ShapeRef.new(shape: Long, location_name: "QueryQueueTimeInMillis"))
+ QueryRuntimeStatisticsTimeline.add_member(:query_planning_time_in_millis, Shapes::ShapeRef.new(shape: Long, location_name: "QueryPlanningTimeInMillis"))
+ QueryRuntimeStatisticsTimeline.add_member(:engine_execution_time_in_millis, Shapes::ShapeRef.new(shape: Long, location_name: "EngineExecutionTimeInMillis"))
+ QueryRuntimeStatisticsTimeline.add_member(:service_processing_time_in_millis, Shapes::ShapeRef.new(shape: Long, location_name: "ServiceProcessingTimeInMillis"))
+ QueryRuntimeStatisticsTimeline.add_member(:total_execution_time_in_millis, Shapes::ShapeRef.new(shape: Long, location_name: "TotalExecutionTimeInMillis"))
+ QueryRuntimeStatisticsTimeline.struct_class = Types::QueryRuntimeStatisticsTimeline
+
+ QueryStage.add_member(:stage_id, Shapes::ShapeRef.new(shape: Long, location_name: "StageId"))
+ QueryStage.add_member(:state, Shapes::ShapeRef.new(shape: String, location_name: "State"))
+ QueryStage.add_member(:output_bytes, Shapes::ShapeRef.new(shape: Long, location_name: "OutputBytes"))
+ QueryStage.add_member(:output_rows, Shapes::ShapeRef.new(shape: Long, location_name: "OutputRows"))
+ QueryStage.add_member(:input_bytes, Shapes::ShapeRef.new(shape: Long, location_name: "InputBytes"))
+ QueryStage.add_member(:input_rows, Shapes::ShapeRef.new(shape: Long, location_name: "InputRows"))
+ QueryStage.add_member(:execution_time, Shapes::ShapeRef.new(shape: Long, location_name: "ExecutionTime"))
+ QueryStage.add_member(:query_stage_plan, Shapes::ShapeRef.new(shape: QueryStagePlanNode, location_name: "QueryStagePlan"))
+ QueryStage.add_member(:sub_stages, Shapes::ShapeRef.new(shape: QueryStages, location_name: "SubStages"))
+ QueryStage.struct_class = Types::QueryStage
+
+ QueryStagePlanNode.add_member(:name, Shapes::ShapeRef.new(shape: String, location_name: "Name"))
+ QueryStagePlanNode.add_member(:identifier, Shapes::ShapeRef.new(shape: String, location_name: "Identifier"))
+ QueryStagePlanNode.add_member(:children, Shapes::ShapeRef.new(shape: QueryStagePlanNodes, location_name: "Children"))
+ QueryStagePlanNode.add_member(:remote_sources, Shapes::ShapeRef.new(shape: StringList, location_name: "RemoteSources"))
+ QueryStagePlanNode.struct_class = Types::QueryStagePlanNode
+
+ QueryStagePlanNodes.member = Shapes::ShapeRef.new(shape: QueryStagePlanNode)
+
+ QueryStages.member = Shapes::ShapeRef.new(shape: QueryStage)
+
ResourceNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessage, location_name: "Message"))
ResourceNotFoundException.add_member(:resource_name, Shapes::ShapeRef.new(shape: AmazonResourceName, location_name: "ResourceName"))
ResourceNotFoundException.struct_class = Types::ResourceNotFoundException
@@ -607,6 +662,8 @@ module ClientApi
StopQueryExecutionOutput.struct_class = Types::StopQueryExecutionOutput
+ StringList.member = Shapes::ShapeRef.new(shape: String)
+
TableMetadata.add_member(:name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "Name"))
TableMetadata.add_member(:create_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "CreateTime"))
TableMetadata.add_member(:last_access_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastAccessTime"))
@@ -927,6 +984,16 @@ module ClientApi
)
end)
+ api.add_operation(:get_query_runtime_statistics, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "GetQueryRuntimeStatistics"
+ o.http_method = "POST"
+ o.http_request_uri = "/"
+ o.input = Shapes::ShapeRef.new(shape: GetQueryRuntimeStatisticsInput)
+ o.output = Shapes::ShapeRef.new(shape: GetQueryRuntimeStatisticsOutput)
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerException)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
+ end)
+
api.add_operation(:get_table_metadata, Seahorse::Model::Operation.new.tap do |o|
o.name = "GetTableMetadata"
o.http_method = "POST"
@@ -989,6 +1056,12 @@ module ClientApi
o.output = Shapes::ShapeRef.new(shape: ListEngineVersionsOutput)
o.errors << Shapes::ShapeRef.new(shape: InternalServerException)
o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
+ o[:pager] = Aws::Pager.new(
+ limit_key: "max_results",
+ tokens: {
+ "next_token" => "next_token"
+ }
+ )
end)
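With this pager registered, `list_engine_versions` responses become enumerable across pages; a small sketch (assuming the `effective_engine_version` field on `Types::EngineVersion`):

```ruby
athena = Aws::Athena::Client.new

# Each iteration yields one page; the SDK follows next_token automatically.
athena.list_engine_versions(max_results: 10).each do |page|
  page.engine_versions.each { |ev| puts ev.effective_engine_version }
end
```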
api.add_operation(:list_named_queries, Seahorse::Model::Operation.new.tap do |o|
diff --git a/gems/aws-sdk-athena/lib/aws-sdk-athena/types.rb b/gems/aws-sdk-athena/lib/aws-sdk-athena/types.rb
index 839f942e0f9..3b2294bde6d 100644
--- a/gems/aws-sdk-athena/lib/aws-sdk-athena/types.rb
+++ b/gems/aws-sdk-athena/lib/aws-sdk-athena/types.rb
@@ -1132,6 +1132,37 @@ class GetQueryResultsOutput < Struct.new(
include Aws::Structure
end
+ # @note When making an API call, you may pass GetQueryRuntimeStatisticsInput
+ # data as a hash:
+ #
+ # {
+ # query_execution_id: "QueryExecutionId", # required
+ # }
+ #
+ # @!attribute [rw] query_execution_id
+ # The unique ID of the query execution.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatisticsInput AWS API Documentation
+ #
+ class GetQueryRuntimeStatisticsInput < Struct.new(
+ :query_execution_id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] query_runtime_statistics
+ # Runtime statistics about the query execution.
+ # @return [Types::QueryRuntimeStatistics]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatisticsOutput AWS API Documentation
+ #
+ class GetQueryRuntimeStatisticsOutput < Struct.new(
+ :query_runtime_statistics)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# @note When making an API call, you may pass GetTableMetadataInput
# data as a hash:
#
@@ -2047,6 +2078,200 @@ class QueryExecutionStatus < Struct.new(
include Aws::Structure
end
+ # The query execution timeline, statistics on input and output rows and
+ # bytes, and the different query stages that form the query execution
+ # plan.
+ #
+ # @!attribute [rw] timeline
+ # Timeline statistics such as query queue time, planning time,
+ # execution time, service processing time, and total execution time.
+ # @return [Types::QueryRuntimeStatisticsTimeline]
+ #
+ # @!attribute [rw] rows
+ # Statistics such as input rows and bytes read by the query, rows and
+ # bytes output by the query, and the number of rows written by the
+ # query.
+ # @return [Types::QueryRuntimeStatisticsRows]
+ #
+ # @!attribute [rw] output_stage
+ # Stage statistics such as input and output rows and bytes, execution
+ # time, and stage state. This information also includes substages and
+ # the query stage plan.
+ # @return [Types::QueryStage]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/QueryRuntimeStatistics AWS API Documentation
+ #
+ class QueryRuntimeStatistics < Struct.new(
+ :timeline,
+ :rows,
+ :output_stage)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Statistics such as input rows and bytes read by the query, rows and
+ # bytes output by the query, and the number of rows written by the
+ # query.
+ #
+ # @!attribute [rw] input_rows
+ # The number of rows read to execute the query.
+ # @return [Integer]
+ #
+ # @!attribute [rw] input_bytes
+ # The number of bytes read to execute the query.
+ # @return [Integer]
+ #
+ # @!attribute [rw] output_bytes
+ # The number of bytes returned by the query.
+ # @return [Integer]
+ #
+ # @!attribute [rw] output_rows
+ # The number of rows returned by the query.
+ # @return [Integer]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/QueryRuntimeStatisticsRows AWS API Documentation
+ #
+ class QueryRuntimeStatisticsRows < Struct.new(
+ :input_rows,
+ :input_bytes,
+ :output_bytes,
+ :output_rows)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Timeline statistics such as query queue time, planning time, execution
+ # time, service processing time, and total execution time.
+ #
+ # @!attribute [rw] query_queue_time_in_millis
+ # The number of milliseconds that the query was in your query queue
+ # waiting for resources. Note that if transient errors occur, Athena
+ # might automatically add the query back to the queue.
+ # @return [Integer]
+ #
+ # @!attribute [rw] query_planning_time_in_millis
+ # The number of milliseconds that Athena took to plan the query
+ # processing flow. This includes the time spent retrieving table
+ # partitions from the data source. Note that because the query engine
+ # performs the query planning, query planning time is a subset of
+ # engine processing time.
+ # @return [Integer]
+ #
+ # @!attribute [rw] engine_execution_time_in_millis
+ # The number of milliseconds that the query took to execute.
+ # @return [Integer]
+ #
+ # @!attribute [rw] service_processing_time_in_millis
+ # The number of milliseconds that Athena took to finalize and publish
+ # the query results after the query engine finished running the query.
+ # @return [Integer]
+ #
+ # @!attribute [rw] total_execution_time_in_millis
+ # The number of milliseconds that Athena took to run the query.
+ # @return [Integer]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/QueryRuntimeStatisticsTimeline AWS API Documentation
+ #
+ class QueryRuntimeStatisticsTimeline < Struct.new(
+ :query_queue_time_in_millis,
+ :query_planning_time_in_millis,
+ :engine_execution_time_in_millis,
+ :service_processing_time_in_millis,
+ :total_execution_time_in_millis)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Stage statistics such as input and output rows and bytes, execution
+ # time, and stage state. This information also includes substages and the
+ # query stage plan.
+ #
+ # @!attribute [rw] stage_id
+ # The identifier for a stage.
+ # @return [Integer]
+ #
+ # @!attribute [rw] state
+ # State of the stage after query execution.
+ # @return [String]
+ #
+ # @!attribute [rw] output_bytes
+ # The number of bytes output from the stage after execution.
+ # @return [Integer]
+ #
+ # @!attribute [rw] output_rows
+ # The number of rows output from the stage after execution.
+ # @return [Integer]
+ #
+ # @!attribute [rw] input_bytes
+ # The number of bytes input into the stage for execution.
+ # @return [Integer]
+ #
+ # @!attribute [rw] input_rows
+ # The number of rows input into the stage for execution.
+ # @return [Integer]
+ #
+ # @!attribute [rw] execution_time
+ # Time taken to execute this stage.
+ # @return [Integer]
+ #
+ # @!attribute [rw] query_stage_plan
+ # Stage plan information such as name, identifier, sub plans, and
+ # source stages.
+ # @return [Types::QueryStagePlanNode]
+ #
+ # @!attribute [rw] sub_stages
+ # List of sub query stages that form this stage execution plan.
+ # @return [Array&lt;Types::QueryStage&gt;]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/QueryStage AWS API Documentation
+ #
+ class QueryStage < Struct.new(
+ :stage_id,
+ :state,
+ :output_bytes,
+ :output_rows,
+ :input_bytes,
+ :input_rows,
+ :execution_time,
+ :query_stage_plan,
+ :sub_stages)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Stage plan information such as name, identifier, sub plans, and remote
+ # sources.
+ #
+ # @!attribute [rw] name
+ # Name of the query stage plan that describes the operation this stage
+ # is performing as part of query execution.
+ # @return [String]
+ #
+ # @!attribute [rw] identifier
+ # Information about the operation this query stage plan node is
+ # performing.
+ # @return [String]
+ #
+ # @!attribute [rw] children
+ # Stage plan information such as name, identifier, sub plans, and
+ # remote sources of child plan nodes.
+ # @return [Array&lt;Types::QueryStagePlanNode&gt;]
+ #
+ # @!attribute [rw] remote_sources
+ # Source plan node IDs.
+ # @return [Array&lt;String&gt;]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/QueryStagePlanNode AWS API Documentation
+ #
+ class QueryStagePlanNode < Struct.new(
+ :name,
+ :identifier,
+ :children,
+ :remote_sources)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
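Because `QueryStage` nests sub-stages recursively, a depth-first walk is one way to render the execution plan. A hypothetical helper, where `resp` is a `GetQueryRuntimeStatistics` response as in the client example above:

```ruby
# Print each stage with its state and row counts, indenting by depth.
def print_stages(stage, depth = 0)
  return if stage.nil?
  puts "#{'  ' * depth}stage #{stage.stage_id} [#{stage.state}] " \
       "#{stage.input_rows} rows in -> #{stage.output_rows} rows out"
  (stage.sub_stages || []).each { |sub| print_stages(sub, depth + 1) }
end

print_stages(resp.query_runtime_statistics.output_stage)
```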
# A resource, such as a workgroup, was not found.
#
# @!attribute [rw] message
diff --git a/gems/aws-sdk-cloudwatch/CHANGELOG.md b/gems/aws-sdk-cloudwatch/CHANGELOG.md
index c6df4f81b03..0ae29e590bd 100644
--- a/gems/aws-sdk-cloudwatch/CHANGELOG.md
+++ b/gems/aws-sdk-cloudwatch/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.65.0 (2022-07-21)
+------------------
+
+* Feature - Adding support for the suppression of Composite Alarm actions
+
1.64.0 (2022-04-14)
------------------
diff --git a/gems/aws-sdk-cloudwatch/VERSION b/gems/aws-sdk-cloudwatch/VERSION
index 9405730420f..902c74186fb 100644
--- a/gems/aws-sdk-cloudwatch/VERSION
+++ b/gems/aws-sdk-cloudwatch/VERSION
@@ -1 +1 @@
-1.64.0
+1.65.0
diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch.rb
index 0ac42595a86..82292837ca6 100644
--- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch.rb
+++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch.rb
@@ -52,6 +52,6 @@
# @!group service
module Aws::CloudWatch
- GEM_VERSION = '1.64.0'
+ GEM_VERSION = '1.65.0'
end
diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client.rb
index c3c405f8ff7..dc55f2c10af 100644
--- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client.rb
+++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client.rb
@@ -809,6 +809,12 @@ def describe_alarm_history(params = {}, options = {})
# resp.composite_alarms[0].state_reason_data #=> String
# resp.composite_alarms[0].state_updated_timestamp #=> Time
# resp.composite_alarms[0].state_value #=> String, one of "OK", "ALARM", "INSUFFICIENT_DATA"
+ # resp.composite_alarms[0].state_transitioned_timestamp #=> Time
+ # resp.composite_alarms[0].actions_suppressed_by #=> String, one of "WaitPeriod", "ExtensionPeriod", "Alarm"
+ # resp.composite_alarms[0].actions_suppressed_reason #=> String
+ # resp.composite_alarms[0].actions_suppressor #=> String
+ # resp.composite_alarms[0].actions_suppressor_wait_period #=> Integer
+ # resp.composite_alarms[0].actions_suppressor_extension_period #=> Integer
# resp.metric_alarms #=> Array
# resp.metric_alarms[0].alarm_name #=> String
# resp.metric_alarms[0].alarm_arn #=> String
@@ -2550,6 +2556,26 @@ def put_anomaly_detector(params = {}, options = {})
# use them to scope user permissions, by granting a user permission to
# access or change only resources with certain tag values.
#
+ # @option params [String] :actions_suppressor
+ # Actions will be suppressed if the suppressor alarm is in the `ALARM`
+ # state. `ActionsSuppressor` can be an AlarmName or an Amazon Resource
+ # Name (ARN) from an existing alarm.
+ #
+ # @option params [Integer] :actions_suppressor_wait_period
+ # The maximum time in seconds that the composite alarm waits for the
+ # suppressor alarm to go into the `ALARM` state. After this time, the
+ # composite alarm performs its actions.
+ #
+ # `WaitPeriod` is required only when `ActionsSuppressor` is specified.
+ #
+ # @option params [Integer] :actions_suppressor_extension_period
+ # The maximum time in seconds that the composite alarm waits after
+ # the suppressor alarm goes out of the `ALARM` state. After this time, the
+ # composite alarm performs its actions.
+ #
+ # `ExtensionPeriod` is required only when `ActionsSuppressor` is
+ # specified.
+ #
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
@@ -2568,6 +2594,9 @@ def put_anomaly_detector(params = {}, options = {})
# value: "TagValue", # required
# },
# ],
+ # actions_suppressor: "AlarmArn",
+ # actions_suppressor_wait_period: 1,
+ # actions_suppressor_extension_period: 1,
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutCompositeAlarm AWS API Documentation
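A sketch of wiring the new suppressor fields together (the alarm names and SNS topic ARN are hypothetical): while the suppressor alarm is in `ALARM`, the composite alarm's actions are suppressed, subject to the wait and extension periods.

```ruby
require "aws-sdk-cloudwatch"

cw = Aws::CloudWatch::Client.new(region: "us-east-1")

cw.put_composite_alarm(
  alarm_name: "ServiceHealth",
  alarm_rule: "ALARM(HighErrorRate) AND ALARM(HighLatency)",
  alarm_actions: ["arn:aws:sns:us-east-1:123456789012:on-call"],
  actions_suppressor: "NetworkMaintenance",  # AlarmName or ARN of an existing alarm
  actions_suppressor_wait_period: 120,       # seconds to wait for the suppressor to enter ALARM
  actions_suppressor_extension_period: 180   # seconds to keep suppressing after it leaves ALARM
)
```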
@@ -3633,7 +3662,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-cloudwatch'
- context[:gem_version] = '1.64.0'
+ context[:gem_version] = '1.65.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client_api.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client_api.rb
index addbde7ad53..772be63ba82 100644
--- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client_api.rb
+++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client_api.rb
@@ -16,6 +16,8 @@ module ClientApi
AccountId = Shapes::StringShape.new(name: 'AccountId')
ActionPrefix = Shapes::StringShape.new(name: 'ActionPrefix')
ActionsEnabled = Shapes::BooleanShape.new(name: 'ActionsEnabled')
+ ActionsSuppressedBy = Shapes::StringShape.new(name: 'ActionsSuppressedBy')
+ ActionsSuppressedReason = Shapes::StringShape.new(name: 'ActionsSuppressedReason')
AlarmArn = Shapes::StringShape.new(name: 'AlarmArn')
AlarmDescription = Shapes::StringShape.new(name: 'AlarmDescription')
AlarmHistoryItem = Shapes::StructureShape.new(name: 'AlarmHistoryItem')
@@ -246,6 +248,7 @@ module ClientApi
StopMetricStreamsInput = Shapes::StructureShape.new(name: 'StopMetricStreamsInput')
StopMetricStreamsOutput = Shapes::StructureShape.new(name: 'StopMetricStreamsOutput')
StorageResolution = Shapes::IntegerShape.new(name: 'StorageResolution')
+ SuppressorPeriod = Shapes::IntegerShape.new(name: 'SuppressorPeriod')
Tag = Shapes::StructureShape.new(name: 'Tag')
TagKey = Shapes::StringShape.new(name: 'TagKey')
TagKeyList = Shapes::ListShape.new(name: 'TagKeyList')
@@ -310,6 +313,12 @@ module ClientApi
CompositeAlarm.add_member(:state_reason_data, Shapes::ShapeRef.new(shape: StateReasonData, location_name: "StateReasonData"))
CompositeAlarm.add_member(:state_updated_timestamp, Shapes::ShapeRef.new(shape: Timestamp, location_name: "StateUpdatedTimestamp"))
CompositeAlarm.add_member(:state_value, Shapes::ShapeRef.new(shape: StateValue, location_name: "StateValue"))
+ CompositeAlarm.add_member(:state_transitioned_timestamp, Shapes::ShapeRef.new(shape: Timestamp, location_name: "StateTransitionedTimestamp"))
+ CompositeAlarm.add_member(:actions_suppressed_by, Shapes::ShapeRef.new(shape: ActionsSuppressedBy, location_name: "ActionsSuppressedBy"))
+ CompositeAlarm.add_member(:actions_suppressed_reason, Shapes::ShapeRef.new(shape: ActionsSuppressedReason, location_name: "ActionsSuppressedReason"))
+ CompositeAlarm.add_member(:actions_suppressor, Shapes::ShapeRef.new(shape: AlarmArn, location_name: "ActionsSuppressor"))
+ CompositeAlarm.add_member(:actions_suppressor_wait_period, Shapes::ShapeRef.new(shape: SuppressorPeriod, location_name: "ActionsSuppressorWaitPeriod"))
+ CompositeAlarm.add_member(:actions_suppressor_extension_period, Shapes::ShapeRef.new(shape: SuppressorPeriod, location_name: "ActionsSuppressorExtensionPeriod"))
CompositeAlarm.struct_class = Types::CompositeAlarm
CompositeAlarms.member = Shapes::ShapeRef.new(shape: CompositeAlarm)
@@ -802,6 +811,9 @@ module ClientApi
PutCompositeAlarmInput.add_member(:insufficient_data_actions, Shapes::ShapeRef.new(shape: ResourceList, location_name: "InsufficientDataActions"))
PutCompositeAlarmInput.add_member(:ok_actions, Shapes::ShapeRef.new(shape: ResourceList, location_name: "OKActions"))
PutCompositeAlarmInput.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "Tags"))
+ PutCompositeAlarmInput.add_member(:actions_suppressor, Shapes::ShapeRef.new(shape: AlarmArn, location_name: "ActionsSuppressor"))
+ PutCompositeAlarmInput.add_member(:actions_suppressor_wait_period, Shapes::ShapeRef.new(shape: SuppressorPeriod, location_name: "ActionsSuppressorWaitPeriod"))
+ PutCompositeAlarmInput.add_member(:actions_suppressor_extension_period, Shapes::ShapeRef.new(shape: SuppressorPeriod, location_name: "ActionsSuppressorExtensionPeriod"))
PutCompositeAlarmInput.struct_class = Types::PutCompositeAlarmInput
PutDashboardInput.add_member(:dashboard_name, Shapes::ShapeRef.new(shape: DashboardName, required: true, location_name: "DashboardName"))
diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/composite_alarm.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/composite_alarm.rb
index cb30d85ce62..74a63771923 100644
--- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/composite_alarm.rb
+++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/composite_alarm.rb
@@ -102,7 +102,8 @@ def state_reason_data
data[:state_reason_data]
end
- # The time stamp of the last update to the alarm state.
+ # Tracks the timestamp of any state update, even if `StateValue`
+ # doesn't change.
# @return [Time]
def state_updated_timestamp
data[:state_updated_timestamp]
@@ -114,6 +115,63 @@ def state_value
data[:state_value]
end
+ # The timestamp of the last change to the alarm's `StateValue`.
+ # @return [Time]
+ def state_transitioned_timestamp
+ data[:state_transitioned_timestamp]
+ end
+
+ # When the value is `ALARM`, it means that the actions are suppressed
+ # because the suppressor alarm is in the `ALARM` state. When the value
+ # is `WaitPeriod`, it means that the actions are suppressed because the
+ # composite alarm is waiting for the suppressor alarm to go into
+ # the `ALARM` state. The maximum waiting time is as specified in
+ # `ActionsSuppressorWaitPeriod`. After this time, the composite alarm
+ # performs its actions. When the value is `ExtensionPeriod`, it means
+ # that the actions are suppressed because the composite alarm is waiting
+ # after the suppressor alarm went out of the `ALARM` state. The maximum
+ # waiting time is as specified in `ActionsSuppressorExtensionPeriod`.
+ # After this time, the composite alarm performs its actions.
+ # @return [String]
+ def actions_suppressed_by
+ data[:actions_suppressed_by]
+ end
+
+ # Captures the reason for action suppression.
+ # @return [String]
+ def actions_suppressed_reason
+ data[:actions_suppressed_reason]
+ end
+
+ # Actions will be suppressed if the suppressor alarm is in the `ALARM`
+ # state. `ActionsSuppressor` can be an AlarmName or an Amazon Resource
+ # Name (ARN) from an existing alarm.
+ # @return [String]
+ def actions_suppressor
+ data[:actions_suppressor]
+ end
+
+ # The maximum time in seconds that the composite alarm waits for the
+ # suppressor alarm to go into the `ALARM` state. After this time, the
+ # composite alarm performs its actions.
+ #
+ # `WaitPeriod` is required only when `ActionsSuppressor` is specified.
+ # @return [Integer]
+ def actions_suppressor_wait_period
+ data[:actions_suppressor_wait_period]
+ end
+
+ # The maximum time in seconds that the composite alarm waits after
+ # the suppressor alarm goes out of the `ALARM` state. After this time, the
+ # composite alarm performs its actions.
+ #
+ # `ExtensionPeriod` is required only when `ActionsSuppressor` is
+ # specified.
+ # @return [Integer]
+ def actions_suppressor_extension_period
+ data[:actions_suppressor_extension_period]
+ end
+
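Reading the suppression state back through the resource interface might look like the following sketch (the alarm name is hypothetical, and it assumes the usual positional-identifier constructor for SDK resource classes):

```ruby
alarm = Aws::CloudWatch::CompositeAlarm.new("ServiceHealth")

if alarm.actions_suppressed_by
  puts "actions suppressed by #{alarm.actions_suppressed_by}: " \
       "#{alarm.actions_suppressed_reason}"
end
```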
# @!endgroup
# @return [Client]
diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/types.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/types.rb
index e2693c9c9fe..caceea78d57 100644
--- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/types.rb
+++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/types.rb
@@ -205,13 +205,60 @@ class AnomalyDetectorConfiguration < Struct.new(
# @return [String]
#
# @!attribute [rw] state_updated_timestamp
- # The time stamp of the last update to the alarm state.
+ # Tracks the timestamp of any state update, even if `StateValue`
+ # doesn't change.
# @return [Time]
#
# @!attribute [rw] state_value
# The state value for the alarm.
# @return [String]
#
+ # @!attribute [rw] state_transitioned_timestamp
+ # The timestamp of the last change to the alarm's `StateValue`.
+ # @return [Time]
+ #
+ # @!attribute [rw] actions_suppressed_by
+ # When the value is `ALARM`, it means that the actions are suppressed
+ # because the suppressor alarm is in the `ALARM` state. When the value
+ # is `WaitPeriod`, it means that the actions are suppressed because the
+ # composite alarm is waiting for the suppressor alarm to go into
+ # the `ALARM` state. The maximum waiting time is as specified in
+ # `ActionsSuppressorWaitPeriod`. After this time, the composite alarm
+ # performs its actions. When the value is `ExtensionPeriod`, it means
+ # that the actions are suppressed because the composite alarm is
+ # waiting after the suppressor alarm went out of the `ALARM` state.
+ # The maximum waiting time is as specified in
+ # `ActionsSuppressorExtensionPeriod`. After this time, the composite
+ # alarm performs its actions.
+ # @return [String]
+ #
+ # @!attribute [rw] actions_suppressed_reason
+ # Captures the reason for action suppression.
+ # @return [String]
+ #
+ # @!attribute [rw] actions_suppressor
+ # Actions will be suppressed if the suppressor alarm is in the `ALARM`
+ # state. `ActionsSuppressor` can be an AlarmName or an Amazon Resource
+ # Name (ARN) from an existing alarm.
+ # @return [String]
+ #
+ # @!attribute [rw] actions_suppressor_wait_period
+ # The maximum time in seconds that the composite alarm waits for the
+ # suppressor alarm to go into the `ALARM` state. After this time, the
+ # composite alarm performs its actions.
+ #
+ # `WaitPeriod` is required only when `ActionsSuppressor` is specified.
+ # @return [Integer]
+ #
+ # @!attribute [rw] actions_suppressor_extension_period
+ # The maximum time in seconds that the composite alarm waits after
+ # the suppressor alarm goes out of the `ALARM` state. After this time, the
+ # composite alarm performs its actions.
+ #
+ # `ExtensionPeriod` is required only when `ActionsSuppressor` is
+ # specified.
+ # @return [Integer]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/CompositeAlarm AWS API Documentation
#
class CompositeAlarm < Struct.new(
@@ -227,7 +274,13 @@ class CompositeAlarm < Struct.new(
:state_reason,
:state_reason_data,
:state_updated_timestamp,
- :state_value)
+ :state_value,
+ :state_transitioned_timestamp,
+ :actions_suppressed_by,
+ :actions_suppressed_reason,
+ :actions_suppressor,
+ :actions_suppressor_wait_period,
+ :actions_suppressor_extension_period)
SENSITIVE = []
include Aws::Structure
end
@@ -3525,6 +3578,9 @@ class PutAnomalyDetectorOutput < Aws::EmptyStructure; end
# value: "TagValue", # required
# },
# ],
+ # actions_suppressor: "AlarmArn",
+ # actions_suppressor_wait_period: 1,
+ # actions_suppressor_extension_period: 1,
# }
#
# @!attribute [rw] actions_enabled
@@ -3635,6 +3691,29 @@ class PutAnomalyDetectorOutput < Aws::EmptyStructure; end
# values.
# @return [Array&lt;Types::Tag&gt;]
#
+ # @!attribute [rw] actions_suppressor
+ # Actions will be suppressed if the suppressor alarm is in the `ALARM`
+ # state. `ActionsSuppressor` can be an AlarmName or an Amazon Resource
+ # Name (ARN) from an existing alarm.
+ # @return [String]
+ #
+ # @!attribute [rw] actions_suppressor_wait_period
+ # The maximum time in seconds that the composite alarm waits for the
+ # suppressor alarm to go into the `ALARM` state. After this time, the
+ # composite alarm performs its actions.
+ #
+ # `WaitPeriod` is required only when `ActionsSuppressor` is specified.
+ # @return [Integer]
+ #
+ # @!attribute [rw] actions_suppressor_extension_period
+ # The maximum time in seconds that the composite alarm waits after
+ # the suppressor alarm goes out of the `ALARM` state. After this time, the
+ # composite alarm performs its actions.
+ #
+ # `ExtensionPeriod` is required only when `ActionsSuppressor` is
+ # specified.
+ # @return [Integer]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutCompositeAlarmInput AWS API Documentation
#
class PutCompositeAlarmInput < Struct.new(
@@ -3645,7 +3724,10 @@ class PutCompositeAlarmInput < Struct.new(
:alarm_rule,
:insufficient_data_actions,
:ok_actions,
- :tags)
+ :tags,
+ :actions_suppressor,
+ :actions_suppressor_wait_period,
+ :actions_suppressor_extension_period)
SENSITIVE = []
include Aws::Structure
end
diff --git a/gems/aws-sdk-databasemigrationservice/CHANGELOG.md b/gems/aws-sdk-databasemigrationservice/CHANGELOG.md
index 34b11f65199..73c04908303 100644
--- a/gems/aws-sdk-databasemigrationservice/CHANGELOG.md
+++ b/gems/aws-sdk-databasemigrationservice/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.71.0 (2022-07-21)
+------------------
+
+* Feature - Documentation updates for Database Migration Service (DMS).
+
1.70.0 (2022-07-07)
------------------
diff --git a/gems/aws-sdk-databasemigrationservice/VERSION b/gems/aws-sdk-databasemigrationservice/VERSION
index 832e9afb6c1..df484cbb1d9 100644
--- a/gems/aws-sdk-databasemigrationservice/VERSION
+++ b/gems/aws-sdk-databasemigrationservice/VERSION
@@ -1 +1 @@
-1.70.0
+1.71.0
diff --git a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice.rb b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice.rb
index c35539af521..49da8a4462d 100644
--- a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice.rb
+++ b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice.rb
@@ -49,6 +49,6 @@
# @!group service
module Aws::DatabaseMigrationService
- GEM_VERSION = '1.70.0'
+ GEM_VERSION = '1.71.0'
end
diff --git a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client.rb b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client.rb
index 6a7f14ce871..c64f3e21feb 100644
--- a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client.rb
+++ b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client.rb
@@ -552,9 +552,9 @@ def cancel_replication_task_assessment_run(params = {}, options = {})
# The type of engine for the endpoint. Valid values, depending on the
# `EndpointType` value, include `"mysql"`, `"oracle"`, `"postgres"`,
# `"mariadb"`, `"aurora"`, `"aurora-postgresql"`, `"opensearch"`,
- # `"redshift"`, `"s3"`, `"db2"`, `"azuredb"`, `"sybase"`, `"dynamodb"`,
- # `"mongodb"`, `"kinesis"`, `"kafka"`, `"elasticsearch"`, `"docdb"`,
- # `"sqlserver"`, and `"neptune"`.
+ # `"redshift"`, `"s3"`, `"db2"`, `db2-zos`, `"azuredb"`, `"sybase"`,
+ # `"dynamodb"`, `"mongodb"`, `"kinesis"`, `"kafka"`, `"elasticsearch"`,
+ # `"docdb"`, `"sqlserver"`, `"neptune"`, and `babelfish`.
#
# @option params [String] :username
# The user name to be used to log in to the endpoint database.
@@ -7901,7 +7901,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-databasemigrationservice'
- context[:gem_version] = '1.70.0'
+ context[:gem_version] = '1.71.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/types.rb b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/types.rb
index 903c360be17..42ebedecb4f 100644
--- a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/types.rb
+++ b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/types.rb
@@ -809,9 +809,10 @@ class Connection < Struct.new(
# The type of engine for the endpoint. Valid values, depending on the
# `EndpointType` value, include `"mysql"`, `"oracle"`, `"postgres"`,
# `"mariadb"`, `"aurora"`, `"aurora-postgresql"`, `"opensearch"`,
- # `"redshift"`, `"s3"`, `"db2"`, `"azuredb"`, `"sybase"`,
+ # `"redshift"`, `"s3"`, `"db2"`, `db2-zos`, `"azuredb"`, `"sybase"`,
# `"dynamodb"`, `"mongodb"`, `"kinesis"`, `"kafka"`,
- # `"elasticsearch"`, `"docdb"`, `"sqlserver"`, and `"neptune"`.
+ # `"elasticsearch"`, `"docdb"`, `"sqlserver"`, `"neptune"`, and
+ # `"babelfish"`.
# @return [String]
#
# @!attribute [rw] username
@@ -10942,8 +10943,8 @@ class SybaseSettings < Struct.new(
# The state of the tables described.
#
# Valid states: Table does not exist \| Before load \| Full load \|
- # Table completed \| Table cancelled \| Table error \| Table all \|
- # Table updates \| Table is being reloaded
+ # Table completed \| Table cancelled \| Table error \| Table is being
+ # reloaded
# @return [String]
#
# @!attribute [rw] validation_pending_records
diff --git a/gems/aws-sdk-docdb/CHANGELOG.md b/gems/aws-sdk-docdb/CHANGELOG.md
index da657e18d23..deb51a44fff 100644
--- a/gems/aws-sdk-docdb/CHANGELOG.md
+++ b/gems/aws-sdk-docdb/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.43.0 (2022-07-21)
+------------------
+
+* Feature - Enable copy-on-write restore type
+
1.42.0 (2022-04-07)
------------------
diff --git a/gems/aws-sdk-docdb/VERSION b/gems/aws-sdk-docdb/VERSION
index a50908ca3da..b978278f05f 100644
--- a/gems/aws-sdk-docdb/VERSION
+++ b/gems/aws-sdk-docdb/VERSION
@@ -1 +1 @@
-1.42.0
+1.43.0
diff --git a/gems/aws-sdk-docdb/lib/aws-sdk-docdb.rb b/gems/aws-sdk-docdb/lib/aws-sdk-docdb.rb
index 7631ddd114b..62b9aefb5d4 100644
--- a/gems/aws-sdk-docdb/lib/aws-sdk-docdb.rb
+++ b/gems/aws-sdk-docdb/lib/aws-sdk-docdb.rb
@@ -49,6 +49,6 @@
# @!group service
module Aws::DocDB
- GEM_VERSION = '1.42.0'
+ GEM_VERSION = '1.43.0'
end
diff --git a/gems/aws-sdk-docdb/lib/aws-sdk-docdb/client.rb b/gems/aws-sdk-docdb/lib/aws-sdk-docdb/client.rb
index 82eeb2ce9cb..617d8762b18 100644
--- a/gems/aws-sdk-docdb/lib/aws-sdk-docdb/client.rb
+++ b/gems/aws-sdk-docdb/lib/aws-sdk-docdb/client.rb
@@ -985,6 +985,7 @@ def copy_db_cluster_snapshot(params = {}, options = {})
# resp.db_cluster.associated_roles #=> Array
# resp.db_cluster.associated_roles[0].role_arn #=> String
# resp.db_cluster.associated_roles[0].status #=> String
+ # resp.db_cluster.clone_group_id #=> String
# resp.db_cluster.cluster_create_time #=> Time
# resp.db_cluster.enabled_cloudwatch_logs_exports #=> Array
# resp.db_cluster.enabled_cloudwatch_logs_exports[0] #=> String
@@ -1217,6 +1218,10 @@ def create_db_cluster_snapshot(params = {}, options = {})
# @option params [required, String] :db_cluster_identifier
# The identifier of the cluster that the instance will belong to.
#
+ # @option params [Boolean] :copy_tags_to_snapshot
+ # A value that indicates whether to copy tags from the DB instance to
+ # snapshots of the DB instance. By default, tags are not copied.
+ #
# @option params [Integer] :promotion_tier
# A value that specifies the order in which an Amazon DocumentDB replica
# is promoted to the primary instance after a failure of the existing
@@ -1267,6 +1272,7 @@ def create_db_cluster_snapshot(params = {}, options = {})
# },
# ],
# db_cluster_identifier: "String", # required
+ # copy_tags_to_snapshot: false,
# promotion_tier: 1,
# enable_performance_insights: false,
# performance_insights_kms_key_id: "String",
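A sketch of the new Amazon DocumentDB option (identifiers hypothetical); with `copy_tags_to_snapshot: true`, tags on the instance propagate to snapshots taken from it:

```ruby
require "aws-sdk-docdb"

docdb = Aws::DocDB::Client.new(region: "us-east-1")

docdb.create_db_instance(
  db_instance_identifier: "sample-instance",
  db_instance_class: "db.r5.large",
  engine: "docdb",
  db_cluster_identifier: "sample-cluster",
  copy_tags_to_snapshot: true # by default, tags are not copied
)
```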
@@ -1329,6 +1335,7 @@ def create_db_cluster_snapshot(params = {}, options = {})
# resp.db_instance.kms_key_id #=> String
# resp.db_instance.dbi_resource_id #=> String
# resp.db_instance.ca_certificate_identifier #=> String
+ # resp.db_instance.copy_tags_to_snapshot #=> Boolean
# resp.db_instance.promotion_tier #=> Integer
# resp.db_instance.db_instance_arn #=> String
# resp.db_instance.enabled_cloudwatch_logs_exports #=> Array
@@ -1710,6 +1717,7 @@ def create_global_cluster(params = {}, options = {})
# resp.db_cluster.associated_roles #=> Array
# resp.db_cluster.associated_roles[0].role_arn #=> String
# resp.db_cluster.associated_roles[0].status #=> String
+ # resp.db_cluster.clone_group_id #=> String
# resp.db_cluster.cluster_create_time #=> Time
# resp.db_cluster.enabled_cloudwatch_logs_exports #=> Array
# resp.db_cluster.enabled_cloudwatch_logs_exports[0] #=> String
@@ -1887,6 +1895,7 @@ def delete_db_cluster_snapshot(params = {}, options = {})
# resp.db_instance.kms_key_id #=> String
# resp.db_instance.dbi_resource_id #=> String
# resp.db_instance.ca_certificate_identifier #=> String
+ # resp.db_instance.copy_tags_to_snapshot #=> Boolean
# resp.db_instance.promotion_tier #=> Integer
# resp.db_instance.db_instance_arn #=> String
# resp.db_instance.enabled_cloudwatch_logs_exports #=> Array
@@ -2549,6 +2558,7 @@ def describe_db_cluster_snapshots(params = {}, options = {})
# resp.db_clusters[0].associated_roles #=> Array
# resp.db_clusters[0].associated_roles[0].role_arn #=> String
# resp.db_clusters[0].associated_roles[0].status #=> String
+ # resp.db_clusters[0].clone_group_id #=> String
# resp.db_clusters[0].cluster_create_time #=> Time
# resp.db_clusters[0].enabled_cloudwatch_logs_exports #=> Array
# resp.db_clusters[0].enabled_cloudwatch_logs_exports[0] #=> String
@@ -2791,6 +2801,7 @@ def describe_db_engine_versions(params = {}, options = {})
# resp.db_instances[0].kms_key_id #=> String
# resp.db_instances[0].dbi_resource_id #=> String
# resp.db_instances[0].ca_certificate_identifier #=> String
+ # resp.db_instances[0].copy_tags_to_snapshot #=> Boolean
# resp.db_instances[0].promotion_tier #=> Integer
# resp.db_instances[0].db_instance_arn #=> String
# resp.db_instances[0].enabled_cloudwatch_logs_exports #=> Array
@@ -3508,6 +3519,7 @@ def describe_pending_maintenance_actions(params = {}, options = {})
# resp.db_cluster.associated_roles #=> Array
# resp.db_cluster.associated_roles[0].role_arn #=> String
# resp.db_cluster.associated_roles[0].status #=> String
+ # resp.db_cluster.clone_group_id #=> String
# resp.db_cluster.cluster_create_time #=> Time
# resp.db_cluster.enabled_cloudwatch_logs_exports #=> Array
# resp.db_cluster.enabled_cloudwatch_logs_exports[0] #=> String
@@ -3754,6 +3766,7 @@ def list_tags_for_resource(params = {}, options = {})
# resp.db_cluster.associated_roles #=> Array
# resp.db_cluster.associated_roles[0].role_arn #=> String
# resp.db_cluster.associated_roles[0].status #=> String
+ # resp.db_cluster.clone_group_id #=> String
# resp.db_cluster.cluster_create_time #=> Time
# resp.db_cluster.enabled_cloudwatch_logs_exports #=> Array
# resp.db_cluster.enabled_cloudwatch_logs_exports[0] #=> String
@@ -3991,6 +4004,10 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
# Indicates the certificate that needs to be associated with the
# instance.
#
+ # @option params [Boolean] :copy_tags_to_snapshot
+ # A value that indicates whether to copy all tags from the DB instance
+ # to snapshots of the DB instance. By default, tags are not copied.
+ #
# @option params [Integer] :promotion_tier
# A value that specifies the order in which an Amazon DocumentDB replica
# is promoted to the primary instance after a failure of the existing
@@ -4035,6 +4052,7 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
# auto_minor_version_upgrade: false,
# new_db_instance_identifier: "String",
# ca_certificate_identifier: "String",
+ # copy_tags_to_snapshot: false,
# promotion_tier: 1,
# enable_performance_insights: false,
# performance_insights_kms_key_id: "String",
@@ -4097,6 +4115,7 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
# resp.db_instance.kms_key_id #=> String
# resp.db_instance.dbi_resource_id #=> String
# resp.db_instance.ca_certificate_identifier #=> String
+ # resp.db_instance.copy_tags_to_snapshot #=> Boolean
# resp.db_instance.promotion_tier #=> Integer
# resp.db_instance.db_instance_arn #=> String
# resp.db_instance.enabled_cloudwatch_logs_exports #=> Array
@@ -4393,6 +4412,7 @@ def modify_global_cluster(params = {}, options = {})
# resp.db_instance.kms_key_id #=> String
# resp.db_instance.dbi_resource_id #=> String
# resp.db_instance.ca_certificate_identifier #=> String
+ # resp.db_instance.copy_tags_to_snapshot #=> Boolean
# resp.db_instance.promotion_tier #=> Integer
# resp.db_instance.db_instance_arn #=> String
# resp.db_instance.enabled_cloudwatch_logs_exports #=> Array
@@ -4766,6 +4786,7 @@ def reset_db_cluster_parameter_group(params = {}, options = {})
# resp.db_cluster.associated_roles #=> Array
# resp.db_cluster.associated_roles[0].role_arn #=> String
# resp.db_cluster.associated_roles[0].status #=> String
+ # resp.db_cluster.clone_group_id #=> String
# resp.db_cluster.cluster_create_time #=> Time
# resp.db_cluster.enabled_cloudwatch_logs_exports #=> Array
# resp.db_cluster.enabled_cloudwatch_logs_exports[0] #=> String
@@ -4798,6 +4819,22 @@ def restore_db_cluster_from_snapshot(params = {}, options = {})
#
# * Cannot end with a hyphen or contain two consecutive hyphens.
#
+ # @option params [String] :restore_type
+ # The type of restore to be performed. You can specify one of the
+ # following values:
+ #
+ # * `full-copy` - The new DB cluster is restored as a full copy of the
+ # source DB cluster.
+ #
+ # * `copy-on-write` - The new DB cluster is restored as a clone of the
+ # source DB cluster.
+ #
+ # Constraints: You can't specify `copy-on-write` if the engine version
+ # of the source DB cluster is earlier than 1.11.
+ #
+ # If you don't specify a `RestoreType` value, then the new DB cluster
+ # is restored as a full copy of the source DB cluster.
+ #
# @option params [required, String] :source_db_cluster_identifier
# The identifier of the source cluster from which to restore.
#
@@ -4902,6 +4939,7 @@ def restore_db_cluster_from_snapshot(params = {}, options = {})
#
# resp = client.restore_db_cluster_to_point_in_time({
# db_cluster_identifier: "String", # required
+ # restore_type: "String",
# source_db_cluster_identifier: "String", # required
# restore_to_time: Time.now,
# use_latest_restorable_time: false,
@@ -4959,6 +4997,7 @@ def restore_db_cluster_from_snapshot(params = {}, options = {})
# resp.db_cluster.associated_roles #=> Array
# resp.db_cluster.associated_roles[0].role_arn #=> String
# resp.db_cluster.associated_roles[0].status #=> String
+ # resp.db_cluster.clone_group_id #=> String
# resp.db_cluster.cluster_create_time #=> Time
# resp.db_cluster.enabled_cloudwatch_logs_exports #=> Array
# resp.db_cluster.enabled_cloudwatch_logs_exports[0] #=> String
@@ -5035,6 +5074,7 @@ def restore_db_cluster_to_point_in_time(params = {}, options = {})
# resp.db_cluster.associated_roles #=> Array
# resp.db_cluster.associated_roles[0].role_arn #=> String
# resp.db_cluster.associated_roles[0].status #=> String
+ # resp.db_cluster.clone_group_id #=> String
# resp.db_cluster.cluster_create_time #=> Time
# resp.db_cluster.enabled_cloudwatch_logs_exports #=> Array
# resp.db_cluster.enabled_cloudwatch_logs_exports[0] #=> String
@@ -5111,6 +5151,7 @@ def start_db_cluster(params = {}, options = {})
# resp.db_cluster.associated_roles #=> Array
# resp.db_cluster.associated_roles[0].role_arn #=> String
# resp.db_cluster.associated_roles[0].status #=> String
+ # resp.db_cluster.clone_group_id #=> String
# resp.db_cluster.cluster_create_time #=> Time
# resp.db_cluster.enabled_cloudwatch_logs_exports #=> Array
# resp.db_cluster.enabled_cloudwatch_logs_exports[0] #=> String
@@ -5138,7 +5179,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-docdb'
- context[:gem_version] = '1.42.0'
+ context[:gem_version] = '1.43.0'
Seahorse::Client::Request.new(handlers, context)
end
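
The `copy_tags_to_snapshot` option documented above is settable at instance creation and can be flipped later via `modify_db_instance`. A minimal usage sketch, assuming hypothetical cluster and instance identifiers (the other required parameters come from the existing DocDB API, not from this change):

    require 'aws-sdk-docdb'

    docdb = Aws::DocDB::Client.new(region: 'us-east-1')

    # Create an instance whose automated snapshots inherit the instance tags.
    resp = docdb.create_db_instance(
      db_instance_identifier: 'sample-instance',   # hypothetical
      db_instance_class: 'db.r5.large',
      engine: 'docdb',
      db_cluster_identifier: 'sample-cluster',     # hypothetical
      copy_tags_to_snapshot: true                  # new in 1.43.0
    )
    resp.db_instance.copy_tags_to_snapshot #=> true
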
diff --git a/gems/aws-sdk-docdb/lib/aws-sdk-docdb/client_api.rb b/gems/aws-sdk-docdb/lib/aws-sdk-docdb/client_api.rb
index 3c1e0c71399..d6a9c630279 100644
--- a/gems/aws-sdk-docdb/lib/aws-sdk-docdb/client_api.rb
+++ b/gems/aws-sdk-docdb/lib/aws-sdk-docdb/client_api.rb
@@ -372,6 +372,7 @@ module ClientApi
CreateDBInstanceMessage.add_member(:auto_minor_version_upgrade, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "AutoMinorVersionUpgrade"))
CreateDBInstanceMessage.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "Tags"))
CreateDBInstanceMessage.add_member(:db_cluster_identifier, Shapes::ShapeRef.new(shape: String, required: true, location_name: "DBClusterIdentifier"))
+ CreateDBInstanceMessage.add_member(:copy_tags_to_snapshot, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "CopyTagsToSnapshot"))
CreateDBInstanceMessage.add_member(:promotion_tier, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "PromotionTier"))
CreateDBInstanceMessage.add_member(:enable_performance_insights, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "EnablePerformanceInsights"))
CreateDBInstanceMessage.add_member(:performance_insights_kms_key_id, Shapes::ShapeRef.new(shape: String, location_name: "PerformanceInsightsKMSKeyId"))
@@ -441,6 +442,7 @@ module ClientApi
DBCluster.add_member(:db_cluster_resource_id, Shapes::ShapeRef.new(shape: String, location_name: "DbClusterResourceId"))
DBCluster.add_member(:db_cluster_arn, Shapes::ShapeRef.new(shape: String, location_name: "DBClusterArn"))
DBCluster.add_member(:associated_roles, Shapes::ShapeRef.new(shape: DBClusterRoles, location_name: "AssociatedRoles"))
+ DBCluster.add_member(:clone_group_id, Shapes::ShapeRef.new(shape: String, location_name: "CloneGroupId"))
DBCluster.add_member(:cluster_create_time, Shapes::ShapeRef.new(shape: TStamp, location_name: "ClusterCreateTime"))
DBCluster.add_member(:enabled_cloudwatch_logs_exports, Shapes::ShapeRef.new(shape: LogTypeList, location_name: "EnabledCloudwatchLogsExports"))
DBCluster.add_member(:deletion_protection, Shapes::ShapeRef.new(shape: Boolean, location_name: "DeletionProtection"))
@@ -571,6 +573,7 @@ module ClientApi
DBInstance.add_member(:kms_key_id, Shapes::ShapeRef.new(shape: String, location_name: "KmsKeyId"))
DBInstance.add_member(:dbi_resource_id, Shapes::ShapeRef.new(shape: String, location_name: "DbiResourceId"))
DBInstance.add_member(:ca_certificate_identifier, Shapes::ShapeRef.new(shape: String, location_name: "CACertificateIdentifier"))
+ DBInstance.add_member(:copy_tags_to_snapshot, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "CopyTagsToSnapshot"))
DBInstance.add_member(:promotion_tier, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "PromotionTier"))
DBInstance.add_member(:db_instance_arn, Shapes::ShapeRef.new(shape: String, location_name: "DBInstanceArn"))
DBInstance.add_member(:enabled_cloudwatch_logs_exports, Shapes::ShapeRef.new(shape: LogTypeList, location_name: "EnabledCloudwatchLogsExports"))
@@ -969,6 +972,7 @@ module ClientApi
ModifyDBInstanceMessage.add_member(:auto_minor_version_upgrade, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "AutoMinorVersionUpgrade"))
ModifyDBInstanceMessage.add_member(:new_db_instance_identifier, Shapes::ShapeRef.new(shape: String, location_name: "NewDBInstanceIdentifier"))
ModifyDBInstanceMessage.add_member(:ca_certificate_identifier, Shapes::ShapeRef.new(shape: String, location_name: "CACertificateIdentifier"))
+ ModifyDBInstanceMessage.add_member(:copy_tags_to_snapshot, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "CopyTagsToSnapshot"))
ModifyDBInstanceMessage.add_member(:promotion_tier, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "PromotionTier"))
ModifyDBInstanceMessage.add_member(:enable_performance_insights, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "EnablePerformanceInsights"))
ModifyDBInstanceMessage.add_member(:performance_insights_kms_key_id, Shapes::ShapeRef.new(shape: String, location_name: "PerformanceInsightsKMSKeyId"))
@@ -1125,6 +1129,7 @@ module ClientApi
RestoreDBClusterFromSnapshotResult.struct_class = Types::RestoreDBClusterFromSnapshotResult
RestoreDBClusterToPointInTimeMessage.add_member(:db_cluster_identifier, Shapes::ShapeRef.new(shape: String, required: true, location_name: "DBClusterIdentifier"))
+ RestoreDBClusterToPointInTimeMessage.add_member(:restore_type, Shapes::ShapeRef.new(shape: String, location_name: "RestoreType"))
RestoreDBClusterToPointInTimeMessage.add_member(:source_db_cluster_identifier, Shapes::ShapeRef.new(shape: String, required: true, location_name: "SourceDBClusterIdentifier"))
RestoreDBClusterToPointInTimeMessage.add_member(:restore_to_time, Shapes::ShapeRef.new(shape: TStamp, location_name: "RestoreToTime"))
RestoreDBClusterToPointInTimeMessage.add_member(:use_latest_restorable_time, Shapes::ShapeRef.new(shape: Boolean, location_name: "UseLatestRestorableTime"))
diff --git a/gems/aws-sdk-docdb/lib/aws-sdk-docdb/types.rb b/gems/aws-sdk-docdb/lib/aws-sdk-docdb/types.rb
index fa20e776720..cfc280645b7 100644
--- a/gems/aws-sdk-docdb/lib/aws-sdk-docdb/types.rb
+++ b/gems/aws-sdk-docdb/lib/aws-sdk-docdb/types.rb
@@ -942,6 +942,7 @@ class CreateDBClusterSnapshotResult < Struct.new(
# },
# ],
# db_cluster_identifier: "String", # required
+ # copy_tags_to_snapshot: false,
# promotion_tier: 1,
# enable_performance_insights: false,
# performance_insights_kms_key_id: "String",
@@ -1014,6 +1015,11 @@ class CreateDBClusterSnapshotResult < Struct.new(
# The identifier of the cluster that the instance will belong to.
# @return [String]
#
+ # @!attribute [rw] copy_tags_to_snapshot
+ # A value that indicates whether to copy tags from the DB instance to
+ # snapshots of the DB instance. By default, tags are not copied.
+ # @return [Boolean]
+ #
# @!attribute [rw] promotion_tier
# A value that specifies the order in which an Amazon DocumentDB
# replica is promoted to the primary instance after a failure of the
@@ -1058,6 +1064,7 @@ class CreateDBInstanceMessage < Struct.new(
:auto_minor_version_upgrade,
:tags,
:db_cluster_identifier,
+ :copy_tags_to_snapshot,
:promotion_tier,
:enable_performance_insights,
:performance_insights_kms_key_id)
@@ -1468,6 +1475,10 @@ class CreateGlobalClusterResult < Struct.new(
# other Amazon Web Services services on your behalf.
# @return [Array<Types::DBClusterRole>]
#
+ # @!attribute [rw] clone_group_id
+ # Identifies the clone group with which the DB cluster is associated.
+ # @return [String]
+ #
# @!attribute [rw] cluster_create_time
# Specifies the time when the cluster was created, in Universal
# Coordinated Time (UTC).
@@ -1517,6 +1528,7 @@ class DBCluster < Struct.new(
:db_cluster_resource_id,
:db_cluster_arn,
:associated_roles,
+ :clone_group_id,
:cluster_create_time,
:enabled_cloudwatch_logs_exports,
:deletion_protection)
@@ -2114,6 +2126,11 @@ class DBEngineVersionMessage < Struct.new(
# The identifier of the CA certificate for this DB instance.
# @return [String]
#
+ # @!attribute [rw] copy_tags_to_snapshot
+ # A value that indicates whether to copy tags from the DB instance to
+ # snapshots of the DB instance. By default, tags are not copied.
+ # @return [Boolean]
+ #
# @!attribute [rw] promotion_tier
# A value that specifies the order in which an Amazon DocumentDB
# replica is promoted to the primary instance after a failure of the
@@ -2155,6 +2172,7 @@ class DBInstance < Struct.new(
:kms_key_id,
:dbi_resource_id,
:ca_certificate_identifier,
+ :copy_tags_to_snapshot,
:promotion_tier,
:db_instance_arn,
:enabled_cloudwatch_logs_exports)
@@ -4690,6 +4708,7 @@ class ModifyDBClusterSnapshotAttributeResult < Struct.new(
# auto_minor_version_upgrade: false,
# new_db_instance_identifier: "String",
# ca_certificate_identifier: "String",
+ # copy_tags_to_snapshot: false,
# promotion_tier: 1,
# enable_performance_insights: false,
# performance_insights_kms_key_id: "String",
@@ -4780,6 +4799,11 @@ class ModifyDBClusterSnapshotAttributeResult < Struct.new(
# instance.
# @return [String]
#
+ # @!attribute [rw] copy_tags_to_snapshot
+ # A value that indicates whether to copy all tags from the DB instance
+ # to snapshots of the DB instance. By default, tags are not copied.
+ # @return [Boolean]
+ #
# @!attribute [rw] promotion_tier
# A value that specifies the order in which an Amazon DocumentDB
# replica is promoted to the primary instance after a failure of the
@@ -4823,6 +4847,7 @@ class ModifyDBInstanceMessage < Struct.new(
:auto_minor_version_upgrade,
:new_db_instance_identifier,
:ca_certificate_identifier,
+ :copy_tags_to_snapshot,
:promotion_tier,
:enable_performance_insights,
:performance_insights_kms_key_id)
@@ -5751,6 +5776,7 @@ class RestoreDBClusterFromSnapshotResult < Struct.new(
#
# {
# db_cluster_identifier: "String", # required
+ # restore_type: "String",
# source_db_cluster_identifier: "String", # required
# restore_to_time: Time.now,
# use_latest_restorable_time: false,
@@ -5780,6 +5806,23 @@ class RestoreDBClusterFromSnapshotResult < Struct.new(
# * Cannot end with a hyphen or contain two consecutive hyphens.
# @return [String]
#
+ # @!attribute [rw] restore_type
+ # The type of restore to be performed. You can specify one of the
+ # following values:
+ #
+ # * `full-copy` - The new DB cluster is restored as a full copy of the
+ # source DB cluster.
+ #
+ # * `copy-on-write` - The new DB cluster is restored as a clone of the
+ # source DB cluster.
+ #
+ # Constraints: You can't specify `copy-on-write` if the engine
+ # version of the source DB cluster is earlier than 1.11.
+ #
+ # If you don't specify a `RestoreType` value, then the new DB cluster
+ # is restored as a full copy of the source DB cluster.
+ # @return [String]
+ #
# @!attribute [rw] source_db_cluster_identifier
# The identifier of the source cluster from which to restore.
#
@@ -5892,6 +5935,7 @@ class RestoreDBClusterFromSnapshotResult < Struct.new(
#
class RestoreDBClusterToPointInTimeMessage < Struct.new(
:db_cluster_identifier,
+ :restore_type,
:source_db_cluster_identifier,
:restore_to_time,
:use_latest_restorable_time,
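
With `restore_type` added to `RestoreDBClusterToPointInTimeMessage`, a cluster can now be cloned rather than fully copied, and the restored cluster carries the new `clone_group_id` field. A sketch with hypothetical identifiers, following the request shape shown in the diff above:

    require 'aws-sdk-docdb'

    docdb = Aws::DocDB::Client.new(region: 'us-east-1')

    # `copy-on-write` clones the source cluster instead of copying its storage.
    resp = docdb.restore_db_cluster_to_point_in_time(
      db_cluster_identifier: 'sample-clone',           # hypothetical
      source_db_cluster_identifier: 'sample-cluster',  # hypothetical
      restore_type: 'copy-on-write',
      use_latest_restorable_time: true
    )
    resp.db_cluster.clone_group_id #=> String, shared by clones of the source
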
diff --git a/gems/aws-sdk-ec2instanceconnect/CHANGELOG.md b/gems/aws-sdk-ec2instanceconnect/CHANGELOG.md
index b61ce387e46..677bfff0d07 100644
--- a/gems/aws-sdk-ec2instanceconnect/CHANGELOG.md
+++ b/gems/aws-sdk-ec2instanceconnect/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.25.0 (2022-07-21)
+------------------
+
+* Feature - This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs.
+
1.24.0 (2022-02-24)
------------------
diff --git a/gems/aws-sdk-ec2instanceconnect/VERSION b/gems/aws-sdk-ec2instanceconnect/VERSION
index 53cc1a6f929..ad2191947f7 100644
--- a/gems/aws-sdk-ec2instanceconnect/VERSION
+++ b/gems/aws-sdk-ec2instanceconnect/VERSION
@@ -1 +1 @@
-1.24.0
+1.25.0
diff --git a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect.rb b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect.rb
index a659989674a..4dd2c59f63f 100644
--- a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect.rb
+++ b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect.rb
@@ -48,6 +48,6 @@
# @!group service
module Aws::EC2InstanceConnect
- GEM_VERSION = '1.24.0'
+ GEM_VERSION = '1.25.0'
end
diff --git a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/client.rb b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/client.rb
index 2270a982301..a644b159944 100644
--- a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/client.rb
+++ b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/client.rb
@@ -500,7 +500,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-ec2instanceconnect'
- context[:gem_version] = '1.24.0'
+ context[:gem_version] = '1.25.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/client_api.rb b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/client_api.rb
index 771a749f14d..1f7e33a4673 100644
--- a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/client_api.rb
+++ b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/client_api.rb
@@ -18,6 +18,7 @@ module ClientApi
EC2InstanceNotFoundException = Shapes::StructureShape.new(name: 'EC2InstanceNotFoundException')
EC2InstanceStateInvalidException = Shapes::StructureShape.new(name: 'EC2InstanceStateInvalidException')
EC2InstanceTypeInvalidException = Shapes::StructureShape.new(name: 'EC2InstanceTypeInvalidException')
+ EC2InstanceUnavailableException = Shapes::StructureShape.new(name: 'EC2InstanceUnavailableException')
InstanceId = Shapes::StringShape.new(name: 'InstanceId')
InstanceOSUser = Shapes::StringShape.new(name: 'InstanceOSUser')
InvalidArgsException = Shapes::StructureShape.new(name: 'InvalidArgsException')
@@ -48,6 +49,9 @@ module ClientApi
EC2InstanceTypeInvalidException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message"))
EC2InstanceTypeInvalidException.struct_class = Types::EC2InstanceTypeInvalidException
+ EC2InstanceUnavailableException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message"))
+ EC2InstanceUnavailableException.struct_class = Types::EC2InstanceUnavailableException
+
InvalidArgsException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message"))
InvalidArgsException.struct_class = Types::InvalidArgsException
@@ -116,6 +120,7 @@ module ClientApi
o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
o.errors << Shapes::ShapeRef.new(shape: EC2InstanceNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: EC2InstanceStateInvalidException)
+ o.errors << Shapes::ShapeRef.new(shape: EC2InstanceUnavailableException)
end)
api.add_operation(:send_serial_console_ssh_public_key, Seahorse::Model::Operation.new.tap do |o|
@@ -134,6 +139,7 @@ module ClientApi
o.errors << Shapes::ShapeRef.new(shape: SerialConsoleSessionLimitExceededException)
o.errors << Shapes::ShapeRef.new(shape: SerialConsoleSessionUnavailableException)
o.errors << Shapes::ShapeRef.new(shape: EC2InstanceStateInvalidException)
+ o.errors << Shapes::ShapeRef.new(shape: EC2InstanceUnavailableException)
end)
end
diff --git a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/errors.rb b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/errors.rb
index 8cfb7b78ce3..4addf16fd66 100644
--- a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/errors.rb
+++ b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/errors.rb
@@ -31,6 +31,7 @@ module Aws::EC2InstanceConnect
# * {EC2InstanceNotFoundException}
# * {EC2InstanceStateInvalidException}
# * {EC2InstanceTypeInvalidException}
+ # * {EC2InstanceUnavailableException}
# * {InvalidArgsException}
# * {SerialConsoleAccessDisabledException}
# * {SerialConsoleSessionLimitExceededException}
@@ -104,6 +105,21 @@ def message
end
end
+ class EC2InstanceUnavailableException < ServiceError
+
+ # @param [Seahorse::Client::RequestContext] context
+ # @param [String] message
+ # @param [Aws::EC2InstanceConnect::Types::EC2InstanceUnavailableException] data
+ def initialize(context, message, data = Aws::EmptyStructure.new)
+ super(context, message, data)
+ end
+
+ # @return [String]
+ def message
+ @message || @data[:message]
+ end
+ end
+
class InvalidArgsException < ServiceError
# @param [Seahorse::Client::RequestContext] context
diff --git a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/types.rb b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/types.rb
index f20bae160b7..87f2be19da9 100644
--- a/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/types.rb
+++ b/gems/aws-sdk-ec2instanceconnect/lib/aws-sdk-ec2instanceconnect/types.rb
@@ -67,6 +67,20 @@ class EC2InstanceTypeInvalidException < Struct.new(
include Aws::Structure
end
+ # The instance is currently unavailable. Wait a few minutes and try
+ # again.
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/ec2-instance-connect-2018-04-02/EC2InstanceUnavailableException AWS API Documentation
+ #
+ class EC2InstanceUnavailableException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# One of the parameters is not valid.
#
# @!attribute [rw] message
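
Since `EC2InstanceUnavailableException` describes a transient condition ("wait a few minutes and try again"), callers will typically rescue it around `send_ssh_public_key` or `send_serial_console_ssh_public_key` and retry. A minimal sketch with a hypothetical instance ID and key path:

    require 'aws-sdk-ec2instanceconnect'

    client = Aws::EC2InstanceConnect::Client.new(region: 'us-east-1')

    attempts = 0
    begin
      client.send_ssh_public_key(
        instance_id: 'i-0123456789abcdef0',          # hypothetical
        instance_os_user: 'ec2-user',
        ssh_public_key: File.read('id_ed25519.pub')  # hypothetical path
      )
    rescue Aws::EC2InstanceConnect::Errors::EC2InstanceUnavailableException
      # New in 1.25.0: the instance is temporarily unavailable, not invalid.
      attempts += 1
      if attempts < 3
        sleep 60
        retry
      end
      raise
    end
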
diff --git a/gems/aws-sdk-frauddetector/CHANGELOG.md b/gems/aws-sdk-frauddetector/CHANGELOG.md
index a614dd99456..142fce32a6e 100644
--- a/gems/aws-sdk-frauddetector/CHANGELOG.md
+++ b/gems/aws-sdk-frauddetector/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.34.0 (2022-07-21)
+------------------
+
+* Feature - This release introduces the Account Takeover Insights (ATI) model, which detects fraud relating to account takeover. It also adds support for the new variable types ARE_CREDENTIALS_VALID and SESSION_ID, and adds new structures to the Model Version APIs.
+
1.33.0 (2022-06-10)
------------------
diff --git a/gems/aws-sdk-frauddetector/VERSION b/gems/aws-sdk-frauddetector/VERSION
index 7aa332e4163..2b17ffd5042 100644
--- a/gems/aws-sdk-frauddetector/VERSION
+++ b/gems/aws-sdk-frauddetector/VERSION
@@ -1 +1 @@
-1.33.0
+1.34.0
diff --git a/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector.rb b/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector.rb
index 71aad904cd7..222bfe489db 100644
--- a/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector.rb
+++ b/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector.rb
@@ -48,6 +48,6 @@
# @!group service
module Aws::FraudDetector
- GEM_VERSION = '1.33.0'
+ GEM_VERSION = '1.34.0'
end
diff --git a/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/client.rb b/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/client.rb
index ceb5dbaafc0..7c3815617c1 100644
--- a/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/client.rb
+++ b/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/client.rb
@@ -666,7 +666,7 @@ def create_batch_prediction_job(params = {}, options = {})
# model_versions: [
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# arn: "fraudDetectorArn",
# },
@@ -718,7 +718,7 @@ def create_detector_version(params = {}, options = {})
#
# resp = client.create_model({
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# description: "description",
# event_type_name: "string", # required
# tags: [
@@ -775,12 +775,12 @@ def create_model(params = {}, options = {})
#
# resp = client.create_model_version({
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# training_data_source: "EXTERNAL_EVENTS", # required, accepts EXTERNAL_EVENTS, INGESTED_EVENTS
# training_data_schema: { # required
# model_variables: ["string"], # required
- # label_schema: { # required
- # label_mapper: { # required
+ # label_schema: {
+ # label_mapper: {
# "string" => ["string"],
# },
# unlabeled_events_treatment: "IGNORE", # accepts IGNORE, FRAUD, LEGIT
@@ -807,7 +807,7 @@ def create_model(params = {}, options = {})
# @example Response structure
#
# resp.model_id #=> String
- # resp.model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS"
+ # resp.model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS", "ACCOUNT_TAKEOVER_INSIGHTS"
# resp.model_version_number #=> String
# resp.status #=> String
#
@@ -1245,7 +1245,7 @@ def delete_label(params = {}, options = {})
#
# resp = client.delete_model({
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/DeleteModel AWS API Documentation
@@ -1281,7 +1281,7 @@ def delete_model(params = {}, options = {})
#
# resp = client.delete_model_version({
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# })
#
@@ -1462,7 +1462,7 @@ def describe_detector(params = {}, options = {})
# resp = client.describe_model_versions({
# model_id: "modelIdentifier",
# model_version_number: "floatVersionString",
- # model_type: "ONLINE_FRAUD_INSIGHTS", # accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# next_token: "string",
# max_results: 1,
# })
@@ -1471,7 +1471,7 @@ def describe_detector(params = {}, options = {})
#
# resp.model_version_details #=> Array
# resp.model_version_details[0].model_id #=> String
- # resp.model_version_details[0].model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS"
+ # resp.model_version_details[0].model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS", "ACCOUNT_TAKEOVER_INSIGHTS"
# resp.model_version_details[0].model_version_number #=> String
# resp.model_version_details[0].status #=> String
# resp.model_version_details[0].training_data_source #=> String, one of "EXTERNAL_EVENTS", "INGESTED_EVENTS"
@@ -1508,6 +1508,42 @@ def describe_detector(params = {}, options = {})
# resp.model_version_details[0].last_updated_time #=> String
# resp.model_version_details[0].created_time #=> String
# resp.model_version_details[0].arn #=> String
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.file_level_messages #=> Array
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.file_level_messages[0].title #=> String
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.file_level_messages[0].content #=> String
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.file_level_messages[0].type #=> String
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.field_level_messages #=> Array
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.field_level_messages[0].field_name #=> String
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.field_level_messages[0].identifier #=> String
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.field_level_messages[0].title #=> String
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.field_level_messages[0].content #=> String
+ # resp.model_version_details[0].training_result_v2.data_validation_metrics.field_level_messages[0].type #=> String
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ofi.metric_data_points #=> Array
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ofi.metric_data_points[0].fpr #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ofi.metric_data_points[0].precision #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ofi.metric_data_points[0].tpr #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ofi.metric_data_points[0].threshold #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ofi.model_performance.auc #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.tfi.metric_data_points #=> Array
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.tfi.metric_data_points[0].fpr #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.tfi.metric_data_points[0].precision #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.tfi.metric_data_points[0].tpr #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.tfi.metric_data_points[0].threshold #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.tfi.model_performance.auc #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ati.metric_data_points #=> Array
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ati.metric_data_points[0].cr #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ati.metric_data_points[0].adr #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ati.metric_data_points[0].threshold #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ati.metric_data_points[0].atodr #=> Float
+ # resp.model_version_details[0].training_result_v2.training_metrics_v2.ati.model_performance.asi #=> Float
+ # resp.model_version_details[0].training_result_v2.variable_importance_metrics.log_odds_metrics #=> Array
+ # resp.model_version_details[0].training_result_v2.variable_importance_metrics.log_odds_metrics[0].variable_name #=> String
+ # resp.model_version_details[0].training_result_v2.variable_importance_metrics.log_odds_metrics[0].variable_type #=> String
+ # resp.model_version_details[0].training_result_v2.variable_importance_metrics.log_odds_metrics[0].variable_importance #=> Float
+ # resp.model_version_details[0].training_result_v2.aggregated_variables_importance_metrics.log_odds_metrics #=> Array
+ # resp.model_version_details[0].training_result_v2.aggregated_variables_importance_metrics.log_odds_metrics[0].variable_names #=> Array
+ # resp.model_version_details[0].training_result_v2.aggregated_variables_importance_metrics.log_odds_metrics[0].variable_names[0] #=> String
+ # resp.model_version_details[0].training_result_v2.aggregated_variables_importance_metrics.log_odds_metrics[0].aggregated_variables_importance #=> Float
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/DescribeModelVersions AWS API Documentation
@@ -1707,7 +1743,7 @@ def get_delete_events_by_event_type_status(params = {}, options = {})
# resp.external_model_endpoints[0] #=> String
# resp.model_versions #=> Array
# resp.model_versions[0].model_id #=> String
- # resp.model_versions[0].model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS"
+ # resp.model_versions[0].model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS", "ACCOUNT_TAKEOVER_INSIGHTS"
# resp.model_versions[0].model_version_number #=> String
# resp.model_versions[0].arn #=> String
# resp.rules #=> Array
@@ -1963,7 +1999,7 @@ def get_event(params = {}, options = {})
#
# resp.model_scores #=> Array
# resp.model_scores[0].model_version.model_id #=> String
- # resp.model_scores[0].model_version.model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS"
+ # resp.model_scores[0].model_version.model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS", "ACCOUNT_TAKEOVER_INSIGHTS"
# resp.model_scores[0].model_version.model_version_number #=> String
# resp.model_scores[0].model_version.arn #=> String
# resp.model_scores[0].scores #=> Hash
@@ -2081,6 +2117,11 @@ def get_event_prediction(params = {}, options = {})
# resp.evaluated_model_versions[0].evaluations[0].prediction_explanations.variable_impact_explanations[0].event_variable_name #=> String
# resp.evaluated_model_versions[0].evaluations[0].prediction_explanations.variable_impact_explanations[0].relative_impact #=> String
# resp.evaluated_model_versions[0].evaluations[0].prediction_explanations.variable_impact_explanations[0].log_odds_impact #=> Float
+ # resp.evaluated_model_versions[0].evaluations[0].prediction_explanations.aggregated_variables_impact_explanations #=> Array
+ # resp.evaluated_model_versions[0].evaluations[0].prediction_explanations.aggregated_variables_impact_explanations[0].event_variable_names #=> Array
+ # resp.evaluated_model_versions[0].evaluations[0].prediction_explanations.aggregated_variables_impact_explanations[0].event_variable_names[0] #=> String
+ # resp.evaluated_model_versions[0].evaluations[0].prediction_explanations.aggregated_variables_impact_explanations[0].relative_impact #=> String
+ # resp.evaluated_model_versions[0].evaluations[0].prediction_explanations.aggregated_variables_impact_explanations[0].log_odds_impact #=> Float
# resp.evaluated_external_models #=> Array
# resp.evaluated_external_models[0].model_endpoint #=> String
# resp.evaluated_external_models[0].use_event_variables #=> Boolean
@@ -2323,14 +2364,14 @@ def get_labels(params = {}, options = {})
#
# resp = client.get_model_version({
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# })
#
# @example Response structure
#
# resp.model_id #=> String
- # resp.model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS"
+ # resp.model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS", "ACCOUNT_TAKEOVER_INSIGHTS"
# resp.model_version_number #=> String
# resp.training_data_source #=> String, one of "EXTERNAL_EVENTS", "INGESTED_EVENTS"
# resp.training_data_schema.model_variables #=> Array
@@ -2391,7 +2432,7 @@ def get_model_version(params = {}, options = {})
#
# resp = client.get_models({
# model_id: "modelIdentifier",
- # model_type: "ONLINE_FRAUD_INSIGHTS", # accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# next_token: "string",
# max_results: 1,
# })
@@ -2401,7 +2442,7 @@ def get_model_version(params = {}, options = {})
# resp.next_token #=> String
# resp.models #=> Array
# resp.models[0].model_id #=> String
- # resp.models[0].model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS"
+ # resp.models[0].model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS", "ACCOUNT_TAKEOVER_INSIGHTS"
# resp.models[0].description #=> String
# resp.models[0].event_type_name #=> String
# resp.models[0].created_time #=> String
@@ -3196,7 +3237,7 @@ def untag_resource(params = {}, options = {})
# model_versions: [
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# arn: "fraudDetectorArn",
# },
@@ -3328,7 +3369,7 @@ def update_event_label(params = {}, options = {})
#
# resp = client.update_model({
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# description: "description",
# })
#
@@ -3379,7 +3420,7 @@ def update_model(params = {}, options = {})
#
# resp = client.update_model_version({
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# major_version_number: "wholeNumberVersionString", # required
# external_events_detail: {
# data_location: "s3BucketLocation", # required
@@ -3402,7 +3443,7 @@ def update_model(params = {}, options = {})
# @example Response structure
#
# resp.model_id #=> String
- # resp.model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS"
+ # resp.model_type #=> String, one of "ONLINE_FRAUD_INSIGHTS", "TRANSACTION_FRAUD_INSIGHTS", "ACCOUNT_TAKEOVER_INSIGHTS"
# resp.model_version_number #=> String
# resp.status #=> String
#
@@ -3443,7 +3484,7 @@ def update_model_version(params = {}, options = {})
#
# resp = client.update_model_version_status({
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# status: "ACTIVE", # required, accepts ACTIVE, INACTIVE, TRAINING_CANCELLED
# })
@@ -3598,7 +3639,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-frauddetector'
- context[:gem_version] = '1.33.0'
+ context[:gem_version] = '1.34.0'
Seahorse::Client::Request.new(handlers, context)
end
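
A practical consequence of `label_schema` becoming optional (visible in the `create_model_version` example diff above) is that an ACCOUNT_TAKEOVER_INSIGHTS model can be trained on ingested login events without labels. A sketch under that assumption, with hypothetical model ID, variable names, and time window:

    require 'aws-sdk-frauddetector'

    fd = Aws::FraudDetector::Client.new(region: 'us-east-1')

    resp = fd.create_model_version(
      model_id: 'sample_ati_model',                    # hypothetical
      model_type: 'ACCOUNT_TAKEOVER_INSIGHTS',
      training_data_source: 'INGESTED_EVENTS',
      training_data_schema: {
        model_variables: ['ip_address', 'user_agent']  # hypothetical; no label_schema
      },
      ingested_events_detail: {
        ingested_events_time_window: {
          start_time: '2022-01-01T00:00:00Z',          # hypothetical window
          end_time: '2022-06-30T23:59:59Z'
        }
      }
    )
    resp.model_version_number #=> String
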
diff --git a/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/client_api.rb b/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/client_api.rb
index 7fca28936ac..5702b83f62a 100644
--- a/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/client_api.rb
+++ b/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/client_api.rb
@@ -13,7 +13,14 @@ module ClientApi
include Seahorse::Model
+ ATIMetricDataPoint = Shapes::StructureShape.new(name: 'ATIMetricDataPoint')
+ ATIMetricDataPointsList = Shapes::ListShape.new(name: 'ATIMetricDataPointsList')
+ ATIModelPerformance = Shapes::StructureShape.new(name: 'ATIModelPerformance')
+ ATITrainingMetricsValue = Shapes::StructureShape.new(name: 'ATITrainingMetricsValue')
AccessDeniedException = Shapes::StructureShape.new(name: 'AccessDeniedException')
+ AggregatedLogOddsMetric = Shapes::StructureShape.new(name: 'AggregatedLogOddsMetric')
+ AggregatedVariablesImpactExplanation = Shapes::StructureShape.new(name: 'AggregatedVariablesImpactExplanation')
+ AggregatedVariablesImportanceMetrics = Shapes::StructureShape.new(name: 'AggregatedVariablesImportanceMetrics')
AsyncJobStatus = Shapes::StringShape.new(name: 'AsyncJobStatus')
BatchCreateVariableError = Shapes::StructureShape.new(name: 'BatchCreateVariableError')
BatchCreateVariableErrorList = Shapes::ListShape.new(name: 'BatchCreateVariableErrorList')
@@ -166,6 +173,8 @@ module ClientApi
Language = Shapes::StringShape.new(name: 'Language')
ListEventPredictionsRequest = Shapes::StructureShape.new(name: 'ListEventPredictionsRequest')
ListEventPredictionsResult = Shapes::StructureShape.new(name: 'ListEventPredictionsResult')
+ ListOfAggregatedLogOddsMetrics = Shapes::ListShape.new(name: 'ListOfAggregatedLogOddsMetrics')
+ ListOfAggregatedVariablesImpactExplanations = Shapes::ListShape.new(name: 'ListOfAggregatedVariablesImpactExplanations')
ListOfEvaluatedExternalModels = Shapes::ListShape.new(name: 'ListOfEvaluatedExternalModels')
ListOfEvaluatedModelVersions = Shapes::ListShape.new(name: 'ListOfEvaluatedModelVersions')
ListOfEventPredictionSummaries = Shapes::ListShape.new(name: 'ListOfEventPredictionSummaries')
@@ -200,6 +209,10 @@ module ClientApi
ModelVersionStatus = Shapes::StringShape.new(name: 'ModelVersionStatus')
NameList = Shapes::ListShape.new(name: 'NameList')
NonEmptyListOfStrings = Shapes::ListShape.new(name: 'NonEmptyListOfStrings')
+ OFIMetricDataPoint = Shapes::StructureShape.new(name: 'OFIMetricDataPoint')
+ OFIMetricDataPointsList = Shapes::ListShape.new(name: 'OFIMetricDataPointsList')
+ OFIModelPerformance = Shapes::StructureShape.new(name: 'OFIModelPerformance')
+ OFITrainingMetricsValue = Shapes::StructureShape.new(name: 'OFITrainingMetricsValue')
Outcome = Shapes::StructureShape.new(name: 'Outcome')
OutcomeList = Shapes::ListShape.new(name: 'OutcomeList')
OutcomesMaxResults = Shapes::IntegerShape.new(name: 'OutcomesMaxResults')
@@ -230,6 +243,10 @@ module ClientApi
RulesMaxResults = Shapes::IntegerShape.new(name: 'RulesMaxResults')
SendEventRequest = Shapes::StructureShape.new(name: 'SendEventRequest')
SendEventResult = Shapes::StructureShape.new(name: 'SendEventResult')
+ TFIMetricDataPoint = Shapes::StructureShape.new(name: 'TFIMetricDataPoint')
+ TFIMetricDataPointsList = Shapes::ListShape.new(name: 'TFIMetricDataPointsList')
+ TFIModelPerformance = Shapes::StructureShape.new(name: 'TFIModelPerformance')
+ TFITrainingMetricsValue = Shapes::StructureShape.new(name: 'TFITrainingMetricsValue')
Tag = Shapes::StructureShape.new(name: 'Tag')
TagResourceRequest = Shapes::StructureShape.new(name: 'TagResourceRequest')
TagResourceResult = Shapes::StructureShape.new(name: 'TagResourceResult')
@@ -238,7 +255,9 @@ module ClientApi
TrainingDataSchema = Shapes::StructureShape.new(name: 'TrainingDataSchema')
TrainingDataSourceEnum = Shapes::StringShape.new(name: 'TrainingDataSourceEnum')
TrainingMetrics = Shapes::StructureShape.new(name: 'TrainingMetrics')
+ TrainingMetricsV2 = Shapes::StructureShape.new(name: 'TrainingMetricsV2')
TrainingResult = Shapes::StructureShape.new(name: 'TrainingResult')
+ TrainingResultV2 = Shapes::StructureShape.new(name: 'TrainingResultV2')
UnlabeledEventsTreatment = Shapes::StringShape.new(name: 'UnlabeledEventsTreatment')
UntagResourceRequest = Shapes::StructureShape.new(name: 'UntagResourceRequest')
UntagResourceResult = Shapes::StructureShape.new(name: 'UntagResourceResult')
@@ -318,9 +337,36 @@ module ClientApi
variableValue = Shapes::StringShape.new(name: 'variableValue')
wholeNumberVersionString = Shapes::StringShape.new(name: 'wholeNumberVersionString')
+ ATIMetricDataPoint.add_member(:cr, Shapes::ShapeRef.new(shape: float, location_name: "cr"))
+ ATIMetricDataPoint.add_member(:adr, Shapes::ShapeRef.new(shape: float, location_name: "adr"))
+ ATIMetricDataPoint.add_member(:threshold, Shapes::ShapeRef.new(shape: float, location_name: "threshold"))
+ ATIMetricDataPoint.add_member(:atodr, Shapes::ShapeRef.new(shape: float, location_name: "atodr"))
+ ATIMetricDataPoint.struct_class = Types::ATIMetricDataPoint
+
+ ATIMetricDataPointsList.member = Shapes::ShapeRef.new(shape: ATIMetricDataPoint)
+
+ ATIModelPerformance.add_member(:asi, Shapes::ShapeRef.new(shape: float, location_name: "asi"))
+ ATIModelPerformance.struct_class = Types::ATIModelPerformance
+
+ ATITrainingMetricsValue.add_member(:metric_data_points, Shapes::ShapeRef.new(shape: ATIMetricDataPointsList, location_name: "metricDataPoints"))
+ ATITrainingMetricsValue.add_member(:model_performance, Shapes::ShapeRef.new(shape: ATIModelPerformance, location_name: "modelPerformance"))
+ ATITrainingMetricsValue.struct_class = Types::ATITrainingMetricsValue
+
AccessDeniedException.add_member(:message, Shapes::ShapeRef.new(shape: string, required: true, location_name: "message"))
AccessDeniedException.struct_class = Types::AccessDeniedException
+ AggregatedLogOddsMetric.add_member(:variable_names, Shapes::ShapeRef.new(shape: ListOfStrings, required: true, location_name: "variableNames"))
+ AggregatedLogOddsMetric.add_member(:aggregated_variables_importance, Shapes::ShapeRef.new(shape: float, required: true, location_name: "aggregatedVariablesImportance"))
+ AggregatedLogOddsMetric.struct_class = Types::AggregatedLogOddsMetric
+
+ AggregatedVariablesImpactExplanation.add_member(:event_variable_names, Shapes::ShapeRef.new(shape: ListOfStrings, location_name: "eventVariableNames"))
+ AggregatedVariablesImpactExplanation.add_member(:relative_impact, Shapes::ShapeRef.new(shape: string, location_name: "relativeImpact"))
+ AggregatedVariablesImpactExplanation.add_member(:log_odds_impact, Shapes::ShapeRef.new(shape: float, location_name: "logOddsImpact"))
+ AggregatedVariablesImpactExplanation.struct_class = Types::AggregatedVariablesImpactExplanation
+
+ AggregatedVariablesImportanceMetrics.add_member(:log_odds_metrics, Shapes::ShapeRef.new(shape: ListOfAggregatedLogOddsMetrics, location_name: "logOddsMetrics"))
+ AggregatedVariablesImportanceMetrics.struct_class = Types::AggregatedVariablesImportanceMetrics
+
BatchCreateVariableError.add_member(:name, Shapes::ShapeRef.new(shape: string, location_name: "name"))
BatchCreateVariableError.add_member(:code, Shapes::ShapeRef.new(shape: integer, location_name: "code"))
BatchCreateVariableError.add_member(:message, Shapes::ShapeRef.new(shape: string, location_name: "message"))
@@ -952,7 +998,7 @@ module ClientApi
Label.add_member(:arn, Shapes::ShapeRef.new(shape: fraudDetectorArn, location_name: "arn"))
Label.struct_class = Types::Label
- LabelSchema.add_member(:label_mapper, Shapes::ShapeRef.new(shape: labelMapper, required: true, location_name: "labelMapper"))
+ LabelSchema.add_member(:label_mapper, Shapes::ShapeRef.new(shape: labelMapper, location_name: "labelMapper"))
LabelSchema.add_member(:unlabeled_events_treatment, Shapes::ShapeRef.new(shape: UnlabeledEventsTreatment, location_name: "unlabeledEventsTreatment"))
LabelSchema.struct_class = Types::LabelSchema
@@ -969,6 +1015,10 @@ module ClientApi
ListEventPredictionsResult.add_member(:next_token, Shapes::ShapeRef.new(shape: string, location_name: "nextToken"))
ListEventPredictionsResult.struct_class = Types::ListEventPredictionsResult
+ ListOfAggregatedLogOddsMetrics.member = Shapes::ShapeRef.new(shape: AggregatedLogOddsMetric)
+
+ ListOfAggregatedVariablesImpactExplanations.member = Shapes::ShapeRef.new(shape: AggregatedVariablesImpactExplanation)
+
ListOfEvaluatedExternalModels.member = Shapes::ShapeRef.new(shape: EvaluatedExternalModel)
ListOfEvaluatedModelVersions.member = Shapes::ShapeRef.new(shape: EvaluatedModelVersion)
@@ -1064,6 +1114,7 @@ module ClientApi
ModelVersionDetail.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: time, location_name: "lastUpdatedTime"))
ModelVersionDetail.add_member(:created_time, Shapes::ShapeRef.new(shape: time, location_name: "createdTime"))
ModelVersionDetail.add_member(:arn, Shapes::ShapeRef.new(shape: fraudDetectorArn, location_name: "arn"))
+ ModelVersionDetail.add_member(:training_result_v2, Shapes::ShapeRef.new(shape: TrainingResultV2, location_name: "trainingResultV2"))
ModelVersionDetail.struct_class = Types::ModelVersionDetail
ModelVersionEvaluation.add_member(:output_variable_name, Shapes::ShapeRef.new(shape: string, location_name: "outputVariableName"))
@@ -1075,6 +1126,21 @@ module ClientApi
NonEmptyListOfStrings.member = Shapes::ShapeRef.new(shape: string)
+ OFIMetricDataPoint.add_member(:fpr, Shapes::ShapeRef.new(shape: float, location_name: "fpr"))
+ OFIMetricDataPoint.add_member(:precision, Shapes::ShapeRef.new(shape: float, location_name: "precision"))
+ OFIMetricDataPoint.add_member(:tpr, Shapes::ShapeRef.new(shape: float, location_name: "tpr"))
+ OFIMetricDataPoint.add_member(:threshold, Shapes::ShapeRef.new(shape: float, location_name: "threshold"))
+ OFIMetricDataPoint.struct_class = Types::OFIMetricDataPoint
+
+ OFIMetricDataPointsList.member = Shapes::ShapeRef.new(shape: OFIMetricDataPoint)
+
+ OFIModelPerformance.add_member(:auc, Shapes::ShapeRef.new(shape: float, location_name: "auc"))
+ OFIModelPerformance.struct_class = Types::OFIModelPerformance
+
+ OFITrainingMetricsValue.add_member(:metric_data_points, Shapes::ShapeRef.new(shape: OFIMetricDataPointsList, location_name: "metricDataPoints"))
+ OFITrainingMetricsValue.add_member(:model_performance, Shapes::ShapeRef.new(shape: OFIModelPerformance, location_name: "modelPerformance"))
+ OFITrainingMetricsValue.struct_class = Types::OFITrainingMetricsValue
+
Outcome.add_member(:name, Shapes::ShapeRef.new(shape: identifier, location_name: "name"))
Outcome.add_member(:description, Shapes::ShapeRef.new(shape: description, location_name: "description"))
Outcome.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: time, location_name: "lastUpdatedTime"))
@@ -1085,6 +1151,7 @@ module ClientApi
OutcomeList.member = Shapes::ShapeRef.new(shape: Outcome)
PredictionExplanations.add_member(:variable_impact_explanations, Shapes::ShapeRef.new(shape: listOfVariableImpactExplanations, location_name: "variableImpactExplanations"))
+ PredictionExplanations.add_member(:aggregated_variables_impact_explanations, Shapes::ShapeRef.new(shape: ListOfAggregatedVariablesImpactExplanations, location_name: "aggregatedVariablesImpactExplanations"))
PredictionExplanations.struct_class = Types::PredictionExplanations
PredictionTimeRange.add_member(:start_time, Shapes::ShapeRef.new(shape: time, required: true, location_name: "startTime"))
@@ -1189,6 +1256,21 @@ module ClientApi
SendEventResult.struct_class = Types::SendEventResult
+ TFIMetricDataPoint.add_member(:fpr, Shapes::ShapeRef.new(shape: float, location_name: "fpr"))
+ TFIMetricDataPoint.add_member(:precision, Shapes::ShapeRef.new(shape: float, location_name: "precision"))
+ TFIMetricDataPoint.add_member(:tpr, Shapes::ShapeRef.new(shape: float, location_name: "tpr"))
+ TFIMetricDataPoint.add_member(:threshold, Shapes::ShapeRef.new(shape: float, location_name: "threshold"))
+ TFIMetricDataPoint.struct_class = Types::TFIMetricDataPoint
+
+ TFIMetricDataPointsList.member = Shapes::ShapeRef.new(shape: TFIMetricDataPoint)
+
+ TFIModelPerformance.add_member(:auc, Shapes::ShapeRef.new(shape: float, location_name: "auc"))
+ TFIModelPerformance.struct_class = Types::TFIModelPerformance
+
+ TFITrainingMetricsValue.add_member(:metric_data_points, Shapes::ShapeRef.new(shape: TFIMetricDataPointsList, location_name: "metricDataPoints"))
+ TFITrainingMetricsValue.add_member(:model_performance, Shapes::ShapeRef.new(shape: TFIModelPerformance, location_name: "modelPerformance"))
+ TFITrainingMetricsValue.struct_class = Types::TFITrainingMetricsValue
+
Tag.add_member(:key, Shapes::ShapeRef.new(shape: tagKey, required: true, location_name: "key"))
Tag.add_member(:value, Shapes::ShapeRef.new(shape: tagValue, required: true, location_name: "value"))
Tag.struct_class = Types::Tag
@@ -1203,18 +1285,29 @@ module ClientApi
ThrottlingException.struct_class = Types::ThrottlingException
TrainingDataSchema.add_member(:model_variables, Shapes::ShapeRef.new(shape: ListOfStrings, required: true, location_name: "modelVariables"))
- TrainingDataSchema.add_member(:label_schema, Shapes::ShapeRef.new(shape: LabelSchema, required: true, location_name: "labelSchema"))
+ TrainingDataSchema.add_member(:label_schema, Shapes::ShapeRef.new(shape: LabelSchema, location_name: "labelSchema"))
TrainingDataSchema.struct_class = Types::TrainingDataSchema
TrainingMetrics.add_member(:auc, Shapes::ShapeRef.new(shape: float, location_name: "auc"))
TrainingMetrics.add_member(:metric_data_points, Shapes::ShapeRef.new(shape: metricDataPointsList, location_name: "metricDataPoints"))
TrainingMetrics.struct_class = Types::TrainingMetrics
+ TrainingMetricsV2.add_member(:ofi, Shapes::ShapeRef.new(shape: OFITrainingMetricsValue, location_name: "ofi"))
+ TrainingMetricsV2.add_member(:tfi, Shapes::ShapeRef.new(shape: TFITrainingMetricsValue, location_name: "tfi"))
+ TrainingMetricsV2.add_member(:ati, Shapes::ShapeRef.new(shape: ATITrainingMetricsValue, location_name: "ati"))
+ TrainingMetricsV2.struct_class = Types::TrainingMetricsV2
+
TrainingResult.add_member(:data_validation_metrics, Shapes::ShapeRef.new(shape: DataValidationMetrics, location_name: "dataValidationMetrics"))
TrainingResult.add_member(:training_metrics, Shapes::ShapeRef.new(shape: TrainingMetrics, location_name: "trainingMetrics"))
TrainingResult.add_member(:variable_importance_metrics, Shapes::ShapeRef.new(shape: VariableImportanceMetrics, location_name: "variableImportanceMetrics"))
TrainingResult.struct_class = Types::TrainingResult
+ TrainingResultV2.add_member(:data_validation_metrics, Shapes::ShapeRef.new(shape: DataValidationMetrics, location_name: "dataValidationMetrics"))
+ TrainingResultV2.add_member(:training_metrics_v2, Shapes::ShapeRef.new(shape: TrainingMetricsV2, location_name: "trainingMetricsV2"))
+ TrainingResultV2.add_member(:variable_importance_metrics, Shapes::ShapeRef.new(shape: VariableImportanceMetrics, location_name: "variableImportanceMetrics"))
+ TrainingResultV2.add_member(:aggregated_variables_importance_metrics, Shapes::ShapeRef.new(shape: AggregatedVariablesImportanceMetrics, location_name: "aggregatedVariablesImportanceMetrics"))
+ TrainingResultV2.struct_class = Types::TrainingResultV2
+
UntagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: fraudDetectorArn, required: true, location_name: "resourceARN"))
UntagResourceRequest.add_member(:tag_keys, Shapes::ShapeRef.new(shape: tagKeyList, required: true, location_name: "tagKeys"))
UntagResourceRequest.struct_class = Types::UntagResourceRequest
@@ -1353,7 +1446,7 @@ module ClientApi
labelList.member = Shapes::ShapeRef.new(shape: Label)
labelMapper.key = Shapes::ShapeRef.new(shape: string)
- labelMapper.value = Shapes::ShapeRef.new(shape: NonEmptyListOfStrings)
+ labelMapper.value = Shapes::ShapeRef.new(shape: ListOfStrings)
listOfEntities.member = Shapes::ShapeRef.new(shape: Entity)
diff --git a/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/types.rb b/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/types.rb
index f4325a6ba5c..da51f4e6f07 100644
--- a/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/types.rb
+++ b/gems/aws-sdk-frauddetector/lib/aws-sdk-frauddetector/types.rb
@@ -10,6 +10,89 @@
module Aws::FraudDetector
module Types
+ # The Account Takeover Insights (ATI) model performance metrics data
+ # points.
+ #
+ # @!attribute [rw] cr
+ # The challenge rate. This indicates the percentage of login events
+ # that the model recommends challenging with measures such as a
+ # one-time password, multi-factor authentication, or an investigation.
+ # @return [Float]
+ #
+ # @!attribute [rw] adr
+ # The anomaly discovery rate. This metric quantifies the percentage of
+ # anomalies that can be detected by the model at the selected score
+ # threshold. A lower score threshold increases the percentage of
+ # anomalies captured by the model, but also requires challenging a
+ # larger percentage of login events, leading to higher customer
+ # friction.
+ # @return [Float]
+ #
+ # @!attribute [rw] threshold
+ # The model's threshold that specifies an acceptable fraud capture
+ # rate. For example, a threshold of 500 means any model score 500 or
+ # above is labeled as fraud.
+ # @return [Float]
+ #
+ # @!attribute [rw] atodr
+ # The account takeover discovery rate. This metric quantifies the
+ # percentage of account compromise events that can be detected by the
+ # model at the selected score threshold. This metric is only available
+ # if 50 or more entities with at least one labeled account takeover
+ # event are present in the ingested dataset.
+ # @return [Float]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/ATIMetricDataPoint AWS API Documentation
+ #
+ class ATIMetricDataPoint < Struct.new(
+ :cr,
+ :adr,
+ :threshold,
+ :atodr)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
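
Taken together, `cr`, `adr`, and `threshold` describe one operating point on the
model's trade-off curve. A minimal Ruby sketch of picking an operating point from
these data points (`metric_data_points` and the 5% challenge budget are
illustrative assumptions, not part of the SDK):

    # Choose the ATI operating point that captures the most anomalies while
    # challenging at most `max_challenge_rate` percent of login events.
    # `metric_data_points` is assumed to be an Array<Types::ATIMetricDataPoint>.
    def pick_ati_threshold(metric_data_points, max_challenge_rate: 5.0)
      affordable = metric_data_points.select { |p| p.cr && p.adr && p.cr <= max_challenge_rate }
      affordable.max_by(&:adr)&.threshold
    end
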
+ # The Account Takeover Insights (ATI) model performance score.
+ #
+ # @!attribute [rw] asi
+ # The anomaly separation index (ASI) score. This metric summarizes the
+ # overall ability of the model to separate anomalous activities from
+ # the normal behavior. Depending on the business, a large fraction of
+ # these anomalous activities can be malicious and correspond to
+ # account takeover attacks. A model with no separability power will
+ # have the lowest possible ASI score of 0.5, whereas a model with
+ # high separability power will have the highest possible ASI score
+ # of 1.0.
+ # @return [Float]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/ATIModelPerformance AWS API Documentation
+ #
+ class ATIModelPerformance < Struct.new(
+ :asi)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The Account Takeover Insights (ATI) model training metric details.
+ #
+ # @!attribute [rw] metric_data_points
+ # The model's performance metrics data points.
+ # @return [Array<Types::ATIMetricDataPoint>]
+ #
+ # @!attribute [rw] model_performance
+ # The model's overall performance scores.
+ # @return [Types::ATIModelPerformance]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/ATITrainingMetricsValue AWS API Documentation
+ #
+ class ATITrainingMetricsValue < Struct.new(
+ :metric_data_points,
+ :model_performance)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# An exception indicating Amazon Fraud Detector does not have the needed
# permissions. This can occur if you submit a request, such as
# `PutExternalModel`, that specifies a role that is not in your account.
@@ -25,6 +108,96 @@ class AccessDeniedException < Struct.new(
include Aws::Structure
end
+ # The log odds metric details.
+ #
+ # The Account Takeover Insights (ATI) model uses event variables from
+ # the login data you provide to continuously calculate a set of
+ # variables (aggregated variables) based on historical events. For
+ # example, your ATI model might calculate the number of times a user
+ # has logged in using the same IP address. In this case, the event
+ # variables used to derive the aggregated variables are `IP address`
+ # and `user`.
+ #
+ # @!attribute [rw] variable_names
+ # The names of all the variables.
+ # @return [Array<String>]
+ #
+ # @!attribute [rw] aggregated_variables_importance
+ # The relative importance of the variables in the list to the other
+ # event variables.
+ # @return [Float]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/AggregatedLogOddsMetric AWS API Documentation
+ #
+ class AggregatedLogOddsMetric < Struct.new(
+ :variable_names,
+ :aggregated_variables_importance)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The details of the impact of aggregated variables on the prediction
+ # score.
+ #
+ # The Account Takeover Insights (ATI) model uses the login data you
+ # provide to continuously calculate a set of variables (aggregated
+ # variables) based on historical events. For example, the model might
+ # calculate the number of times a user has logged in using the same
+ # IP address. In this case, the event variables used to derive the
+ # aggregated variables are `IP address` and `user`.
+ #
+ # @!attribute [rw] event_variable_names
+ # The names of all the event variables that were used to derive the
+ # aggregated variables.
+ # @return [Array<String>]
+ #
+ # @!attribute [rw] relative_impact
+ # The relative impact of the aggregated variables in terms of
+ # magnitude on the prediction scores.
+ # @return [String]
+ #
+ # @!attribute [rw] log_odds_impact
+ # The raw, uninterpreted value represented as log-odds of fraud.
+ # These values are usually between -10 and +10, but can range from
+ # -infinity to +infinity.
+ #
+ # * A positive value indicates that the variables drove the risk score
+ # up.
+ #
+ # * A negative value indicates that the variables drove the risk score
+ # down.
+ # @return [Float]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/AggregatedVariablesImpactExplanation AWS API Documentation
+ #
+ class AggregatedVariablesImpactExplanation < Struct.new(
+ :event_variable_names,
+ :relative_impact,
+ :log_odds_impact)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
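
A minimal consumption sketch, assuming `explanations` holds the
`aggregated_variables_impact_explanations` array from a prediction explanation
payload (the variable name is hypothetical):

    # Surface the three aggregated-variable groups that moved the risk score
    # the most; positive log-odds pushed the score up, negative pulled it down.
    explanations
      .sort_by { |e| -e.log_odds_impact.to_f.abs }
      .first(3)
      .each do |e|
        direction = e.log_odds_impact.to_f >= 0 ? "raised" : "lowered"
        puts "#{e.event_variable_names.join(' + ')} #{direction} the score " \
             "(#{e.relative_impact}, log-odds #{e.log_odds_impact})"
      end
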
+ # The details of the relative importance of the aggregated variables.
+ #
+ # The Account Takeover Insights (ATI) model uses event variables from
+ # the login data you provide to continuously calculate a set of
+ # variables (aggregated variables) based on historical events. For
+ # example, your ATI model might calculate the number of times a user
+ # has logged in using the same IP address. In this case, the event
+ # variables used to derive the aggregated variables are `IP address`
+ # and `user`.
+ #
+ # @!attribute [rw] log_odds_metrics
+ # List of variables' metrics.
+ # @return [Array<Types::AggregatedLogOddsMetric>]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/AggregatedVariablesImportanceMetrics AWS API Documentation
+ #
+ class AggregatedVariablesImportanceMetrics < Struct.new(
+ :log_odds_metrics)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# Provides the error of the batch create variable API.
#
# @!attribute [rw] name
@@ -531,7 +704,7 @@ class CreateBatchPredictionJobResult < Aws::EmptyStructure; end
# model_versions: [
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# arn: "fraudDetectorArn",
# },
@@ -629,7 +802,7 @@ class CreateDetectorVersionResult < Struct.new(
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# description: "description",
# event_type_name: "string", # required
# tags: [
@@ -681,12 +854,12 @@ class CreateModelResult < Aws::EmptyStructure; end
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# training_data_source: "EXTERNAL_EVENTS", # required, accepts EXTERNAL_EVENTS, INGESTED_EVENTS
# training_data_schema: { # required
# model_variables: ["string"], # required
- # label_schema: { # required
- # label_mapper: { # required
+ # label_schema: {
+ # label_mapper: {
# "string" => ["string"],
# },
# unlabeled_events_treatment: "IGNORE", # accepts IGNORE, FRAUD, LEGIT
@@ -931,10 +1104,10 @@ class CreateVariableRequest < Struct.new(
#
class CreateVariableResult < Aws::EmptyStructure; end
- # The model training validation messages.
+ # The model training data validation metrics.
#
# @!attribute [rw] file_level_messages
- # The file-specific model training validation messages.
+ # The file-specific model training data validation messages.
# @return [Array<Types::FileValidationMessage>]
#
# @!attribute [rw] field_level_messages
@@ -1217,7 +1390,7 @@ class DeleteLabelResult < Aws::EmptyStructure; end
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# }
#
# @!attribute [rw] model_id
@@ -1246,7 +1419,7 @@ class DeleteModelResult < Aws::EmptyStructure; end
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# }
#
@@ -1413,7 +1586,7 @@ class DescribeDetectorResult < Struct.new(
# {
# model_id: "modelIdentifier",
# model_version_number: "floatVersionString",
- # model_type: "ONLINE_FRAUD_INSIGHTS", # accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# next_token: "string",
# max_results: 1,
# }
@@ -2868,7 +3041,7 @@ class GetLabelsResult < Struct.new(
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# }
#
@@ -2975,7 +3148,7 @@ class GetModelVersionResult < Struct.new(
#
# {
# model_id: "modelIdentifier",
- # model_type: "ONLINE_FRAUD_INSIGHTS", # accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# next_token: "string",
# max_results: 1,
# }
@@ -3332,7 +3505,7 @@ class Label < Struct.new(
# data as a hash:
#
# {
- # label_mapper: { # required
+ # label_mapper: {
# "string" => ["string"],
# },
# unlabeled_events_treatment: "IGNORE", # accepts IGNORE, FRAUD, LEGIT
@@ -3754,7 +3927,7 @@ class ModelScores < Struct.new(
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# arn: "fraudDetectorArn",
# }
@@ -3840,6 +4013,11 @@ class ModelVersion < Struct.new(
# The model version ARN.
# @return [String]
#
+ # @!attribute [rw] training_result_v2
+ # The training result details. The details include the relative
+ # importance of the variables.
+ # @return [Types::TrainingResultV2]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/ModelVersionDetail AWS API Documentation
#
class ModelVersionDetail < Struct.new(
@@ -3854,7 +4032,8 @@ class ModelVersionDetail < Struct.new(
:training_result,
:last_updated_time,
:created_time,
- :arn)
+ :arn,
+ :training_result_v2)
SENSITIVE = []
include Aws::Structure
end
@@ -3883,6 +4062,75 @@ class ModelVersionEvaluation < Struct.new(
include Aws::Structure
end
+ # The Online Fraud Insights (OFI) model performance metrics data points.
+ #
+ # @!attribute [rw] fpr
+ # The false positive rate. This is the percentage of total legitimate
+ # events that are incorrectly predicted as fraud.
+ # @return [Float]
+ #
+ # @!attribute [rw] precision
+ # The percentage of fraud events correctly predicted as fraudulent,
+ # compared to all events predicted as fraudulent.
+ # @return [Float]
+ #
+ # @!attribute [rw] tpr
+ # The true positive rate. This is the percentage of total fraud the
+ # model detects. Also known as capture rate.
+ # @return [Float]
+ #
+ # @!attribute [rw] threshold
+ # The model threshold that specifies an acceptable fraud capture rate.
+ # For example, a threshold of 500 means any model score 500 or above
+ # is labeled as fraud.
+ # @return [Float]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/OFIMetricDataPoint AWS API Documentation
+ #
+ class OFIMetricDataPoint < Struct.new(
+ :fpr,
+ :precision,
+ :tpr,
+ :threshold)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The Online Fraud Insights (OFI) model performance score.
+ #
+ # @!attribute [rw] auc
+ # The area under the curve (AUC). This summarizes the true positive
+ # rate (TPR) and false positive rate (FPR) across all possible model
+ # score thresholds.
+ # @return [Float]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/OFIModelPerformance AWS API Documentation
+ #
+ class OFIModelPerformance < Struct.new(
+ :auc)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The Online Fraud Insights (OFI) model training metric details.
+ #
+ # @!attribute [rw] metric_data_points
+ # The model's performance metrics data points.
+ # @return [Array<Types::OFIMetricDataPoint>]
+ #
+ # @!attribute [rw] model_performance
+ # The model's overall performance score.
+ # @return [Types::OFIModelPerformance]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/OFITrainingMetricsValue AWS API Documentation
+ #
+ class OFITrainingMetricsValue < Struct.new(
+ :metric_data_points,
+ :model_performance)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
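
A hedged usage sketch, assuming `detail` is a `Types::ModelVersionDetail` whose
new `training_result_v2` field is populated for an OFI model, and that `fpr` is
expressed as a fraction:

    # Read the overall OFI score and find the operating point closest to a
    # 1% false positive rate.
    ofi = detail.training_result_v2&.training_metrics_v2&.ofi
    if ofi
      puts "AUC: #{ofi.model_performance.auc}"
      point = ofi.metric_data_points.min_by { |p| (p.fpr.to_f - 0.01).abs }
      puts "Threshold nearest 1% FPR: #{point.threshold}" if point
    end
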
# The outcome.
#
# @!attribute [rw] name
@@ -3924,10 +4172,23 @@ class Outcome < Struct.new(
# The details of the event variable's impact on the prediction score.
# @return [Array<Types::VariableImpactExplanation>]
#
+ # @!attribute [rw] aggregated_variables_impact_explanations
+ # The details of the aggregated variables' impact on the prediction
+ # score.
+ #
+ # The Account Takeover Insights (ATI) model uses event variables from
+ # the login data you provide to continuously calculate a set of
+ # variables (aggregated variables) based on historical events. For
+ # example, your ATI model might calculate the number of times a user
+ # has logged in using the same IP address. In this case, the event
+ # variables used to derive the aggregated variables are `IP address`
+ # and `user`.
+ # @return [Array<Types::AggregatedVariablesImpactExplanation>]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/PredictionExplanations AWS API Documentation
#
class PredictionExplanations < Struct.new(
- :variable_impact_explanations)
+ :variable_impact_explanations,
+ :aggregated_variables_impact_explanations)
SENSITIVE = []
include Aws::Structure
end
@@ -4501,6 +4762,76 @@ class SendEventRequest < Struct.new(
#
class SendEventResult < Aws::EmptyStructure; end
+ # The performance metrics data points for the Transaction Fraud
+ # Insights (TFI) model.
+ #
+ # @!attribute [rw] fpr
+ # The false positive rate. This is the percentage of total legitimate
+ # events that are incorrectly predicted as fraud.
+ # @return [Float]
+ #
+ # @!attribute [rw] precision
+ # The percentage of fraud events correctly predicted as fraudulent,
+ # compared to all events predicted as fraudulent.
+ # @return [Float]
+ #
+ # @!attribute [rw] tpr
+ # The true positive rate. This is the percentage of total fraud the
+ # model detects. Also known as capture rate.
+ # @return [Float]
+ #
+ # @!attribute [rw] threshold
+ # The model threshold that specifies an acceptable fraud capture rate.
+ # For example, a threshold of 500 means any model score 500 or above
+ # is labeled as fraud.
+ # @return [Float]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/TFIMetricDataPoint AWS API Documentation
+ #
+ class TFIMetricDataPoint < Struct.new(
+ :fpr,
+ :precision,
+ :tpr,
+ :threshold)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The Transaction Fraud Insights (TFI) model performance score.
+ #
+ # @!attribute [rw] auc
+ # The area under the curve (AUC). This summarizes the true positive
+ # rate (TPR) and false positive rate (FPR) across all possible model
+ # score thresholds.
+ # @return [Float]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/TFIModelPerformance AWS API Documentation
+ #
+ class TFIModelPerformance < Struct.new(
+ :auc)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The Transaction Fraud Insights (TFI) model training metric details.
+ #
+ # @!attribute [rw] metric_data_points
+ # The model's performance metrics data points.
+ # @return [Array<Types::TFIMetricDataPoint>]
+ #
+ # @!attribute [rw] model_performance
+ # The model performance score.
+ # @return [Types::TFIModelPerformance]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/TFITrainingMetricsValue AWS API Documentation
+ #
+ class TFITrainingMetricsValue < Struct.new(
+ :metric_data_points,
+ :model_performance)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# A key and value pair.
#
# @note When making an API call, you may pass Tag
@@ -4582,8 +4913,8 @@ class ThrottlingException < Struct.new(
#
# {
# model_variables: ["string"], # required
- # label_schema: { # required
- # label_mapper: { # required
+ # label_schema: {
+ # label_mapper: {
# "string" => ["string"],
# },
# unlabeled_events_treatment: "IGNORE", # accepts IGNORE, FRAUD, LEGIT
@@ -4629,6 +4960,30 @@ class TrainingMetrics < Struct.new(
include Aws::Structure
end
+ # The training metrics details.
+ #
+ # @!attribute [rw] ofi
+ # The Online Fraud Insights (OFI) model training metric details.
+ # @return [Types::OFITrainingMetricsValue]
+ #
+ # @!attribute [rw] tfi
+ # The Transaction Fraud Insights (TFI) model training metric details.
+ # @return [Types::TFITrainingMetricsValue]
+ #
+ # @!attribute [rw] ati
+ # The Account Takeover Insights (ATI) model training metric details.
+ # @return [Types::ATITrainingMetricsValue]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/TrainingMetricsV2 AWS API Documentation
+ #
+ class TrainingMetricsV2 < Struct.new(
+ :ofi,
+ :tfi,
+ :ati)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
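
`TrainingMetricsV2` behaves like a tagged union keyed by model type. A small
sketch under the assumption that only the member matching the trained model
type is non-nil:

    # Pull the headline performance number regardless of model type: OFI and
    # TFI report AUC, while ATI reports the anomaly separation index (ASI).
    def headline_score(metrics_v2)
      if metrics_v2.ofi then ["AUC", metrics_v2.ofi.model_performance.auc]
      elsif metrics_v2.tfi then ["AUC", metrics_v2.tfi.model_performance.auc]
      elsif metrics_v2.ati then ["ASI", metrics_v2.ati.model_performance.asi]
      end
    end
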
# The training result details.
#
# @!attribute [rw] data_validation_metrics
@@ -4653,6 +5008,42 @@ class TrainingResult < Struct.new(
include Aws::Structure
end
+ # The training result details.
+ #
+ # @!attribute [rw] data_validation_metrics
+ # The model training data validation metrics.
+ # @return [Types::DataValidationMetrics]
+ #
+ # @!attribute [rw] training_metrics_v2
+ # The training metric details.
+ # @return [Types::TrainingMetricsV2]
+ #
+ # @!attribute [rw] variable_importance_metrics
+ # The variable importance metrics details.
+ # @return [Types::VariableImportanceMetrics]
+ #
+ # @!attribute [rw] aggregated_variables_importance_metrics
+ # The variable importance metrics of the aggregated variables.
+ #
+ # The Account Takeover Insights (ATI) model uses event variables from
+ # the login data you provide to continuously calculate a set of
+ # variables (aggregated variables) based on historical events. For
+ # example, your ATI model might calculate the number of times a user
+ # has logged in using the same IP address. In this case, the event
+ # variables used to derive the aggregated variables are `IP address`
+ # and `user`.
+ # @return [Types::AggregatedVariablesImportanceMetrics]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/TrainingResultV2 AWS API Documentation
+ #
+ class TrainingResultV2 < Struct.new(
+ :data_validation_metrics,
+ :training_metrics_v2,
+ :variable_importance_metrics,
+ :aggregated_variables_importance_metrics)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
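
A hedged end-to-end sketch of retrieving this structure through the existing
`describe_model_versions` operation (the model ID and version number are
placeholders):

    require "aws-sdk-frauddetector"

    client = Aws::FraudDetector::Client.new
    resp = client.describe_model_versions(
      model_id: "sample_ati_model",            # placeholder
      model_type: "ACCOUNT_TAKEOVER_INSIGHTS",
      model_version_number: "1.0"              # placeholder
    )
    result = resp.model_version_details.first&.training_result_v2
    result&.aggregated_variables_importance_metrics&.log_odds_metrics&.each do |m|
      puts "#{m.variable_names.join(', ')} => #{m.aggregated_variables_importance}"
    end
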
# @note When making an API call, you may pass UntagResourceRequest
# data as a hash:
#
@@ -4735,7 +5126,7 @@ class UpdateDetectorVersionMetadataResult < Aws::EmptyStructure; end
# model_versions: [
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# arn: "fraudDetectorArn",
# },
@@ -4884,7 +5275,7 @@ class UpdateEventLabelResult < Aws::EmptyStructure; end
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# description: "description",
# }
#
@@ -4919,7 +5310,7 @@ class UpdateModelResult < Aws::EmptyStructure; end
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# major_version_number: "wholeNumberVersionString", # required
# external_events_detail: {
# data_location: "s3BucketLocation", # required
@@ -5010,7 +5401,7 @@ class UpdateModelVersionResult < Struct.new(
#
# {
# model_id: "modelIdentifier", # required
- # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS
+ # model_type: "ONLINE_FRAUD_INSIGHTS", # required, accepts ONLINE_FRAUD_INSIGHTS, TRANSACTION_FRAUD_INSIGHTS, ACCOUNT_TAKEOVER_INSIGHTS
# model_version_number: "floatVersionString", # required
# status: "ACTIVE", # required, accepts ACTIVE, INACTIVE, TRAINING_CANCELLED
# }
diff --git a/gems/aws-sdk-iotsitewise/CHANGELOG.md b/gems/aws-sdk-iotsitewise/CHANGELOG.md
index b8ed5d8aa7d..cdad533a232 100644
--- a/gems/aws-sdk-iotsitewise/CHANGELOG.md
+++ b/gems/aws-sdk-iotsitewise/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.43.0 (2022-07-21)
+------------------
+
+* Feature - Added asynchronous API to ingest bulk historical and current data into IoT SiteWise.
+
1.42.0 (2022-05-31)
------------------
diff --git a/gems/aws-sdk-iotsitewise/VERSION b/gems/aws-sdk-iotsitewise/VERSION
index a50908ca3da..b978278f05f 100644
--- a/gems/aws-sdk-iotsitewise/VERSION
+++ b/gems/aws-sdk-iotsitewise/VERSION
@@ -1 +1 @@
-1.42.0
+1.43.0
diff --git a/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise.rb b/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise.rb
index cd9187cc676..03503dacab7 100644
--- a/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise.rb
+++ b/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise.rb
@@ -49,6 +49,6 @@
# @!group service
module Aws::IoTSiteWise
- GEM_VERSION = '1.42.0'
+ GEM_VERSION = '1.43.0'
end
diff --git a/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/client.rb b/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/client.rb
index a448a208f94..7c8ea6b0097 100644
--- a/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/client.rb
+++ b/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/client.rb
@@ -1231,6 +1231,91 @@ def create_asset_model(params = {}, options = {})
req.send_request(options)
end
+ # This API operation is in preview release for IoT SiteWise and is
+ # subject to change. We recommend that you use this operation only with
+ # test data, and not in production environments.
+ #
+ #
+ #
+ # Defines a job to ingest data to IoT SiteWise from Amazon S3. For more
+ # information, see [Create a bulk import job (CLI)][1] in the
+ # *IoT SiteWise User Guide*.
+ #
+ # You must enable IoT SiteWise to export data to Amazon S3 before you
+ # create a bulk import job. For more information about how to configure
+ # storage settings, see [PutStorageConfiguration][2].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/iot-sitewise/latest/userguide/CreateBulkImportJob.html
+ # [2]: https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_PutStorageConfiguration.html
+ #
+ # @option params [required, String] :job_name
+ # The unique name that helps identify the job request.
+ #
+ # @option params [required, String] :job_role_arn
+ # The [ARN][1] of the IAM role that allows IoT SiteWise to read Amazon
+ # S3 data.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ #
+ # @option params [required, Array<Types::File>] :files
+ # The files in the specified Amazon S3 bucket that contain your data.
+ #
+ # @option params [required, Types::ErrorReportLocation] :error_report_location
+ # The Amazon S3 destination where errors associated with the job
+ # creation request are saved.
+ #
+ # @option params [required, Types::JobConfiguration] :job_configuration
+ # Contains the configuration information of a job, such as the file
+ # format used to save data in Amazon S3.
+ #
+ # @return [Types::CreateBulkImportJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateBulkImportJobResponse#job_id #job_id} => String
+ # * {Types::CreateBulkImportJobResponse#job_name #job_name} => String
+ # * {Types::CreateBulkImportJobResponse#job_status #job_status} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_bulk_import_job({
+ # job_name: "Name", # required
+ # job_role_arn: "ARN", # required
+ # files: [ # required
+ # {
+ # bucket: "Bucket", # required
+ # key: "String", # required
+ # version_id: "String",
+ # },
+ # ],
+ # error_report_location: { # required
+ # bucket: "Bucket", # required
+ # prefix: "String", # required
+ # },
+ # job_configuration: { # required
+ # file_format: { # required
+ # csv: {
+ # column_names: ["ALIAS"], # accepts ALIAS, ASSET_ID, PROPERTY_ID, DATA_TYPE, TIMESTAMP_SECONDS, TIMESTAMP_NANO_OFFSET, QUALITY, VALUE
+ # },
+ # },
+ # },
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.job_id #=> String
+ # resp.job_name #=> String
+ # resp.job_status #=> String, one of "PENDING", "CANCELLED", "RUNNING", "COMPLETED", "FAILED", "COMPLETED_WITH_FAILURES"
+ #
+ # @overload create_bulk_import_job(params = {})
+ # @param [Hash] params ({})
+ def create_bulk_import_job(params = {}, options = {})
+ req = build_request(:create_bulk_import_job, params)
+ req.send_request(options)
+ end
+
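
A hedged end-to-end sketch of the new operation; the bucket, key, and role ARN
are placeholders, and the CSV column list is one illustrative choice from the
accepted values above:

    require "aws-sdk-iotsitewise"

    client = Aws::IoTSiteWise::Client.new
    resp = client.create_bulk_import_job(
      job_name: "historical-backfill-2022-07",
      job_role_arn: "arn:aws:iam::123456789012:role/SiteWiseBulkImport", # placeholder
      files: [{ bucket: "my-telemetry-bucket", key: "backfill/data.csv" }],
      error_report_location: { bucket: "my-telemetry-bucket", prefix: "errors/" },
      job_configuration: {
        file_format: {
          csv: { column_names: %w[ASSET_ID PROPERTY_ID TIMESTAMP_SECONDS VALUE] }
        }
      }
    )
    puts "Started job #{resp.job_id} (#{resp.job_status})"
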
# Creates a dashboard in an IoT SiteWise Monitor project.
#
# @option params [required, String] :project_id
@@ -2194,6 +2279,65 @@ def describe_asset_property(params = {}, options = {})
req.send_request(options)
end
+ # This API operation is in preview release for IoT SiteWise and is
+ # subject to change. We recommend that you use this operation only with
+ # test data, and not in production environments.
+ #
+ #
+ #
+ # Retrieves information about a bulk import job request. For more
+ # information, see [Describe a bulk import job (CLI)][1] in the
+ # *IoT SiteWise User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/iot-sitewise/latest/userguide/DescribeBulkImportJob.html
+ #
+ # @option params [required, String] :job_id
+ # The ID of the job.
+ #
+ # @return [Types::DescribeBulkImportJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeBulkImportJobResponse#job_id #job_id} => String
+ # * {Types::DescribeBulkImportJobResponse#job_name #job_name} => String
+ # * {Types::DescribeBulkImportJobResponse#job_status #job_status} => String
+ # * {Types::DescribeBulkImportJobResponse#job_role_arn #job_role_arn} => String
+ # * {Types::DescribeBulkImportJobResponse#files #files} => Array<Types::File>
+ # * {Types::DescribeBulkImportJobResponse#error_report_location #error_report_location} => Types::ErrorReportLocation
+ # * {Types::DescribeBulkImportJobResponse#job_configuration #job_configuration} => Types::JobConfiguration
+ # * {Types::DescribeBulkImportJobResponse#job_creation_date #job_creation_date} => Time
+ # * {Types::DescribeBulkImportJobResponse#job_last_update_date #job_last_update_date} => Time
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_bulk_import_job({
+ # job_id: "ID", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.job_id #=> String
+ # resp.job_name #=> String
+ # resp.job_status #=> String, one of "PENDING", "CANCELLED", "RUNNING", "COMPLETED", "FAILED", "COMPLETED_WITH_FAILURES"
+ # resp.job_role_arn #=> String
+ # resp.files #=> Array
+ # resp.files[0].bucket #=> String
+ # resp.files[0].key #=> String
+ # resp.files[0].version_id #=> String
+ # resp.error_report_location.bucket #=> String
+ # resp.error_report_location.prefix #=> String
+ # resp.job_configuration.file_format.csv.column_names #=> Array
+ # resp.job_configuration.file_format.csv.column_names[0] #=> String, one of "ALIAS", "ASSET_ID", "PROPERTY_ID", "DATA_TYPE", "TIMESTAMP_SECONDS", "TIMESTAMP_NANO_OFFSET", "QUALITY", "VALUE"
+ # resp.job_creation_date #=> Time
+ # resp.job_last_update_date #=> Time
+ #
+ # @overload describe_bulk_import_job(params = {})
+ # @param [Hash] params ({})
+ def describe_bulk_import_job(params = {}, options = {})
+ req = build_request(:describe_bulk_import_job, params)
+ req.send_request(options)
+ end
+
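
A hedged polling sketch built on this operation; the job ID is a placeholder
for the value returned by create_bulk_import_job, and the 30-second interval
is arbitrary:

    require "aws-sdk-iotsitewise"

    client = Aws::IoTSiteWise::Client.new
    job_id = "placeholder-job-id"
    job = client.describe_bulk_import_job(job_id: job_id)
    until %w[COMPLETED CANCELLED FAILED COMPLETED_WITH_FAILURES].include?(job.job_status)
      sleep 30
      job = client.describe_bulk_import_job(job_id: job_id)
    end
    if job.job_status != "COMPLETED"
      loc = job.error_report_location
      puts "See s3://#{loc.bucket}/#{loc.prefix} for the error report"
    end
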
# Retrieves information about a dashboard.
#
# @option params [required, String] :dashboard_id
@@ -3443,6 +3587,60 @@ def list_associated_assets(params = {}, options = {})
req.send_request(options)
end
+ # This API operation is in preview release for IoT SiteWise and is
+ # subject to change. We recommend that you use this operation only with
+ # test data, and not in production environments.
+ #
+ #
+ #
+ # Retrieves a paginated list of bulk import job requests. For more
+ # information, see [List bulk import jobs (CLI)][1] in the
+ # *IoT SiteWise User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/iot-sitewise/latest/userguide/ListBulkImportJobs.html
+ #
+ # @option params [String] :next_token
+ # The token to be used for the next set of paginated results.
+ #
+ # @option params [Integer] :max_results
+ # The maximum number of results to return for each paginated request.
+ #
+ # @option params [String] :filter
+ # You can use a filter to select the bulk import jobs that you want to
+ # retrieve.
+ #
+ # @return [Types::ListBulkImportJobsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListBulkImportJobsResponse#job_summaries #job_summaries} => Array<Types::JobSummary>
+ # * {Types::ListBulkImportJobsResponse#next_token #next_token} => String
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.list_bulk_import_jobs({
+ # next_token: "NextToken",
+ # max_results: 1,
+ # filter: "ALL", # accepts ALL, PENDING, RUNNING, CANCELLED, FAILED, COMPLETED_WITH_FAILURES, COMPLETED
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.job_summaries #=> Array
+ # resp.job_summaries[0].id #=> String
+ # resp.job_summaries[0].name #=> String
+ # resp.job_summaries[0].status #=> String, one of "PENDING", "CANCELLED", "RUNNING", "COMPLETED", "FAILED", "COMPLETED_WITH_FAILURES"
+ # resp.next_token #=> String
+ #
+ # @overload list_bulk_import_jobs(params = {})
+ # @param [Hash] params ({})
+ def list_bulk_import_jobs(params = {}, options = {})
+ req = build_request(:list_bulk_import_jobs, params)
+ req.send_request(options)
+ end
+
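
Because the response is pageable, the SDK's page enumerator can walk every
result; a minimal sketch that collects the names of failed jobs (the FAILED
filter value comes from the accepted list above):

    failed_names = client.list_bulk_import_jobs(filter: "FAILED").flat_map do |page|
      page.job_summaries.map(&:name)
    end
    puts failed_names
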
# Retrieves a paginated list of dashboards for an IoT SiteWise Monitor
# project.
#
@@ -4675,7 +4873,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-iotsitewise'
- context[:gem_version] = '1.42.0'
+ context[:gem_version] = '1.43.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/client_api.rb b/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/client_api.rb
index 0cde9b95804..38f3517dad6 100644
--- a/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/client_api.rb
+++ b/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/client_api.rb
@@ -120,10 +120,13 @@ module ClientApi
BatchPutAssetPropertyValueErrorCode = Shapes::StringShape.new(name: 'BatchPutAssetPropertyValueErrorCode')
BatchPutAssetPropertyValueRequest = Shapes::StructureShape.new(name: 'BatchPutAssetPropertyValueRequest')
BatchPutAssetPropertyValueResponse = Shapes::StructureShape.new(name: 'BatchPutAssetPropertyValueResponse')
+ Bucket = Shapes::StringShape.new(name: 'Bucket')
CapabilityConfiguration = Shapes::StringShape.new(name: 'CapabilityConfiguration')
CapabilityNamespace = Shapes::StringShape.new(name: 'CapabilityNamespace')
CapabilitySyncStatus = Shapes::StringShape.new(name: 'CapabilitySyncStatus')
ClientToken = Shapes::StringShape.new(name: 'ClientToken')
+ ColumnName = Shapes::StringShape.new(name: 'ColumnName')
+ ColumnNames = Shapes::ListShape.new(name: 'ColumnNames')
CompositeModelProperty = Shapes::StructureShape.new(name: 'CompositeModelProperty')
ComputeLocation = Shapes::StringShape.new(name: 'ComputeLocation')
ConfigurationErrorDetails = Shapes::StructureShape.new(name: 'ConfigurationErrorDetails')
@@ -137,6 +140,8 @@ module ClientApi
CreateAssetModelResponse = Shapes::StructureShape.new(name: 'CreateAssetModelResponse')
CreateAssetRequest = Shapes::StructureShape.new(name: 'CreateAssetRequest')
CreateAssetResponse = Shapes::StructureShape.new(name: 'CreateAssetResponse')
+ CreateBulkImportJobRequest = Shapes::StructureShape.new(name: 'CreateBulkImportJobRequest')
+ CreateBulkImportJobResponse = Shapes::StructureShape.new(name: 'CreateBulkImportJobResponse')
CreateDashboardRequest = Shapes::StructureShape.new(name: 'CreateDashboardRequest')
CreateDashboardResponse = Shapes::StructureShape.new(name: 'CreateDashboardResponse')
CreateGatewayRequest = Shapes::StructureShape.new(name: 'CreateGatewayRequest')
@@ -145,6 +150,7 @@ module ClientApi
CreatePortalResponse = Shapes::StructureShape.new(name: 'CreatePortalResponse')
CreateProjectRequest = Shapes::StructureShape.new(name: 'CreateProjectRequest')
CreateProjectResponse = Shapes::StructureShape.new(name: 'CreateProjectResponse')
+ Csv = Shapes::StructureShape.new(name: 'Csv')
CustomerManagedS3Storage = Shapes::StructureShape.new(name: 'CustomerManagedS3Storage')
DashboardDefinition = Shapes::StringShape.new(name: 'DashboardDefinition')
DashboardSummaries = Shapes::ListShape.new(name: 'DashboardSummaries')
@@ -172,6 +178,8 @@ module ClientApi
DescribeAssetPropertyResponse = Shapes::StructureShape.new(name: 'DescribeAssetPropertyResponse')
DescribeAssetRequest = Shapes::StructureShape.new(name: 'DescribeAssetRequest')
DescribeAssetResponse = Shapes::StructureShape.new(name: 'DescribeAssetResponse')
+ DescribeBulkImportJobRequest = Shapes::StructureShape.new(name: 'DescribeBulkImportJobRequest')
+ DescribeBulkImportJobResponse = Shapes::StructureShape.new(name: 'DescribeBulkImportJobResponse')
DescribeDashboardRequest = Shapes::StructureShape.new(name: 'DescribeDashboardRequest')
DescribeDashboardResponse = Shapes::StructureShape.new(name: 'DescribeDashboardResponse')
DescribeDefaultEncryptionConfigurationRequest = Shapes::StructureShape.new(name: 'DescribeDefaultEncryptionConfigurationRequest')
@@ -204,10 +212,14 @@ module ClientApi
ErrorCode = Shapes::StringShape.new(name: 'ErrorCode')
ErrorDetails = Shapes::StructureShape.new(name: 'ErrorDetails')
ErrorMessage = Shapes::StringShape.new(name: 'ErrorMessage')
+ ErrorReportLocation = Shapes::StructureShape.new(name: 'ErrorReportLocation')
ExceptionMessage = Shapes::StringShape.new(name: 'ExceptionMessage')
Expression = Shapes::StringShape.new(name: 'Expression')
ExpressionVariable = Shapes::StructureShape.new(name: 'ExpressionVariable')
ExpressionVariables = Shapes::ListShape.new(name: 'ExpressionVariables')
+ File = Shapes::StructureShape.new(name: 'File')
+ FileFormat = Shapes::StructureShape.new(name: 'FileFormat')
+ Files = Shapes::ListShape.new(name: 'Files')
ForwardingConfig = Shapes::StructureShape.new(name: 'ForwardingConfig')
ForwardingConfigState = Shapes::StringShape.new(name: 'ForwardingConfigState')
GatewayCapabilitySummaries = Shapes::ListShape.new(name: 'GatewayCapabilitySummaries')
@@ -246,6 +258,10 @@ module ClientApi
IntervalInSeconds = Shapes::IntegerShape.new(name: 'IntervalInSeconds')
IntervalWindowInSeconds = Shapes::IntegerShape.new(name: 'IntervalWindowInSeconds')
InvalidRequestException = Shapes::StructureShape.new(name: 'InvalidRequestException')
+ JobConfiguration = Shapes::StructureShape.new(name: 'JobConfiguration')
+ JobStatus = Shapes::StringShape.new(name: 'JobStatus')
+ JobSummaries = Shapes::ListShape.new(name: 'JobSummaries')
+ JobSummary = Shapes::StructureShape.new(name: 'JobSummary')
KmsKeyId = Shapes::StringShape.new(name: 'KmsKeyId')
LimitExceededException = Shapes::StructureShape.new(name: 'LimitExceededException')
ListAccessPoliciesRequest = Shapes::StructureShape.new(name: 'ListAccessPoliciesRequest')
@@ -259,6 +275,9 @@ module ClientApi
ListAssetsResponse = Shapes::StructureShape.new(name: 'ListAssetsResponse')
ListAssociatedAssetsRequest = Shapes::StructureShape.new(name: 'ListAssociatedAssetsRequest')
ListAssociatedAssetsResponse = Shapes::StructureShape.new(name: 'ListAssociatedAssetsResponse')
+ ListBulkImportJobsFilter = Shapes::StringShape.new(name: 'ListBulkImportJobsFilter')
+ ListBulkImportJobsRequest = Shapes::StructureShape.new(name: 'ListBulkImportJobsRequest')
+ ListBulkImportJobsResponse = Shapes::StructureShape.new(name: 'ListBulkImportJobsResponse')
ListDashboardsRequest = Shapes::StructureShape.new(name: 'ListDashboardsRequest')
ListDashboardsResponse = Shapes::StructureShape.new(name: 'ListDashboardsResponse')
ListGatewaysRequest = Shapes::StructureShape.new(name: 'ListGatewaysRequest')
@@ -336,6 +355,7 @@ module ClientApi
SSOApplicationId = Shapes::StringShape.new(name: 'SSOApplicationId')
ServiceUnavailableException = Shapes::StructureShape.new(name: 'ServiceUnavailableException')
StorageType = Shapes::StringShape.new(name: 'StorageType')
+ String = Shapes::StringShape.new(name: 'String')
TagKey = Shapes::StringShape.new(name: 'TagKey')
TagKeyList = Shapes::ListShape.new(name: 'TagKeyList')
TagMap = Shapes::MapShape.new(name: 'TagMap')
@@ -750,6 +770,8 @@ module ClientApi
BatchPutAssetPropertyValueResponse.add_member(:error_entries, Shapes::ShapeRef.new(shape: BatchPutAssetPropertyErrorEntries, required: true, location_name: "errorEntries"))
BatchPutAssetPropertyValueResponse.struct_class = Types::BatchPutAssetPropertyValueResponse
+ ColumnNames.member = Shapes::ShapeRef.new(shape: ColumnName)
+
CompositeModelProperty.add_member(:name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "name"))
CompositeModelProperty.add_member(:type, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "type"))
CompositeModelProperty.add_member(:asset_property, Shapes::ShapeRef.new(shape: Property, required: true, location_name: "assetProperty"))
@@ -805,6 +827,18 @@ module ClientApi
CreateAssetResponse.add_member(:asset_status, Shapes::ShapeRef.new(shape: AssetStatus, required: true, location_name: "assetStatus"))
CreateAssetResponse.struct_class = Types::CreateAssetResponse
+ CreateBulkImportJobRequest.add_member(:job_name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "jobName"))
+ CreateBulkImportJobRequest.add_member(:job_role_arn, Shapes::ShapeRef.new(shape: ARN, required: true, location_name: "jobRoleArn"))
+ CreateBulkImportJobRequest.add_member(:files, Shapes::ShapeRef.new(shape: Files, required: true, location_name: "files"))
+ CreateBulkImportJobRequest.add_member(:error_report_location, Shapes::ShapeRef.new(shape: ErrorReportLocation, required: true, location_name: "errorReportLocation"))
+ CreateBulkImportJobRequest.add_member(:job_configuration, Shapes::ShapeRef.new(shape: JobConfiguration, required: true, location_name: "jobConfiguration"))
+ CreateBulkImportJobRequest.struct_class = Types::CreateBulkImportJobRequest
+
+ CreateBulkImportJobResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: ID, required: true, location_name: "jobId"))
+ CreateBulkImportJobResponse.add_member(:job_name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "jobName"))
+ CreateBulkImportJobResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: JobStatus, required: true, location_name: "jobStatus"))
+ CreateBulkImportJobResponse.struct_class = Types::CreateBulkImportJobResponse
+
CreateDashboardRequest.add_member(:project_id, Shapes::ShapeRef.new(shape: ID, required: true, location_name: "projectId"))
CreateDashboardRequest.add_member(:dashboard_name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "dashboardName"))
CreateDashboardRequest.add_member(:dashboard_description, Shapes::ShapeRef.new(shape: Description, location_name: "dashboardDescription"))
@@ -856,6 +890,9 @@ module ClientApi
CreateProjectResponse.add_member(:project_arn, Shapes::ShapeRef.new(shape: ARN, required: true, location_name: "projectArn"))
CreateProjectResponse.struct_class = Types::CreateProjectResponse
+ Csv.add_member(:column_names, Shapes::ShapeRef.new(shape: ColumnNames, location_name: "columnNames"))
+ Csv.struct_class = Types::Csv
+
CustomerManagedS3Storage.add_member(:s3_resource_arn, Shapes::ShapeRef.new(shape: ARN, required: true, location_name: "s3ResourceArn"))
CustomerManagedS3Storage.add_member(:role_arn, Shapes::ShapeRef.new(shape: ARN, required: true, location_name: "roleArn"))
CustomerManagedS3Storage.struct_class = Types::CustomerManagedS3Storage
@@ -971,6 +1008,20 @@ module ClientApi
DescribeAssetResponse.add_member(:asset_description, Shapes::ShapeRef.new(shape: Description, location_name: "assetDescription"))
DescribeAssetResponse.struct_class = Types::DescribeAssetResponse
+ DescribeBulkImportJobRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: ID, required: true, location: "uri", location_name: "jobId"))
+ DescribeBulkImportJobRequest.struct_class = Types::DescribeBulkImportJobRequest
+
+ DescribeBulkImportJobResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: ID, required: true, location_name: "jobId"))
+ DescribeBulkImportJobResponse.add_member(:job_name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "jobName"))
+ DescribeBulkImportJobResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: JobStatus, required: true, location_name: "jobStatus"))
+ DescribeBulkImportJobResponse.add_member(:job_role_arn, Shapes::ShapeRef.new(shape: ARN, required: true, location_name: "jobRoleArn"))
+ DescribeBulkImportJobResponse.add_member(:files, Shapes::ShapeRef.new(shape: Files, required: true, location_name: "files"))
+ DescribeBulkImportJobResponse.add_member(:error_report_location, Shapes::ShapeRef.new(shape: ErrorReportLocation, required: true, location_name: "errorReportLocation"))
+ DescribeBulkImportJobResponse.add_member(:job_configuration, Shapes::ShapeRef.new(shape: JobConfiguration, required: true, location_name: "jobConfiguration"))
+ DescribeBulkImportJobResponse.add_member(:job_creation_date, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "jobCreationDate"))
+ DescribeBulkImportJobResponse.add_member(:job_last_update_date, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "jobLastUpdateDate"))
+ DescribeBulkImportJobResponse.struct_class = Types::DescribeBulkImportJobResponse
+
DescribeDashboardRequest.add_member(:dashboard_id, Shapes::ShapeRef.new(shape: ID, required: true, location: "uri", location_name: "dashboardId"))
DescribeDashboardRequest.struct_class = Types::DescribeDashboardRequest
@@ -1098,12 +1149,26 @@ module ClientApi
ErrorDetails.add_member(:details, Shapes::ShapeRef.new(shape: DetailedErrors, location_name: "details"))
ErrorDetails.struct_class = Types::ErrorDetails
+ ErrorReportLocation.add_member(:bucket, Shapes::ShapeRef.new(shape: Bucket, required: true, location_name: "bucket"))
+ ErrorReportLocation.add_member(:prefix, Shapes::ShapeRef.new(shape: String, required: true, location_name: "prefix"))
+ ErrorReportLocation.struct_class = Types::ErrorReportLocation
+
ExpressionVariable.add_member(:name, Shapes::ShapeRef.new(shape: VariableName, required: true, location_name: "name"))
ExpressionVariable.add_member(:value, Shapes::ShapeRef.new(shape: VariableValue, required: true, location_name: "value"))
ExpressionVariable.struct_class = Types::ExpressionVariable
ExpressionVariables.member = Shapes::ShapeRef.new(shape: ExpressionVariable)
+ File.add_member(:bucket, Shapes::ShapeRef.new(shape: Bucket, required: true, location_name: "bucket"))
+ File.add_member(:key, Shapes::ShapeRef.new(shape: String, required: true, location_name: "key"))
+ File.add_member(:version_id, Shapes::ShapeRef.new(shape: String, location_name: "versionId"))
+ File.struct_class = Types::File
+
+ FileFormat.add_member(:csv, Shapes::ShapeRef.new(shape: Csv, location_name: "csv"))
+ FileFormat.struct_class = Types::FileFormat
+
+ Files.member = Shapes::ShapeRef.new(shape: File)
+
ForwardingConfig.add_member(:state, Shapes::ShapeRef.new(shape: ForwardingConfigState, required: true, location_name: "state"))
ForwardingConfig.struct_class = Types::ForwardingConfig
@@ -1233,6 +1298,16 @@ module ClientApi
InvalidRequestException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessage, required: true, location_name: "message"))
InvalidRequestException.struct_class = Types::InvalidRequestException
+ JobConfiguration.add_member(:file_format, Shapes::ShapeRef.new(shape: FileFormat, required: true, location_name: "fileFormat"))
+ JobConfiguration.struct_class = Types::JobConfiguration
+
+ JobSummaries.member = Shapes::ShapeRef.new(shape: JobSummary)
+
+ JobSummary.add_member(:id, Shapes::ShapeRef.new(shape: ID, required: true, location_name: "id"))
+ JobSummary.add_member(:name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "name"))
+ JobSummary.add_member(:status, Shapes::ShapeRef.new(shape: JobStatus, required: true, location_name: "status"))
+ JobSummary.struct_class = Types::JobSummary
+
LimitExceededException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessage, required: true, location_name: "message"))
LimitExceededException.struct_class = Types::LimitExceededException
@@ -1288,6 +1363,15 @@ module ClientApi
ListAssociatedAssetsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
ListAssociatedAssetsResponse.struct_class = Types::ListAssociatedAssetsResponse
+ ListBulkImportJobsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
+ ListBulkImportJobsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults"))
+ ListBulkImportJobsRequest.add_member(:filter, Shapes::ShapeRef.new(shape: ListBulkImportJobsFilter, location: "querystring", location_name: "filter"))
+ ListBulkImportJobsRequest.struct_class = Types::ListBulkImportJobsRequest
+
+ ListBulkImportJobsResponse.add_member(:job_summaries, Shapes::ShapeRef.new(shape: JobSummaries, required: true, location_name: "jobSummaries"))
+ ListBulkImportJobsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
+ ListBulkImportJobsResponse.struct_class = Types::ListBulkImportJobsResponse
+
ListDashboardsRequest.add_member(:project_id, Shapes::ShapeRef.new(shape: ID, required: true, location: "querystring", location_name: "projectId"))
ListDashboardsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
ListDashboardsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults"))
@@ -1849,6 +1933,24 @@ module ClientApi
o.errors << Shapes::ShapeRef.new(shape: ConflictingOperationException)
end)
+ api.add_operation(:create_bulk_import_job, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "CreateBulkImportJob"
+ o.http_method = "POST"
+ o.http_request_uri = "/jobs"
+ o.endpoint_pattern = {
+ "hostPrefix" => "data.",
+ }
+ o.input = Shapes::ShapeRef.new(shape: CreateBulkImportJobRequest)
+ o.output = Shapes::ShapeRef.new(shape: CreateBulkImportJobResponse)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceAlreadyExistsException)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
+ o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
+ o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
+ o.errors << Shapes::ShapeRef.new(shape: ConflictingOperationException)
+ end)
+
api.add_operation(:create_dashboard, Seahorse::Model::Operation.new.tap do |o|
o.name = "CreateDashboard"
o.http_method = "POST"
@@ -2097,6 +2199,21 @@ module ClientApi
o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
end)
+ api.add_operation(:describe_bulk_import_job, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "DescribeBulkImportJob"
+ o.http_method = "GET"
+ o.http_request_uri = "/jobs/{jobId}"
+ o.endpoint_pattern = {
+ "hostPrefix" => "data.",
+ }
+ o.input = Shapes::ShapeRef.new(shape: DescribeBulkImportJobRequest)
+ o.output = Shapes::ShapeRef.new(shape: DescribeBulkImportJobResponse)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
+ o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
+ end)
+
api.add_operation(:describe_dashboard, Seahorse::Model::Operation.new.tap do |o|
o.name = "DescribeDashboard"
o.http_method = "GET"
@@ -2450,6 +2567,27 @@ module ClientApi
)
end)
+ api.add_operation(:list_bulk_import_jobs, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "ListBulkImportJobs"
+ o.http_method = "GET"
+ o.http_request_uri = "/jobs"
+ o.endpoint_pattern = {
+ "hostPrefix" => "data.",
+ }
+ o.input = Shapes::ShapeRef.new(shape: ListBulkImportJobsRequest)
+ o.output = Shapes::ShapeRef.new(shape: ListBulkImportJobsResponse)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
+ o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
+ o[:pager] = Aws::Pager.new(
+ limit_key: "max_results",
+ tokens: {
+ "next_token" => "next_token"
+ }
+ )
+ end)
+
api.add_operation(:list_dashboards, Seahorse::Model::Operation.new.tap do |o|
o.name = "ListDashboards"
o.http_method = "GET"
diff --git a/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/types.rb b/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/types.rb
index 99410cc9131..61dc78173d1 100644
--- a/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/types.rb
+++ b/gems/aws-sdk-iotsitewise/lib/aws-sdk-iotsitewise/types.rb
@@ -2632,6 +2632,109 @@ class CreateAssetResponse < Struct.new(
include Aws::Structure
end
+ # @note When making an API call, you may pass CreateBulkImportJobRequest
+ # data as a hash:
+ #
+ # {
+ # job_name: "Name", # required
+ # job_role_arn: "ARN", # required
+ # files: [ # required
+ # {
+ # bucket: "Bucket", # required
+ # key: "String", # required
+ # version_id: "String",
+ # },
+ # ],
+ # error_report_location: { # required
+ # bucket: "Bucket", # required
+ # prefix: "String", # required
+ # },
+ # job_configuration: { # required
+ # file_format: { # required
+ # csv: {
+ # column_names: ["ALIAS"], # accepts ALIAS, ASSET_ID, PROPERTY_ID, DATA_TYPE, TIMESTAMP_SECONDS, TIMESTAMP_NANO_OFFSET, QUALITY, VALUE
+ # },
+ # },
+ # },
+ # }
+ #
+ # @!attribute [rw] job_name
+ # The unique name that helps identify the job request.
+ # @return [String]
+ #
+ # @!attribute [rw] job_role_arn
+ # The [ARN][1] of the IAM role that allows IoT SiteWise to read Amazon
+ # S3 data.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ # @return [String]
+ #
+ # @!attribute [rw] files
+ # The files in the specified Amazon S3 bucket that contain your data.
+ # @return [Array<Types::File>]
+ #
+ # @!attribute [rw] error_report_location
+ # The Amazon S3 destination where errors associated with the job
+ # creation request are saved.
+ # @return [Types::ErrorReportLocation]
+ #
+ # @!attribute [rw] job_configuration
+ # Contains the configuration information of a job, such as the file
+ # format used to save data in Amazon S3.
+ # @return [Types::JobConfiguration]
+ #
+ class CreateBulkImportJobRequest < Struct.new(
+ :job_name,
+ :job_role_arn,
+ :files,
+ :error_report_location,
+ :job_configuration)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] job_id
+ # The ID of the job.
+ # @return [String]
+ #
+ # @!attribute [rw] job_name
+ # The unique name that helps identify the job request.
+ # @return [String]
+ #
+ # @!attribute [rw] job_status
+ # The status of the bulk import job can be one of the following values:
+ #
+ # * `PENDING` – IoT SiteWise is waiting for the current bulk import
+ # job to finish.
+ #
+ # * `CANCELLED` – The bulk import job has been canceled.
+ #
+ # * `RUNNING` – IoT SiteWise is processing your request to import your
+ # data from Amazon S3.
+ #
+ # * `COMPLETED` – IoT SiteWise successfully completed your request to
+ # import data from Amazon S3.
+ #
+ # * `FAILED` – IoT SiteWise couldn't process your request to import
+ # data from Amazon S3. You can use logs saved in the specified error
+ # report location in Amazon S3 to troubleshoot issues.
+ #
+ # * `COMPLETED_WITH_FAILURES` – IoT SiteWise completed your request to
+ # import data from Amazon S3 with errors. You can use logs saved in
+ # the specified error report location in Amazon S3 to troubleshoot
+ # issues.
+ # @return [String]
+ #
+ class CreateBulkImportJobResponse < Struct.new(
+ :job_id,
+ :job_name,
+ :job_status)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
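
The six documented status values split into two in-flight states (PENDING,
RUNNING) and four terminal ones; a tiny illustrative helper, not part of the
SDK:

    TERMINAL_JOB_STATUSES = %w[CANCELLED COMPLETED FAILED COMPLETED_WITH_FAILURES].freeze

    def bulk_import_job_done?(job_status)
      TERMINAL_JOB_STATUSES.include?(job_status)
    end
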
# @note When making an API call, you may pass CreateDashboardRequest
# data as a hash:
#
@@ -3038,6 +3141,25 @@ class CreateProjectResponse < Struct.new(
include Aws::Structure
end
+ # A .csv file.
+ #
+ # @note When making an API call, you may pass Csv
+ # data as a hash:
+ #
+ # {
+ # column_names: ["ALIAS"], # accepts ALIAS, ASSET_ID, PROPERTY_ID, DATA_TYPE, TIMESTAMP_SECONDS, TIMESTAMP_NANO_OFFSET, QUALITY, VALUE
+ # }
+ #
+ # @!attribute [rw] column_names
+ # The column names specified in the .csv file.
+ # @return [Array<String>]
+ #
+ class Csv < Struct.new(
+ :column_names)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
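
A minimal sketch of a job_configuration hash that uses this Csv shape; the
column list is an assumption chosen from the accepted values above:

    job_configuration = {
      file_format: {
        csv: { column_names: %w[ALIAS DATA_TYPE TIMESTAMP_SECONDS QUALITY VALUE] }
      }
    }
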
# Contains information about a customer managed Amazon S3 bucket.
#
# @note When making an API call, you may pass CustomerManagedS3Storage
@@ -3670,6 +3792,100 @@ class DescribeAssetResponse < Struct.new(
include Aws::Structure
end
+ # @note When making an API call, you may pass DescribeBulkImportJobRequest
+ # data as a hash:
+ #
+ # {
+ # job_id: "ID", # required
+ # }
+ #
+ # @!attribute [rw] job_id
+ # The ID of the job.
+ # @return [String]
+ #
+ class DescribeBulkImportJobRequest < Struct.new(
+ :job_id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] job_id
+ # The ID of the job.
+ # @return [String]
+ #
+ # @!attribute [rw] job_name
+ # The unique name that helps identify the job request.
+ # @return [String]
+ #
+ # @!attribute [rw] job_status
+ # The status of the bulk import job can be one of the following values:
+ #
+ # * `PENDING` – IoT SiteWise is waiting for the current bulk import
+ # job to finish.
+ #
+ # * `CANCELLED` – The bulk import job has been canceled.
+ #
+ # * `RUNNING` – IoT SiteWise is processing your request to import your
+ # data from Amazon S3.
+ #
+ # * `COMPLETED` – IoT SiteWise successfully completed your request to
+ # import data from Amazon S3.
+ #
+ # * `FAILED` – IoT SiteWise couldn't process your request to import
+ # data from Amazon S3. You can use logs saved in the specified error
+ # report location in Amazon S3 to troubleshoot issues.
+ #
+ # * `COMPLETED_WITH_FAILURES` – IoT SiteWise completed your request to
+ # import data from Amazon S3 with errors. You can use logs saved in
+ # the specified error report location in Amazon S3 to troubleshoot
+ # issues.
+ # @return [String]
+ #
+ # @!attribute [rw] job_role_arn
+ # The [ARN][1] of the IAM role that allows IoT SiteWise to read Amazon
+ # S3 data.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ # @return [String]
+ #
+ # @!attribute [rw] files
+ # The files in the specified Amazon S3 bucket that contain your data.
+ # @return [Array&lt;Types::File&gt;]
+ #
+ # @!attribute [rw] error_report_location
+ # The Amazon S3 destination where errors associated with the job
+ # creation request are saved.
+ # @return [Types::ErrorReportLocation]
+ #
+ # @!attribute [rw] job_configuration
+ # Contains the configuration information of a job, such as the file
+ # format used to save data in Amazon S3.
+ # @return [Types::JobConfiguration]
+ #
+ # @!attribute [rw] job_creation_date
+ # The date the job was created, in Unix epoch time.
+ # @return [Time]
+ #
+ # @!attribute [rw] job_last_update_date
+ # The date the job was last updated, in Unix epoch time.
+ # @return [Time]
+ #
+ class DescribeBulkImportJobResponse < Struct.new(
+ :job_id,
+ :job_name,
+ :job_status,
+ :job_role_arn,
+ :files,
+ :error_report_location,
+ :job_configuration,
+ :job_creation_date,
+ :job_last_update_date)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
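Because the response carries `job_status`, a caller can poll `describe_bulk_import_job` until the job reaches one of the terminal states listed above. A sketch, with the job ID as a placeholder:

```ruby
require 'aws-sdk-iotsitewise'

client = Aws::IoTSiteWise::Client.new(region: 'us-east-1')
job_id = 'job-id-from-create' # placeholder

# Poll until the job reaches a terminal state; add a timeout in real code.
terminal = %w[COMPLETED COMPLETED_WITH_FAILURES FAILED CANCELLED]
loop do
  job = client.describe_bulk_import_job(job_id: job_id)
  puts "#{job.job_name}: #{job.job_status}"
  break if terminal.include?(job.job_status)
  sleep 30
end
```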
# @note When making an API call, you may pass DescribeDashboardRequest
# data as a hash:
#
@@ -4380,6 +4596,42 @@ class ErrorDetails < Struct.new(
include Aws::Structure
end
+ # The Amazon S3 destination where errors associated with the job
+ # creation request are saved.
+ #
+ # @note When making an API call, you may pass ErrorReportLocation
+ # data as a hash:
+ #
+ # {
+ # bucket: "Bucket", # required
+ # prefix: "String", # required
+ # }
+ #
+ # @!attribute [rw] bucket
+ # The name of the Amazon S3 bucket to which errors associated with the
+ # bulk import job are sent.
+ # @return [String]
+ #
+ # @!attribute [rw] prefix
+ # Amazon S3 uses the prefix as a folder name to organize data in the
+ # bucket. Each Amazon S3 object has a key that is its unique
+ # identifier in the bucket. Each object in a bucket has exactly one
+ # key. The prefix must end with a forward slash (/). For more
+ # information, see [Organizing objects using prefixes][1] in the
+ # *Amazon Simple Storage Service User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html
+ # @return [String]
+ #
+ class ErrorReportLocation < Struct.new(
+ :bucket,
+ :prefix)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
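Since the prefix must end with a forward slash, it may be worth normalizing it before building the request. A small illustrative helper, not part of the SDK:

```ruby
# Hypothetical helper: guarantee the error-report prefix ends with "/".
def error_report_location(bucket, prefix)
  { bucket: bucket, prefix: prefix.end_with?('/') ? prefix : "#{prefix}/" }
end

error_report_location('my-error-bucket', 'errors')
# => { bucket: "my-error-bucket", prefix: "errors/" }
```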
# Contains expression variable information.
#
# @note When making an API call, you may pass ExpressionVariable
@@ -4409,6 +4661,61 @@ class ExpressionVariable < Struct.new(
include Aws::Structure
end
+ # The file in Amazon S3 where your data is saved.
+ #
+ # @note When making an API call, you may pass File
+ # data as a hash:
+ #
+ # {
+ # bucket: "Bucket", # required
+ # key: "String", # required
+ # version_id: "String",
+ # }
+ #
+ # @!attribute [rw] bucket
+ # The name of the Amazon S3 bucket from which data is imported.
+ # @return [String]
+ #
+ # @!attribute [rw] key
+ # The key of the Amazon S3 object that contains your data. Each object
+ # has a key that is a unique identifier. Each object has exactly one
+ # key.
+ # @return [String]
+ #
+ # @!attribute [rw] version_id
+ # The version ID to identify a specific version of the Amazon S3
+ # object that contains your data.
+ # @return [String]
+ #
+ class File < Struct.new(
+ :bucket,
+ :key,
+ :version_id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The file format of the data.
+ #
+ # @note When making an API call, you may pass FileFormat
+ # data as a hash:
+ #
+ # {
+ # csv: {
+ # column_names: ["ALIAS"], # accepts ALIAS, ASSET_ID, PROPERTY_ID, DATA_TYPE, TIMESTAMP_SECONDS, TIMESTAMP_NANO_OFFSET, QUALITY, VALUE
+ # },
+ # }
+ #
+ # @!attribute [rw] csv
+ # The .csv file format.
+ # @return [Types::Csv]
+ #
+ class FileFormat < Struct.new(
+ :csv)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# The forwarding configuration for a given property.
#
# @note When making an API call, you may pass ForwardingConfig
@@ -5271,6 +5578,72 @@ class InvalidRequestException < Struct.new(
include Aws::Structure
end
+ # Contains the configuration information of a job, such as the file
+ # format used to save data in Amazon S3.
+ #
+ # @note When making an API call, you may pass JobConfiguration
+ # data as a hash:
+ #
+ # {
+ # file_format: { # required
+ # csv: {
+ # column_names: ["ALIAS"], # accepts ALIAS, ASSET_ID, PROPERTY_ID, DATA_TYPE, TIMESTAMP_SECONDS, TIMESTAMP_NANO_OFFSET, QUALITY, VALUE
+ # },
+ # },
+ # }
+ #
+ # @!attribute [rw] file_format
+ # The file format of the data in Amazon S3.
+ # @return [Types::FileFormat]
+ #
+ class JobConfiguration < Struct.new(
+ :file_format)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Contains the job summary information.
+ #
+ # @!attribute [rw] id
+ # The ID of the job.
+ # @return [String]
+ #
+ # @!attribute [rw] name
+ # The unique name that helps identify the job request.
+ # @return [String]
+ #
+ # @!attribute [rw] status
+ # The status of the bulk import job can be one of the following values.
+ #
+ # * `PENDING` – IoT SiteWise is waiting for the current bulk import
+ # job to finish.
+ #
+ # * `CANCELLED` – The bulk import job has been canceled.
+ #
+ # * `RUNNING` – IoT SiteWise is processing your request to import your
+ # data from Amazon S3.
+ #
+ # * `COMPLETED` – IoT SiteWise successfully completed your request to
+ # import data from Amazon S3.
+ #
+ # * `FAILED` – IoT SiteWise couldn't process your request to import
+ # data from Amazon S3. You can use logs saved in the specified error
+ # report location in Amazon S3 to troubleshoot issues.
+ #
+ # * `COMPLETED_WITH_FAILURES` – IoT SiteWise completed your request to
+ # import data from Amazon S3 with errors. You can use logs saved in
+ # the specified error report location in Amazon S3 to troubleshoot
+ # issues.
+ # @return [String]
+ #
+ class JobSummary < Struct.new(
+ :id,
+ :name,
+ :status)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# You've reached the limit for a resource. For example, this can occur
# if you're trying to associate more than the allowed number of child
# assets or attempting to create more than the allowed number of
@@ -5617,6 +5990,52 @@ class ListAssociatedAssetsResponse < Struct.new(
include Aws::Structure
end
+ # @note When making an API call, you may pass ListBulkImportJobsRequest
+ # data as a hash:
+ #
+ # {
+ # next_token: "NextToken",
+ # max_results: 1,
+ # filter: "ALL", # accepts ALL, PENDING, RUNNING, CANCELLED, FAILED, COMPLETED_WITH_FAILURES, COMPLETED
+ # }
+ #
+ # @!attribute [rw] next_token
+ # The token to be used for the next set of paginated results.
+ # @return [String]
+ #
+ # @!attribute [rw] max_results
+ # The maximum number of results to return for each paginated request.
+ # @return [Integer]
+ #
+ # @!attribute [rw] filter
+ # You can use a filter to select the bulk import jobs that you want to
+ # retrieve.
+ # @return [String]
+ #
+ class ListBulkImportJobsRequest < Struct.new(
+ :next_token,
+ :max_results,
+ :filter)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] job_summaries
+ # One or more job summaries to list.
+ # @return [Array&lt;Types::JobSummary&gt;]
+ #
+ # @!attribute [rw] next_token
+ # The token for the next set of results, or null if there are no
+ # additional results.
+ # @return [String]
+ #
+ class ListBulkImportJobsResponse < Struct.new(
+ :job_summaries,
+ :next_token)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
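The `next_token`/`max_results` pair follows the SDK's usual pagination contract, so all matching jobs can be collected across pages with a simple loop. A sketch using the `FAILED` filter:

```ruby
require 'aws-sdk-iotsitewise'

client = Aws::IoTSiteWise::Client.new(region: 'us-east-1')

# Gather every failed bulk import job, one page at a time.
failed = []
token = nil
loop do
  page = client.list_bulk_import_jobs(filter: 'FAILED', next_token: token)
  failed.concat(page.job_summaries)
  token = page.next_token
  break if token.nil?
end
failed.each { |job| puts "#{job.id} #{job.name} #{job.status}" }
```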
# @note When making an API call, you may pass ListDashboardsRequest
# data as a hash:
#
diff --git a/gems/aws-sdk-kendra/CHANGELOG.md b/gems/aws-sdk-kendra/CHANGELOG.md
index 3337e186f43..c0ce64b2c6e 100644
--- a/gems/aws-sdk-kendra/CHANGELOG.md
+++ b/gems/aws-sdk-kendra/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.55.0 (2022-07-21)
+------------------
+
+* Feature - Amazon Kendra now provides OAuth2 support for SharePoint Online. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html
+
1.54.0 (2022-07-14)
------------------
diff --git a/gems/aws-sdk-kendra/VERSION b/gems/aws-sdk-kendra/VERSION
index b7921ae87bc..094d6ad00ce 100644
--- a/gems/aws-sdk-kendra/VERSION
+++ b/gems/aws-sdk-kendra/VERSION
@@ -1 +1 @@
-1.54.0
+1.55.0
diff --git a/gems/aws-sdk-kendra/lib/aws-sdk-kendra.rb b/gems/aws-sdk-kendra/lib/aws-sdk-kendra.rb
index bb83e976de9..b60cf9534cd 100644
--- a/gems/aws-sdk-kendra/lib/aws-sdk-kendra.rb
+++ b/gems/aws-sdk-kendra/lib/aws-sdk-kendra.rb
@@ -48,6 +48,6 @@
# @!group service
module Aws::Kendra
- GEM_VERSION = '1.54.0'
+ GEM_VERSION = '1.55.0'
end
diff --git a/gems/aws-sdk-kendra/lib/aws-sdk-kendra/client.rb b/gems/aws-sdk-kendra/lib/aws-sdk-kendra/client.rb
index b8ec222cec7..e18d71f9de1 100644
--- a/gems/aws-sdk-kendra/lib/aws-sdk-kendra/client.rb
+++ b/gems/aws-sdk-kendra/lib/aws-sdk-kendra/client.rb
@@ -830,15 +830,14 @@ def clear_query_suggestions(params = {}, options = {})
# control without indexing all of your documents again. For example,
# your index contains top-secret company documents that only certain
# employees or users should access. One of these users leaves the
- # company or switches to a team that should be blocked from access to
- # top-secret documents. Your documents in your index still give this
- # user access to top-secret documents due to the user having access at
- # the time your documents were indexed. You can create a specific access
- # control configuration for this user with deny access. You can later
- # update the access control configuration to allow access in the case
- # the user returns to the company and re-joins the 'top-secret' team.
- # You can re-configure access control for your documents circumstances
- # change.
+ # company or switches to a team that should be blocked from accessing
+ # top-secret documents. The user still has access to top-secret
+ # documents because the user had access when your documents were
+ # previously indexed. You can create a specific access control
+ # configuration for the user with deny access. You can later update the
+ # access control configuration to allow access if the user returns to
+ # the company and re-joins the 'top-secret' team. You can re-configure
+ # access control for your documents as circumstances change.
#
# To apply your access control configuration to certain documents, you
# call the [BatchPutDocument][1] API with the
@@ -1091,6 +1090,7 @@ def create_access_control_configuration(params = {}, options = {})
# bucket: "S3BucketName", # required
# key: "S3ObjectKey", # required
# },
+ # authentication_type: "HTTP_BASIC", # accepts HTTP_BASIC, OAUTH2
# },
# database_configuration: {
# database_engine_type: "RDS_AURORA_MYSQL", # required, accepts RDS_AURORA_MYSQL, RDS_AURORA_POSTGRESQL, RDS_MYSQL, RDS_POSTGRESQL
@@ -2676,6 +2676,7 @@ def describe_access_control_configuration(params = {}, options = {})
# resp.configuration.share_point_configuration.disable_local_groups #=> Boolean
# resp.configuration.share_point_configuration.ssl_certificate_s3_path.bucket #=> String
# resp.configuration.share_point_configuration.ssl_certificate_s3_path.key #=> String
+ # resp.configuration.share_point_configuration.authentication_type #=> String, one of "HTTP_BASIC", "OAUTH2"
# resp.configuration.database_configuration.database_engine_type #=> String, one of "RDS_AURORA_MYSQL", "RDS_AURORA_POSTGRESQL", "RDS_MYSQL", "RDS_POSTGRESQL"
# resp.configuration.database_configuration.connection_configuration.database_host #=> String
# resp.configuration.database_configuration.connection_configuration.database_port #=> Integer
@@ -3830,7 +3831,7 @@ def get_snapshots(params = {}, options = {})
# The identifier of the index for the access control configuration.
#
# @option params [String] :next_token
- # If the previous response was incomplete (because there is more data to
+ # If the previous response was incomplete (because there's more data to
# retrieve), Amazon Kendra returns a pagination token in the response.
# You can use this pagination token to retrieve the next set of access
# control configurations.
@@ -5093,7 +5094,7 @@ def untag_resource(params = {}, options = {})
# You call the [BatchPutDocument][1] API to apply the updated access
# control configuration, with the `AccessControlConfigurationId`
# included in the [Document][2] object. If you use an S3 bucket as a
- # data source, you synchronize your data source to apply the the
+ # data source, you synchronize your data source to apply the
# `AccessControlConfigurationId` in the `.metadata.json` file. Amazon
# Kendra currently only supports access control configuration for S3
# data sources and documents indexed using the `BatchPutDocument` API.
@@ -5270,6 +5271,7 @@ def update_access_control_configuration(params = {}, options = {})
# bucket: "S3BucketName", # required
# key: "S3ObjectKey", # required
# },
+ # authentication_type: "HTTP_BASIC", # accepts HTTP_BASIC, OAUTH2
# },
# database_configuration: {
# database_engine_type: "RDS_AURORA_MYSQL", # required, accepts RDS_AURORA_MYSQL, RDS_AURORA_POSTGRESQL, RDS_MYSQL, RDS_POSTGRESQL
@@ -6318,7 +6320,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-kendra'
- context[:gem_version] = '1.54.0'
+ context[:gem_version] = '1.55.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-kendra/lib/aws-sdk-kendra/client_api.rb b/gems/aws-sdk-kendra/lib/aws-sdk-kendra/client_api.rb
index 727561790fe..93c45e09500 100644
--- a/gems/aws-sdk-kendra/lib/aws-sdk-kendra/client_api.rb
+++ b/gems/aws-sdk-kendra/lib/aws-sdk-kendra/client_api.rb
@@ -441,6 +441,7 @@ module ClientApi
ServiceNowServiceCatalogConfiguration = Shapes::StructureShape.new(name: 'ServiceNowServiceCatalogConfiguration')
ServiceQuotaExceededException = Shapes::StructureShape.new(name: 'ServiceQuotaExceededException')
SharePointConfiguration = Shapes::StructureShape.new(name: 'SharePointConfiguration')
+ SharePointOnlineAuthenticationType = Shapes::StringShape.new(name: 'SharePointOnlineAuthenticationType')
SharePointUrlList = Shapes::ListShape.new(name: 'SharePointUrlList')
SharePointVersion = Shapes::StringShape.new(name: 'SharePointVersion')
SharedDriveId = Shapes::StringShape.new(name: 'SharedDriveId')
@@ -1901,6 +1902,7 @@ module ClientApi
SharePointConfiguration.add_member(:document_title_field_name, Shapes::ShapeRef.new(shape: DataSourceFieldName, location_name: "DocumentTitleFieldName"))
SharePointConfiguration.add_member(:disable_local_groups, Shapes::ShapeRef.new(shape: Boolean, location_name: "DisableLocalGroups"))
SharePointConfiguration.add_member(:ssl_certificate_s3_path, Shapes::ShapeRef.new(shape: S3Path, location_name: "SslCertificateS3Path"))
+ SharePointConfiguration.add_member(:authentication_type, Shapes::ShapeRef.new(shape: SharePointOnlineAuthenticationType, location_name: "AuthenticationType"))
SharePointConfiguration.struct_class = Types::SharePointConfiguration
SharePointUrlList.member = Shapes::ShapeRef.new(shape: Url)
diff --git a/gems/aws-sdk-kendra/lib/aws-sdk-kendra/types.rb b/gems/aws-sdk-kendra/lib/aws-sdk-kendra/types.rb
index 19c5b7d358e..129819232cc 100644
--- a/gems/aws-sdk-kendra/lib/aws-sdk-kendra/types.rb
+++ b/gems/aws-sdk-kendra/lib/aws-sdk-kendra/types.rb
@@ -2439,6 +2439,7 @@ class CreateAccessControlConfigurationResponse < Struct.new(
# bucket: "S3BucketName", # required
# key: "S3ObjectKey", # required
# },
+ # authentication_type: "HTTP_BASIC", # accepts HTTP_BASIC, OAUTH2
# },
# database_configuration: {
# database_engine_type: "RDS_AURORA_MYSQL", # required, accepts RDS_AURORA_MYSQL, RDS_AURORA_POSTGRESQL, RDS_MYSQL, RDS_POSTGRESQL
@@ -3927,6 +3928,7 @@ class CustomDocumentEnrichmentConfiguration < Struct.new(
# bucket: "S3BucketName", # required
# key: "S3ObjectKey", # required
# },
+ # authentication_type: "HTTP_BASIC", # accepts HTTP_BASIC, OAUTH2
# },
# database_configuration: {
# database_engine_type: "RDS_AURORA_MYSQL", # required, accepts RDS_AURORA_MYSQL, RDS_AURORA_POSTGRESQL, RDS_MYSQL, RDS_POSTGRESQL
@@ -8750,7 +8752,7 @@ class JwtTokenTypeConfiguration < Struct.new(
# @return [String]
#
# @!attribute [rw] next_token
- # If the previous response was incomplete (because there is more data
+ # If the previous response was incomplete (because there's more data
# to retrieve), Amazon Kendra returns a pagination token in the
# response. You can use this pagination token to retrieve the next set
# of access control configurations.
@@ -8771,9 +8773,9 @@ class ListAccessControlConfigurationsRequest < Struct.new(
end
# @!attribute [rw] next_token
- # If the response is truncated, Amazon Kendra returns this token that
- # you can use in the subsequent request to retrieve the next set of
- # access control configurations.
+ # If the response is truncated, Amazon Kendra returns this token,
+ # which you can use in the subsequent request to retrieve the next set
+ # of access control configurations.
# @return [String]
#
# @!attribute [rw] access_control_configurations
@@ -11763,6 +11765,7 @@ class ServiceQuotaExceededException < Struct.new(
# bucket: "S3BucketName", # required
# key: "S3ObjectKey", # required
# },
+ # authentication_type: "HTTP_BASIC", # accepts HTTP_BASIC, OAUTH2
# }
#
# @!attribute [rw] share_point_version
@@ -11781,9 +11784,14 @@ class ServiceQuotaExceededException < Struct.new(
# provide the server domain name as part of the credentials. For more
# information, see [Using a Microsoft SharePoint Data Source][1].
#
+ # You can also provide OAuth authentication credentials: a user name,
+ # password, client ID, and client secret. For more information, see
+ # [Authentication for a SharePoint data source][2].
+ #
#
#
# [1]: https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html
+ # [2]: https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html#sharepoint-authentication
# @return [String]
#
# @!attribute [rw] crawl_attachments
@@ -11856,6 +11864,13 @@ class ServiceQuotaExceededException < Struct.new(
# use this to connect to SharePoint.
# @return [Types::S3Path]
#
+ # @!attribute [rw] authentication_type
+ # Whether you want to connect to SharePoint using basic authentication
+ # (user name and password) or OAuth authentication (user name,
+ # password, client ID, and client secret). You can use OAuth
+ # authentication for SharePoint Online.
+ # @return [String]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/kendra-2019-02-03/SharePointConfiguration AWS API Documentation
#
class SharePointConfiguration < Struct.new(
@@ -11870,7 +11885,8 @@ class SharePointConfiguration < Struct.new(
:field_mappings,
:document_title_field_name,
:disable_local_groups,
- :ssl_certificate_s3_path)
+ :ssl_certificate_s3_path,
+ :authentication_type)
SENSITIVE = []
include Aws::Structure
end
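A minimal sketch of creating a SharePoint Online data source with the new `authentication_type` member set to `OAUTH2`; the index ID, role ARN, secret ARN, and site URL are placeholders, and the secret is assumed to hold the user name, password, client ID, and client secret described above:

```ruby
require 'aws-sdk-kendra'

kendra = Aws::Kendra::Client.new(region: 'us-east-1')

resp = kendra.create_data_source(
  index_id: 'index-id-placeholder',
  name: 'sharepoint-online',
  type: 'SHAREPOINT',
  role_arn: 'arn:aws:iam::123456789012:role/KendraSharePointRole',
  configuration: {
    share_point_configuration: {
      share_point_version: 'SHAREPOINT_ONLINE',
      urls: ['https://example.sharepoint.com/sites/docs'],
      secret_arn: 'arn:aws:secretsmanager:us-east-1:123456789012:secret:sharepoint-creds',
      authentication_type: 'OAUTH2' # the other accepted value is HTTP_BASIC
    }
  }
)
puts resp.id
```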
@@ -12790,6 +12806,7 @@ class UpdateAccessControlConfigurationResponse < Aws::EmptyStructure; end
# bucket: "S3BucketName", # required
# key: "S3ObjectKey", # required
# },
+ # authentication_type: "HTTP_BASIC", # accepts HTTP_BASIC, OAUTH2
# },
# database_configuration: {
# database_engine_type: "RDS_AURORA_MYSQL", # required, accepts RDS_AURORA_MYSQL, RDS_AURORA_POSTGRESQL, RDS_MYSQL, RDS_POSTGRESQL
diff --git a/gems/aws-sdk-networkfirewall/CHANGELOG.md b/gems/aws-sdk-networkfirewall/CHANGELOG.md
index 2e8280b781e..b7ced94b3eb 100644
--- a/gems/aws-sdk-networkfirewall/CHANGELOG.md
+++ b/gems/aws-sdk-networkfirewall/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.18.0 (2022-07-21)
+------------------
+
+* Feature - Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists.
+
1.17.0 (2022-04-28)
------------------
diff --git a/gems/aws-sdk-networkfirewall/VERSION b/gems/aws-sdk-networkfirewall/VERSION
index 092afa15df4..84cc529467b 100644
--- a/gems/aws-sdk-networkfirewall/VERSION
+++ b/gems/aws-sdk-networkfirewall/VERSION
@@ -1 +1 @@
-1.17.0
+1.18.0
diff --git a/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall.rb b/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall.rb
index 5f38320aba1..58937262641 100644
--- a/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall.rb
+++ b/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall.rb
@@ -48,6 +48,6 @@
# @!group service
module Aws::NetworkFirewall
- GEM_VERSION = '1.17.0'
+ GEM_VERSION = '1.18.0'
end
diff --git a/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/client.rb b/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/client.rb
index 3a7084a7b77..62c1fc11c65 100644
--- a/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/client.rb
+++ b/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/client.rb
@@ -638,14 +638,18 @@ def associate_subnets(params = {}, options = {})
# resp.firewall.encryption_configuration.key_id #=> String
# resp.firewall.encryption_configuration.type #=> String, one of "CUSTOMER_KMS", "AWS_OWNED_KMS_KEY"
# resp.firewall_status.status #=> String, one of "PROVISIONING", "DELETING", "READY"
- # resp.firewall_status.configuration_sync_state_summary #=> String, one of "PENDING", "IN_SYNC"
+ # resp.firewall_status.configuration_sync_state_summary #=> String, one of "PENDING", "IN_SYNC", "CAPACITY_CONSTRAINED"
# resp.firewall_status.sync_states #=> Hash
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.subnet_id #=> String
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.endpoint_id #=> String
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.status #=> String, one of "CREATING", "DELETING", "SCALING", "READY"
# resp.firewall_status.sync_states["AvailabilityZone"].config #=> Hash
- # resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].sync_status #=> String, one of "PENDING", "IN_SYNC"
+ # resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].sync_status #=> String, one of "PENDING", "IN_SYNC", "CAPACITY_CONSTRAINED"
# resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].update_token #=> String
+ # resp.firewall_status.capacity_usage_summary.cid_rs.available_cidr_count #=> Integer
+ # resp.firewall_status.capacity_usage_summary.cid_rs.utilized_cidr_count #=> Integer
+ # resp.firewall_status.capacity_usage_summary.cid_rs.ip_set_references #=> Hash
+ # resp.firewall_status.capacity_usage_summary.cid_rs.ip_set_references["IPSetArn"].resolved_cidr_count #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/CreateFirewall AWS API Documentation
#
@@ -922,6 +926,13 @@ def create_firewall_policy(params = {}, options = {})
# },
# },
# },
+ # reference_sets: {
+ # ip_set_references: {
+ # "IPSetReferenceName" => {
+ # reference_arn: "ResourceArn",
+ # },
+ # },
+ # },
# rules_source: { # required
# rules_string: "RulesString",
# rules_source_list: {
@@ -1118,14 +1129,18 @@ def create_rule_group(params = {}, options = {})
# resp.firewall.encryption_configuration.key_id #=> String
# resp.firewall.encryption_configuration.type #=> String, one of "CUSTOMER_KMS", "AWS_OWNED_KMS_KEY"
# resp.firewall_status.status #=> String, one of "PROVISIONING", "DELETING", "READY"
- # resp.firewall_status.configuration_sync_state_summary #=> String, one of "PENDING", "IN_SYNC"
+ # resp.firewall_status.configuration_sync_state_summary #=> String, one of "PENDING", "IN_SYNC", "CAPACITY_CONSTRAINED"
# resp.firewall_status.sync_states #=> Hash
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.subnet_id #=> String
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.endpoint_id #=> String
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.status #=> String, one of "CREATING", "DELETING", "SCALING", "READY"
# resp.firewall_status.sync_states["AvailabilityZone"].config #=> Hash
- # resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].sync_status #=> String, one of "PENDING", "IN_SYNC"
+ # resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].sync_status #=> String, one of "PENDING", "IN_SYNC", "CAPACITY_CONSTRAINED"
# resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].update_token #=> String
+ # resp.firewall_status.capacity_usage_summary.cid_rs.available_cidr_count #=> Integer
+ # resp.firewall_status.capacity_usage_summary.cid_rs.utilized_cidr_count #=> Integer
+ # resp.firewall_status.capacity_usage_summary.cid_rs.ip_set_references #=> Hash
+ # resp.firewall_status.capacity_usage_summary.cid_rs.ip_set_references["IPSetArn"].resolved_cidr_count #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/DeleteFirewall AWS API Documentation
#
@@ -1321,14 +1336,18 @@ def delete_rule_group(params = {}, options = {})
# resp.firewall.encryption_configuration.key_id #=> String
# resp.firewall.encryption_configuration.type #=> String, one of "CUSTOMER_KMS", "AWS_OWNED_KMS_KEY"
# resp.firewall_status.status #=> String, one of "PROVISIONING", "DELETING", "READY"
- # resp.firewall_status.configuration_sync_state_summary #=> String, one of "PENDING", "IN_SYNC"
+ # resp.firewall_status.configuration_sync_state_summary #=> String, one of "PENDING", "IN_SYNC", "CAPACITY_CONSTRAINED"
# resp.firewall_status.sync_states #=> Hash
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.subnet_id #=> String
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.endpoint_id #=> String
# resp.firewall_status.sync_states["AvailabilityZone"].attachment.status #=> String, one of "CREATING", "DELETING", "SCALING", "READY"
# resp.firewall_status.sync_states["AvailabilityZone"].config #=> Hash
- # resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].sync_status #=> String, one of "PENDING", "IN_SYNC"
+ # resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].sync_status #=> String, one of "PENDING", "IN_SYNC", "CAPACITY_CONSTRAINED"
# resp.firewall_status.sync_states["AvailabilityZone"].config["ResourceName"].update_token #=> String
+ # resp.firewall_status.capacity_usage_summary.cid_rs.available_cidr_count #=> Integer
+ # resp.firewall_status.capacity_usage_summary.cid_rs.utilized_cidr_count #=> Integer
+ # resp.firewall_status.capacity_usage_summary.cid_rs.ip_set_references #=> Hash
+ # resp.firewall_status.capacity_usage_summary.cid_rs.ip_set_references["IPSetArn"].resolved_cidr_count #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/DescribeFirewall AWS API Documentation
#
@@ -1529,6 +1548,8 @@ def describe_resource_policy(params = {}, options = {})
# resp.rule_group.rule_variables.port_sets #=> Hash
# resp.rule_group.rule_variables.port_sets["RuleVariableName"].definition #=> Array
# resp.rule_group.rule_variables.port_sets["RuleVariableName"].definition[0] #=> String
+ # resp.rule_group.reference_sets.ip_set_references #=> Hash
+ # resp.rule_group.reference_sets.ip_set_references["IPSetReferenceName"].reference_arn #=> String
# resp.rule_group.rules_source.rules_string #=> String
# resp.rule_group.rules_source.rules_source_list.targets #=> Array
# resp.rule_group.rules_source.rules_source_list.targets[0] #=> String
@@ -2743,6 +2764,13 @@ def update_logging_configuration(params = {}, options = {})
# },
# },
# },
+ # reference_sets: {
+ # ip_set_references: {
+ # "IPSetReferenceName" => {
+ # reference_arn: "ResourceArn",
+ # },
+ # },
+ # },
# rules_source: { # required
# rules_string: "RulesString",
# rules_source_list: {
@@ -2956,7 +2984,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-networkfirewall'
- context[:gem_version] = '1.17.0'
+ context[:gem_version] = '1.18.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/client_api.rb b/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/client_api.rb
index b96d63cef39..fa1d6127793 100644
--- a/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/client_api.rb
+++ b/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/client_api.rb
@@ -28,6 +28,9 @@ module ClientApi
AzSubnet = Shapes::StringShape.new(name: 'AzSubnet')
AzSubnets = Shapes::ListShape.new(name: 'AzSubnets')
Boolean = Shapes::BooleanShape.new(name: 'Boolean')
+ CIDRCount = Shapes::IntegerShape.new(name: 'CIDRCount')
+ CIDRSummary = Shapes::StructureShape.new(name: 'CIDRSummary')
+ CapacityUsageSummary = Shapes::StructureShape.new(name: 'CapacityUsageSummary')
CollectionMember_String = Shapes::StringShape.new(name: 'CollectionMember_String')
ConfigurationSyncState = Shapes::StringShape.new(name: 'ConfigurationSyncState')
CreateFirewallPolicyRequest = Shapes::StructureShape.new(name: 'CreateFirewallPolicyRequest')
@@ -84,6 +87,12 @@ module ClientApi
HashMapValue = Shapes::StringShape.new(name: 'HashMapValue')
Header = Shapes::StructureShape.new(name: 'Header')
IPSet = Shapes::StructureShape.new(name: 'IPSet')
+ IPSetArn = Shapes::StringShape.new(name: 'IPSetArn')
+ IPSetMetadata = Shapes::StructureShape.new(name: 'IPSetMetadata')
+ IPSetMetadataMap = Shapes::MapShape.new(name: 'IPSetMetadataMap')
+ IPSetReference = Shapes::StructureShape.new(name: 'IPSetReference')
+ IPSetReferenceMap = Shapes::MapShape.new(name: 'IPSetReferenceMap')
+ IPSetReferenceName = Shapes::StringShape.new(name: 'IPSetReferenceName')
IPSets = Shapes::MapShape.new(name: 'IPSets')
InsufficientCapacityException = Shapes::StructureShape.new(name: 'InsufficientCapacityException')
InternalServerError = Shapes::StructureShape.new(name: 'InternalServerError')
@@ -130,6 +139,7 @@ module ClientApi
PublishMetricAction = Shapes::StructureShape.new(name: 'PublishMetricAction')
PutResourcePolicyRequest = Shapes::StructureShape.new(name: 'PutResourcePolicyRequest')
PutResourcePolicyResponse = Shapes::StructureShape.new(name: 'PutResourcePolicyResponse')
+ ReferenceSets = Shapes::StructureShape.new(name: 'ReferenceSets')
ResourceArn = Shapes::StringShape.new(name: 'ResourceArn')
ResourceId = Shapes::StringShape.new(name: 'ResourceId')
ResourceManagedStatus = Shapes::StringShape.new(name: 'ResourceManagedStatus')
@@ -258,6 +268,14 @@ module ClientApi
AzSubnets.member = Shapes::ShapeRef.new(shape: AzSubnet)
+ CIDRSummary.add_member(:available_cidr_count, Shapes::ShapeRef.new(shape: CIDRCount, location_name: "AvailableCIDRCount"))
+ CIDRSummary.add_member(:utilized_cidr_count, Shapes::ShapeRef.new(shape: CIDRCount, location_name: "UtilizedCIDRCount"))
+ CIDRSummary.add_member(:ip_set_references, Shapes::ShapeRef.new(shape: IPSetMetadataMap, location_name: "IPSetReferences"))
+ CIDRSummary.struct_class = Types::CIDRSummary
+
+ CapacityUsageSummary.add_member(:cid_rs, Shapes::ShapeRef.new(shape: CIDRSummary, location_name: "CIDRs"))
+ CapacityUsageSummary.struct_class = Types::CapacityUsageSummary
+
CreateFirewallPolicyRequest.add_member(:firewall_policy_name, Shapes::ShapeRef.new(shape: ResourceName, required: true, location_name: "FirewallPolicyName"))
CreateFirewallPolicyRequest.add_member(:firewall_policy, Shapes::ShapeRef.new(shape: FirewallPolicy, required: true, location_name: "FirewallPolicy"))
CreateFirewallPolicyRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "Description"))
@@ -462,6 +480,7 @@ module ClientApi
FirewallStatus.add_member(:status, Shapes::ShapeRef.new(shape: FirewallStatusValue, required: true, location_name: "Status"))
FirewallStatus.add_member(:configuration_sync_state_summary, Shapes::ShapeRef.new(shape: ConfigurationSyncState, required: true, location_name: "ConfigurationSyncStateSummary"))
FirewallStatus.add_member(:sync_states, Shapes::ShapeRef.new(shape: SyncStates, location_name: "SyncStates"))
+ FirewallStatus.add_member(:capacity_usage_summary, Shapes::ShapeRef.new(shape: CapacityUsageSummary, location_name: "CapacityUsageSummary"))
FirewallStatus.struct_class = Types::FirewallStatus
Firewalls.member = Shapes::ShapeRef.new(shape: FirewallMetadata)
@@ -479,6 +498,18 @@ module ClientApi
IPSet.add_member(:definition, Shapes::ShapeRef.new(shape: VariableDefinitionList, required: true, location_name: "Definition"))
IPSet.struct_class = Types::IPSet
+ IPSetMetadata.add_member(:resolved_cidr_count, Shapes::ShapeRef.new(shape: CIDRCount, location_name: "ResolvedCIDRCount"))
+ IPSetMetadata.struct_class = Types::IPSetMetadata
+
+ IPSetMetadataMap.key = Shapes::ShapeRef.new(shape: IPSetArn)
+ IPSetMetadataMap.value = Shapes::ShapeRef.new(shape: IPSetMetadata)
+
+ IPSetReference.add_member(:reference_arn, Shapes::ShapeRef.new(shape: ResourceArn, location_name: "ReferenceArn"))
+ IPSetReference.struct_class = Types::IPSetReference
+
+ IPSetReferenceMap.key = Shapes::ShapeRef.new(shape: IPSetReferenceName)
+ IPSetReferenceMap.value = Shapes::ShapeRef.new(shape: IPSetReference)
+
IPSets.key = Shapes::ShapeRef.new(shape: RuleVariableName)
IPSets.value = Shapes::ShapeRef.new(shape: IPSet)
@@ -591,6 +622,9 @@ module ClientApi
PutResourcePolicyResponse.struct_class = Types::PutResourcePolicyResponse
+ ReferenceSets.add_member(:ip_set_references, Shapes::ShapeRef.new(shape: IPSetReferenceMap, location_name: "IPSetReferences"))
+ ReferenceSets.struct_class = Types::ReferenceSets
+
ResourceNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessage, location_name: "Message"))
ResourceNotFoundException.struct_class = Types::ResourceNotFoundException
@@ -602,6 +636,7 @@ module ClientApi
RuleDefinition.struct_class = Types::RuleDefinition
RuleGroup.add_member(:rule_variables, Shapes::ShapeRef.new(shape: RuleVariables, location_name: "RuleVariables"))
+ RuleGroup.add_member(:reference_sets, Shapes::ShapeRef.new(shape: ReferenceSets, location_name: "ReferenceSets"))
RuleGroup.add_member(:rules_source, Shapes::ShapeRef.new(shape: RulesSource, required: true, location_name: "RulesSource"))
RuleGroup.add_member(:stateful_rule_options, Shapes::ShapeRef.new(shape: StatefulRuleOptions, location_name: "StatefulRuleOptions"))
RuleGroup.struct_class = Types::RuleGroup
diff --git a/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/types.rb b/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/types.rb
index 3a7f1344a2b..51bfb43d041 100644
--- a/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/types.rb
+++ b/gems/aws-sdk-networkfirewall/lib/aws-sdk-networkfirewall/types.rb
@@ -329,6 +329,50 @@ class Attachment < Struct.new(
include Aws::Structure
end
+ # Summarizes the CIDR blocks used by the IP set references in a
+ # firewall. Network Firewall calculates the number of CIDRs by taking an
+ # aggregated count of all CIDRs used by the IP sets you are referencing.
+ #
+ # @!attribute [rw] available_cidr_count
+ # The number of CIDR blocks available for use by the IP set references
+ # in a firewall.
+ # @return [Integer]
+ #
+ # @!attribute [rw] utilized_cidr_count
+ # The number of CIDR blocks used by the IP set references in a
+ # firewall.
+ # @return [Integer]
+ #
+ # @!attribute [rw] ip_set_references
+ # The list of the IP set references used by a firewall.
+ # @return [Hash&lt;String,Types::IPSetMetadata&gt;]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/CIDRSummary AWS API Documentation
+ #
+ class CIDRSummary < Struct.new(
+ :available_cidr_count,
+ :utilized_cidr_count,
+ :ip_set_references)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The capacity usage summary of the resources used by the ReferenceSets
+ # in a firewall.
+ #
+ # @!attribute [rw] cid_rs
+ # Describes the capacity usage of the CIDR blocks used by the IP set
+ # references in a firewall.
+ # @return [Types::CIDRSummary]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/CapacityUsageSummary AWS API Documentation
+ #
+ class CapacityUsageSummary < Struct.new(
+ :cid_rs)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
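These shapes surface on `FirewallStatus`, so after `describe_firewall` the CIDR usage can be read directly off the response. A sketch, with the firewall name as a placeholder; note that the generated member name for `CIDRs` is `cid_rs`:

```ruby
require 'aws-sdk-networkfirewall'

nfw = Aws::NetworkFirewall::Client.new(region: 'us-east-1')
status = nfw.describe_firewall(firewall_name: 'my-firewall').firewall_status

# capacity_usage_summary may be nil when no IP set references are in use.
cidrs = status.capacity_usage_summary&.cid_rs
if cidrs
  puts "CIDRs utilized: #{cidrs.utilized_cidr_count}, available: #{cidrs.available_cidr_count}"
  cidrs.ip_set_references.each do |arn, meta|
    puts "  #{arn}: #{meta.resolved_cidr_count} resolved CIDRs"
  end
end
```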
# @note When making an API call, you may pass CreateFirewallPolicyRequest
# data as a hash:
#
@@ -609,6 +653,13 @@ class CreateFirewallResponse < Struct.new(
# },
# },
# },
+ # reference_sets: {
+ # ip_set_references: {
+ # "IPSetReferenceName" => {
+ # reference_arn: "ResourceArn",
+ # },
+ # },
+ # },
# rules_source: { # required
# rules_string: "RulesString",
# rules_source_list: {
@@ -2120,12 +2171,20 @@ class FirewallPolicyResponse < Struct.new(
# and configuration object.
# @return [Hash&lt;String,Types::SyncState&gt;]
#
+ # @!attribute [rw] capacity_usage_summary
+ # Describes the capacity usage of the resources contained in a
+ # firewall's reference sets. Network Firewall calculates the
+ # capacity usage by taking an aggregated count of all of the resources
+ # used by all of the reference sets in a firewall.
+ # @return [Types::CapacityUsageSummary]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/FirewallStatus AWS API Documentation
#
class FirewallStatus < Struct.new(
:status,
:configuration_sync_state_summary,
- :sync_states)
+ :sync_states,
+ :capacity_usage_summary)
SENSITIVE = []
include Aws::Structure
end
@@ -2255,6 +2314,63 @@ class IPSet < Struct.new(
include Aws::Structure
end
+ # General information about the IP set.
+ #
+ # @!attribute [rw] resolved_cidr_count
+ # Describes the total number of CIDR blocks currently in use by the IP
+ # set references in a firewall. To determine how many CIDR blocks are
+ # available for you to use in a firewall, you can check
+ # `AvailableCIDRCount`.
+ # @return [Integer]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/IPSetMetadata AWS API Documentation
+ #
+ class IPSetMetadata < Struct.new(
+ :resolved_cidr_count)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Configures one or more IP set references for a Suricata-compatible
+ # rule group. This is used in CreateRuleGroup or UpdateRuleGroup. An IP
+ # set reference is a rule variable that references a resource that you
+ # create and manage in another Amazon Web Services service, such as an
+ # Amazon VPC prefix list. Network Firewall IP set references enable you
+ # to dynamically update the contents of your rules. When you create,
+ # update, or delete the IP set you are referencing in your rule, Network
+ # Firewall automatically updates the rule's content with the changes.
+ # For more information about IP set references in Network Firewall, see
+ # [Using IP set references][1] in the *Network Firewall Developer
+ # Guide*.
+ #
+ # Network Firewall currently supports only [Amazon VPC prefix lists][2]
+ # as IP set references.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/network-firewall/latest/developerguide/rule-groups-ip-set-references
+ # [2]: https://docs.aws.amazon.com/vpc/latest/userguide/managed-prefix-lists.html
+ #
+ # @note When making an API call, you may pass IPSetReference
+ # data as a hash:
+ #
+ # {
+ # reference_arn: "ResourceArn",
+ # }
+ #
+ # @!attribute [rw] reference_arn
+ # The Amazon Resource Name (ARN) of the resource that you are
+ # referencing in your rule group.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/IPSetReference AWS API Documentation
+ #
+ class IPSetReference < Struct.new(
+ :reference_arn)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
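Putting `ReferenceSets` and `IPSetReference` together, a stateful rule group can point at a VPC prefix list and treat it as a dynamically updated variable in its rules. A sketch; the ARNs are placeholders, and the `@MY_ALLOWED` variable form follows the IP set reference syntax described in the linked developer guide, so verify it there before relying on it:

```ruby
require 'aws-sdk-networkfirewall'

nfw = Aws::NetworkFirewall::Client.new(region: 'us-east-1')

# Reference a managed prefix list by ARN and use it in a Suricata rule.
nfw.create_rule_group(
  rule_group_name: 'prefix-list-rules',
  type: 'STATEFUL',
  capacity: 100,
  rule_group: {
    reference_sets: {
      ip_set_references: {
        'MY_ALLOWED' => {
          reference_arn: 'arn:aws:ec2:us-east-1:123456789012:prefix-list/pl-0123456789abcdef0'
        }
      }
    },
    rules_source: {
      rules_string: 'pass ip @MY_ALLOWED any -> any any (sid:1000001;)'
    }
  }
)
```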
# Amazon Web Services doesn't currently have enough available capacity
# to fulfill your request. Try your request later.
#
@@ -2983,6 +3099,31 @@ class PutResourcePolicyRequest < Struct.new(
#
class PutResourcePolicyResponse < Aws::EmptyStructure; end
+ # Contains a set of IP set references.
+ #
+ # @note When making an API call, you may pass ReferenceSets
+ # data as a hash:
+ #
+ # {
+ # ip_set_references: {
+ # "IPSetReferenceName" => {
+ # reference_arn: "ResourceArn",
+ # },
+ # },
+ # }
+ #
+ # @!attribute [rw] ip_set_references
+ # The list of IP set references.
+ # @return [Hash&lt;String,Types::IPSetReference&gt;]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/network-firewall-2020-11-12/ReferenceSets AWS API Documentation
+ #
+ class ReferenceSets < Struct.new(
+ :ip_set_references)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# Unable to locate a resource using the parameters that you provided.
#
# @!attribute [rw] message
@@ -3136,6 +3277,13 @@ class RuleDefinition < Struct.new(
# },
# },
# },
+ # reference_sets: {
+ # ip_set_references: {
+ # "IPSetReferenceName" => {
+ # reference_arn: "ResourceArn",
+ # },
+ # },
+ # },
# rules_source: { # required
# rules_string: "RulesString",
# rules_source_list: {
@@ -3228,6 +3376,10 @@ class RuleDefinition < Struct.new(
# You can only use these for stateful rule groups.
# @return [Types::RuleVariables]
#
+ # @!attribute [rw] reference_sets
+ # The list of a rule group's reference sets.
+ # @return [Types::ReferenceSets]
+ #
# @!attribute [rw] rules_source
# The stateful rules or stateless rules for the rule group.
# @return [Types::RulesSource]
@@ -3243,6 +3395,7 @@ class RuleDefinition < Struct.new(
#
class RuleGroup < Struct.new(
:rule_variables,
+ :reference_sets,
:rules_source,
:stateful_rule_options)
SENSITIVE = []
@@ -5000,6 +5153,13 @@ class UpdateLoggingConfigurationResponse < Struct.new(
# },
# },
# },
+ # reference_sets: {
+ # ip_set_references: {
+ # "IPSetReferenceName" => {
+ # reference_arn: "ResourceArn",
+ # },
+ # },
+ # },
# rules_source: { # required
# rules_string: "RulesString",
# rules_source_list: {
diff --git a/gems/aws-sdk-rds/CHANGELOG.md b/gems/aws-sdk-rds/CHANGELOG.md
index 1bd0250f78c..83f3eb315bf 100644
--- a/gems/aws-sdk-rds/CHANGELOG.md
+++ b/gems/aws-sdk-rds/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.149.0 (2022-07-21)
+------------------
+
+* Feature - Adds support for creating an RDS Proxy for an RDS for MariaDB database.
+
1.148.0 (2022-07-05)
------------------
diff --git a/gems/aws-sdk-rds/VERSION b/gems/aws-sdk-rds/VERSION
index 309b7f257b5..283c6557091 100644
--- a/gems/aws-sdk-rds/VERSION
+++ b/gems/aws-sdk-rds/VERSION
@@ -1 +1 @@
-1.148.0
+1.149.0
diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds.rb
index 9f63ceeca25..3ed23caa86b 100644
--- a/gems/aws-sdk-rds/lib/aws-sdk-rds.rb
+++ b/gems/aws-sdk-rds/lib/aws-sdk-rds.rb
@@ -74,6 +74,6 @@
# @!group service
module Aws::RDS
- GEM_VERSION = '1.148.0'
+ GEM_VERSION = '1.149.0'
end
diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb
index 5503b956fa6..82b8b6ad71c 100644
--- a/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb
+++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb
@@ -765,7 +765,7 @@ def authorize_db_security_group_ingress(params = {}, options = {})
# For more information on backtracking, see [ Backtracking an Aurora DB
# Cluster][1] in the *Amazon Aurora User Guide*.
#
- # This action only applies to Aurora MySQL DB clusters.
+ # This action applies only to Aurora MySQL DB clusters.
#
#
#
@@ -1020,60 +1020,16 @@ def copy_db_cluster_parameter_group(params = {}, options = {})
#
# You can copy an encrypted DB cluster snapshot from another Amazon Web
# Services Region. In that case, the Amazon Web Services Region where
- # you call the `CopyDBClusterSnapshot` action is the destination Amazon
- # Web Services Region for the encrypted DB cluster snapshot to be copied
- # to. To copy an encrypted DB cluster snapshot from another Amazon Web
- # Services Region, you must provide the following values:
+ # you call the `CopyDBClusterSnapshot` operation is the destination
+ # Amazon Web Services Region for the encrypted DB cluster snapshot to be
+ # copied to. To copy an encrypted DB cluster snapshot from another
+ # Amazon Web Services Region, you must provide the following values:
#
# * `KmsKeyId` - The Amazon Web Services Key Management System (Amazon
# Web Services KMS) key identifier for the key to use to encrypt the
# copy of the DB cluster snapshot in the destination Amazon Web
# Services Region.
#
- # * `PreSignedUrl` - A URL that contains a Signature Version 4 signed
- # request for the `CopyDBClusterSnapshot` action to be called in the
- # source Amazon Web Services Region where the DB cluster snapshot is
- # copied from. The pre-signed URL must be a valid request for the
- # `CopyDBClusterSnapshot` API action that can be executed in the
- # source Amazon Web Services Region that contains the encrypted DB
- # cluster snapshot to be copied.
- #
- # The pre-signed URL request must contain the following parameter
- # values:
- #
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the
- # KMS key to use to encrypt the copy of the DB cluster snapshot in
- # the destination Amazon Web Services Region. This is the same
- # identifier for both the `CopyDBClusterSnapshot` action that is
- # called in the destination Amazon Web Services Region, and the
- # action contained in the pre-signed URL.
- #
- # * `DestinationRegion` - The name of the Amazon Web Services Region
- # that the DB cluster snapshot is to be created in.
- #
- # * `SourceDBClusterSnapshotIdentifier` - The DB cluster snapshot
- # identifier for the encrypted DB cluster snapshot to be copied.
- # This identifier must be in the Amazon Resource Name (ARN) format
- # for the source Amazon Web Services Region. For example, if you are
- # copying an encrypted DB cluster snapshot from the us-west-2 Amazon
- # Web Services Region, then your `SourceDBClusterSnapshotIdentifier`
- # looks like the following example:
- # `arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115`.
- #
- # To learn how to generate a Signature Version 4 signed request, see [
- # Authenticating Requests: Using Query Parameters (Amazon Web Services
- # Signature Version 4)][1] and [ Signature Version 4 Signing
- # Process][2].
- #
- # If you are using an Amazon Web Services SDK tool or the CLI, you can
- # specify `SourceRegion` (or `--source-region` for the CLI) instead of
- # specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
- #
- #
- #
# * `TargetDBClusterSnapshotIdentifier` - The identifier for the new
# copy of the DB cluster snapshot in the destination Amazon Web
# Services Region.
@@ -1082,7 +1038,7 @@ def copy_db_cluster_parameter_group(params = {}, options = {})
# identifier for the encrypted DB cluster snapshot to be copied. This
# identifier must be in the ARN format for the source Amazon Web
# Services Region and is the same value as the
- # `SourceDBClusterSnapshotIdentifier` in the pre-signed URL.
+ # `SourceDBClusterSnapshotIdentifier` in the presigned URL.
#
# To cancel the copy operation once it is in progress, delete the target
# DB cluster snapshot identified by `TargetDBClusterSnapshotIdentifier`
@@ -1090,22 +1046,20 @@ def copy_db_cluster_parameter_group(params = {}, options = {})
#
# For more information on copying encrypted Amazon Aurora DB cluster
# snapshots from one Amazon Web Services Region to another, see [
- # Copying a Snapshot][3] in the *Amazon Aurora User Guide*.
+ # Copying a Snapshot][1] in the *Amazon Aurora User Guide*.
#
# For more information on Amazon Aurora DB clusters, see [ What is
- # Amazon Aurora?][4] in the *Amazon Aurora User Guide*.
+ # Amazon Aurora?][2] in the *Amazon Aurora User Guide*.
#
# For more information on Multi-AZ DB clusters, see [ Multi-AZ
- # deployments with two readable standby DB instances][5] in the *Amazon
+ # deployments with two readable standby DB instances][3] in the *Amazon
# RDS User Guide*.
#
#
#
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
- # [2]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
- # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CopySnapshot.html
- # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html
- # [5]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html
+ # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CopySnapshot.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html
+ # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html
#
# @option params [required, String] :source_db_cluster_snapshot_identifier
# The identifier of the DB cluster snapshot to copy. This parameter
@@ -1173,26 +1127,30 @@ def copy_db_cluster_parameter_group(params = {}, options = {})
# the `KmsKeyId` parameter, an error is returned.
#
# @option params [String] :pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CopyDBClusterSnapshot` API action in the Amazon Web Services Region
- # that contains the source DB cluster snapshot to copy. The
- # `PreSignedUrl` parameter must be used when copying an encrypted DB
- # cluster snapshot from another Amazon Web Services Region. Don't
- # specify `PreSignedUrl` when you are copying an encrypted DB cluster
- # snapshot in the same Amazon Web Services Region.
+ # When you are copying a DB cluster snapshot from one Amazon Web
+ # Services GovCloud (US) Region to another, the URL that contains a
+ # Signature Version 4 signed request for the `CopyDBClusterSnapshot` API
+ # operation in the Amazon Web Services Region that contains the source
+ # DB cluster snapshot to copy. Use the `PreSignedUrl` parameter when
+ # copying an encrypted DB cluster snapshot from another Amazon Web
+ # Services Region. Don't specify `PreSignedUrl` when copying an
+ # encrypted DB cluster snapshot in the same Amazon Web Services Region.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions. It's ignored in other Amazon Web Services Regions.
#
- # The pre-signed URL must be a valid request for the
- # `CopyDBClusterSnapshot` API action that can be executed in the source
+ # The presigned URL must be a valid request for the
+ # `CopyDBClusterSnapshot` API operation that can run in the source
# Amazon Web Services Region that contains the encrypted DB cluster
- # snapshot to be copied. The pre-signed URL request must contain the
- # following parameter values:
+ # snapshot to copy. The presigned URL request must contain the following
+ # parameter values:
#
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the KMS
- # key to use to encrypt the copy of the DB cluster snapshot in the
- # destination Amazon Web Services Region. This is the same identifier
- # for both the `CopyDBClusterSnapshot` action that is called in the
- # destination Amazon Web Services Region, and the action contained in
- # the pre-signed URL.
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB cluster snapshot in the destination
+ # Amazon Web Services Region. This is the same identifier for both the
+ # `CopyDBClusterSnapshot` operation that is called in the destination
+ # Amazon Web Services Region, and the operation contained in the
+ # presigned URL.
#
# * `DestinationRegion` - The name of the Amazon Web Services Region
# that the DB cluster snapshot is to be created in.
@@ -1214,9 +1172,8 @@ def copy_db_cluster_parameter_group(params = {}, options = {})
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
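As the `SourceRegion` note says, the SDK can autogenerate the presigned URL. A sketch of a cross-Region encrypted cluster snapshot copy, called in the destination Region with placeholder identifiers:

```ruby
require 'aws-sdk-rds'

# Run in the destination Region; source_region makes the SDK build the
# presigned URL for the source Region automatically.
rds = Aws::RDS::Client.new(region: 'us-east-1')
rds.copy_db_cluster_snapshot(
  source_db_cluster_snapshot_identifier:
    'arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115',
  target_db_cluster_snapshot_identifier: 'aurora-cluster1-snapshot-copy',
  kms_key_id: 'arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555',
  source_region: 'us-west-2'
)
```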
@@ -1415,7 +1372,7 @@ def copy_db_parameter_group(params = {}, options = {})
#
# You can copy a snapshot from one Amazon Web Services Region to
# another. In that case, the Amazon Web Services Region where you call
- # the `CopyDBSnapshot` action is the destination Amazon Web Services
+ # the `CopyDBSnapshot` operation is the destination Amazon Web Services
# Region for the DB snapshot copy.
#
# This command doesn't apply to RDS Custom.
@@ -1443,8 +1400,7 @@ def copy_db_parameter_group(params = {}, options = {})
# must be the Amazon Resource Name (ARN) of the shared DB snapshot.
#
# If you are copying an encrypted snapshot this parameter must be in the
- # ARN format for the source Amazon Web Services Region, and must match
- # the `SourceDBSnapshotIdentifier` in the `PreSignedUrl` parameter.
+ # ARN format for the source Amazon Web Services Region.
#
# Constraints:
#
@@ -1510,39 +1466,46 @@ def copy_db_parameter_group(params = {}, options = {})
# snapshot to the target DB snapshot. By default, tags are not copied.
#
# @option params [String] :pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CopyDBSnapshot` API action in the source Amazon Web Services Region
- # that contains the source DB snapshot to copy.
+ # When you are copying a snapshot from one Amazon Web Services GovCloud
+ # (US) Region to another, the URL that contains a Signature Version 4
+ # signed request for the `CopyDBSnapshot` API operation in the source
+ # Amazon Web Services Region that contains the source DB snapshot to
+ # copy.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions. It's ignored in other Amazon Web Services Regions.
#
# You must specify this parameter when you copy an encrypted DB snapshot
# from another Amazon Web Services Region by using the Amazon RDS API.
# Don't specify `PreSignedUrl` when you are copying an encrypted DB
# snapshot in the same Amazon Web Services Region.
#
- # The presigned URL must be a valid request for the `CopyDBSnapshot` API
- # action that can be executed in the source Amazon Web Services Region
- # that contains the encrypted DB snapshot to be copied. The presigned
- # URL request must contain the following parameter values:
+ # The presigned URL must be a valid request for the
+ # `CopyDBSnapshot` API operation that can run in the source
+ # Amazon Web Services Region that contains the encrypted DB
+ # snapshot to copy. The presigned URL request must contain the following
+ # parameter values:
#
# * `DestinationRegion` - The Amazon Web Services Region that the
# encrypted DB snapshot is copied to. This Amazon Web Services Region
- # is the same one where the `CopyDBSnapshot` action is called that
+ # is the same one where the `CopyDBSnapshot` operation is called that
# contains this presigned URL.
#
# For example, if you copy an encrypted DB snapshot from the us-west-2
# Amazon Web Services Region to the us-east-1 Amazon Web Services
- # Region, then you call the `CopyDBSnapshot` action in the us-east-1
- # Amazon Web Services Region and provide a presigned URL that contains
- # a call to the `CopyDBSnapshot` action in the us-west-2 Amazon Web
- # Services Region. For this example, the `DestinationRegion` in the
- # presigned URL must be set to the us-east-1 Amazon Web Services
- # Region.
- #
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the KMS
- # key to use to encrypt the copy of the DB snapshot in the destination
- # Amazon Web Services Region. This is the same identifier for both the
- # `CopyDBSnapshot` action that is called in the destination Amazon Web
- # Services Region, and the action contained in the presigned URL.
+ # Region, then you call the `CopyDBSnapshot` operation in the
+ # us-east-1 Amazon Web Services Region and provide a presigned URL
+ # that contains a call to the `CopyDBSnapshot` operation in the
+ # us-west-2 Amazon Web Services Region. For this example, the
+ # `DestinationRegion` in the presigned URL must be set to the
+ # us-east-1 Amazon Web Services Region.
+ #
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB snapshot in the destination Amazon Web
+ # Services Region. This is the same identifier for both the
+ # `CopyDBSnapshot` operation that is called in the destination Amazon
+ # Web Services Region, and the operation contained in the presigned
+ # URL.
#
# * `SourceDBSnapshotIdentifier` - The DB snapshot identifier for the
# encrypted snapshot to be copied. This identifier must be in the
@@ -1559,9 +1522,8 @@ def copy_db_parameter_group(params = {}, options = {})
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
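+ #
+ # As an illustration only (the identifiers, Regions, and KMS key below
+ # are placeholders), a cross-Region encrypted snapshot copy that lets
+ # the SDK generate the presigned URL via `SourceRegion`:
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1") # destination Region
+ #     client.copy_db_snapshot({
+ #       source_db_snapshot_identifier: "arn:aws:rds:us-west-2:123456789012:snapshot:my-snapshot",
+ #       target_db_snapshot_identifier: "my-snapshot-copy",
+ #       kms_key_id: "alias/my-destination-key",
+ #       source_region: "us-west-2", # SDK builds PreSignedUrl from this
+ #     })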
@@ -2040,10 +2002,7 @@ def create_custom_db_engine_version(params = {}, options = {})
#
# You can use the `ReplicationSourceIdentifier` parameter to create an
# Amazon Aurora DB cluster as a read replica of another DB cluster or
- # Amazon RDS MySQL or PostgreSQL DB instance. For cross-Region
- # replication where the DB cluster identified by
- # `ReplicationSourceIdentifier` is encrypted, also specify the
- # `PreSignedUrl` parameter.
+ # Amazon RDS MySQL or PostgreSQL DB instance.
#
# For more information on Amazon Aurora, see [ What is Amazon
# Aurora?][1] in the *Amazon Aurora User Guide*.
@@ -2366,25 +2325,25 @@ def create_custom_db_engine_version(params = {}, options = {})
# Valid for: Aurora DB clusters and Multi-AZ DB clusters
#
# @option params [String] :pre_signed_url
- # A URL that contains a Signature Version 4 signed request for the
- # `CreateDBCluster` action to be called in the source Amazon Web
- # Services Region where the DB cluster is replicated from. Specify
- # `PreSignedUrl` only when you are performing cross-Region replication
- # from an encrypted DB cluster.
- #
- # The pre-signed URL must be a valid request for the `CreateDBCluster`
- # API action that can be executed in the source Amazon Web Services
- # Region that contains the encrypted DB cluster to be copied.
- #
- # The pre-signed URL request must contain the following parameter
- # values:
- #
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the KMS
- # key to use to encrypt the copy of the DB cluster in the destination
- # Amazon Web Services Region. This should refer to the same KMS key
- # for both the `CreateDBCluster` action that is called in the
- # destination Amazon Web Services Region, and the action contained in
- # the pre-signed URL.
+ # When you are replicating a DB cluster from one Amazon Web Services
+ # GovCloud (US) Region to another, a URL that contains a Signature
+ # Version 4 signed request for the `CreateDBCluster` operation to be
+ # called in the source Amazon Web Services Region where the DB cluster
+ # is replicated from. Specify `PreSignedUrl` only when you are
+ # performing cross-Region replication from an encrypted DB cluster.
+ #
+ # The presigned URL must be a valid request for the `CreateDBCluster`
+ # API operation that can run in the source Amazon Web Services Region
+ # that contains the encrypted DB cluster to copy.
+ #
+ # The presigned URL request must contain the following parameter values:
+ #
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB cluster in the destination Amazon Web
+ # Services Region. This should refer to the same KMS key for both the
+ # `CreateDBCluster` operation that is called in the destination Amazon
+ # Web Services Region, and the operation contained in the presigned
+ # URL.
#
# * `DestinationRegion` - The name of the Amazon Web Services Region
# that the Aurora read replica will be created in.
@@ -2405,9 +2364,8 @@ def create_custom_db_engine_version(params = {}, options = {})
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
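+ #
+ # A minimal sketch of a cross-Region encrypted replica cluster that
+ # relies on `SourceRegion` (identifiers, Regions, and the KMS key are
+ # placeholders):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1") # destination Region
+ #     client.create_db_cluster({
+ #       db_cluster_identifier: "my-replica-cluster",
+ #       engine: "aurora-mysql",
+ #       kms_key_id: "alias/my-destination-key",
+ #       replication_source_identifier: "arn:aws:rds:us-west-2:123456789012:cluster:my-source-cluster",
+ #       source_region: "us-west-2",
+ #     })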
@@ -2424,7 +2382,7 @@ def create_custom_db_engine_version(params = {}, options = {})
# accounts. By default, mapping isn't enabled.
#
# For more information, see [ IAM Database Authentication][1] in the
- # *Amazon Aurora User Guide.*.
+ # *Amazon Aurora User Guide*.
#
# Valid for: Aurora DB clusters only
#
@@ -2498,6 +2456,9 @@ def create_custom_db_engine_version(params = {}, options = {})
# The `multimaster` engine mode only applies for DB clusters created
# with Aurora MySQL version 5.6.10a.
#
+ # The `serverless` engine mode only applies for Aurora Serverless v1 DB
+ # clusters.
+ #
# For Aurora PostgreSQL, the `global` engine mode isn't required, and
# both the `parallelquery` and the `multimaster` engine modes currently
# aren't supported.
@@ -2506,22 +2467,25 @@ def create_custom_db_engine_version(params = {}, options = {})
# information, see the following sections in the *Amazon Aurora User
# Guide*\:
#
- # * [ Limitations of Aurora Serverless v1][1]
+ # * [Limitations of Aurora Serverless v1][1]
+ #
+ # * [Requirements for Aurora Serverless v2][2]
#
- # * [ Limitations of Parallel Query][2]
+ # * [Limitations of Parallel Query][3]
#
- # * [ Limitations of Aurora Global Databases][3]
+ # * [Limitations of Aurora Global Databases][4]
#
- # * [ Limitations of Multi-Master Clusters][4]
+ # * [Limitations of Multi-Master Clusters][5]
#
# Valid for: Aurora DB clusters only
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations
- # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations
- # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations
- # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html
+ # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations
+ # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations
+ # [5]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations
#
# @option params [Types::ScalingConfiguration] :scaling_configuration
# For DB clusters in `serverless` DB engine mode, the scaling properties
@@ -2763,8 +2727,27 @@ def create_custom_db_engine_version(params = {}, options = {})
# Valid for: Multi-AZ DB clusters only
#
# @option params [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# Valid for: Multi-AZ DB clusters only
#
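+ # As a quick local check of the rule above (a sketch, not part of the
+ # API):
+ #
+ #     # Valid: 7, 731, or a whole number of months (1-23) times 31.
+ #     def valid_pi_retention?(days)
+ #       days == 7 || days == 731 || (days % 31 == 0 && (1..23).cover?(days / 31))
+ #     end
+ #
+ #     valid_pi_retention?(93) # => true  (3 months * 31)
+ #     valid_pi_retention?(94) # => false (RDS issues an error)
+ #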
@@ -2997,7 +2980,7 @@ def create_db_cluster(params = {}, options = {})
# Creates a new custom endpoint and associates it with an Amazon Aurora
# DB cluster.
#
- # This action only applies to Aurora DB clusters.
+ # This action applies only to Aurora DB clusters.
#
#
#
@@ -3109,8 +3092,9 @@ def create_db_cluster_endpoint(params = {}, options = {})
# the default database for a DB cluster, such as the character set for
# the default database defined by the `character_set_database`
# parameter. You can use the *Parameter Groups* option of the [Amazon
- # RDS console][1] or the `DescribeDBClusterParameters` action to verify
- # that your DB cluster parameter group has been created or modified.
+ # RDS console][1] or the `DescribeDBClusterParameters` operation to
+ # verify that your DB cluster parameter group has been created or
+ # modified.
#
# For more information on Amazon Aurora, see [ What is Amazon
# Aurora?][2] in the *Amazon Aurora User Guide*.
@@ -3363,6 +3347,23 @@ def create_db_cluster_snapshot(params = {}, options = {})
# Creates a new DB instance.
#
+ # The new DB instance can be an RDS DB instance, or it can be a DB
+ # instance in an Aurora DB cluster. For an Aurora DB cluster, you can
+ # call this operation multiple times to add more than one DB instance to
+ # the cluster.
+ #
+ # For more information about creating an RDS DB instance, see [ Creating
+ # an Amazon RDS DB instance][1] in the *Amazon RDS User Guide*.
+ #
+ # For more information about creating a DB instance in an Aurora DB
+ # cluster, see [ Creating an Amazon Aurora DB cluster][2] in the *Amazon
+ # Aurora User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CreateDBInstance.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.CreateInstance.html
+ #
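+ # For example, a sketch that adds a second DB instance to an existing
+ # Aurora cluster (the identifiers and instance class are placeholders):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1")
+ #     client.create_db_instance({
+ #       db_instance_identifier: "my-aurora-instance-2",
+ #       db_instance_class: "db.r5.large",
+ #       engine: "aurora-mysql",
+ #       db_cluster_identifier: "my-aurora-cluster", # attach to the cluster
+ #     })
+ #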
# @option params [String] :db_name
# The meaning of this parameter differs according to the database engine
# you use.
@@ -3596,14 +3597,16 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# @option params [required, String] :db_instance_class
# The compute and memory capacity of the DB instance, for example
- # db.m4.large. Not all DB instance classes are available in all Amazon
+ # db.m5.large. Not all DB instance classes are available in all Amazon
# Web Services Regions, or for all database engines. For the full list
# of DB instance classes, and availability for your engine, see [DB
- # Instance Class][1] in the *Amazon RDS User Guide*.
+ # instance classes][1] in the *Amazon RDS User Guide* or [Aurora DB
+ # instance classes][2] in the *Amazon Aurora User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html
#
# @option params [required, String] :engine
# The name of the database engine to be used for this instance.
@@ -3702,7 +3705,9 @@ def create_db_cluster_snapshot(params = {}, options = {})
# @option params [Array&lt;String&gt;] :db_security_groups
# A list of DB security groups to associate with this DB instance.
#
- # Default: The default DB security group for the database engine.
+ # This setting applies to the legacy EC2-Classic platform, which is no
+ # longer used to create new DB instances. Use the `VpcSecurityGroupIds`
+ # setting instead.
#
# @option params [Array&lt;String&gt;] :vpc_security_group_ids
# A list of Amazon EC2 VPC security groups to associate with this DB
@@ -3802,7 +3807,7 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# * Can't be set to 0 if the DB instance is a source to read replicas
#
- # * Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance
+ # * Can't be set to 0 for an RDS Custom for Oracle DB instance
#
# @option params [String] :preferred_backup_window
# The daily time range during which automated backups are created if
@@ -3885,11 +3890,16 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. DB instance Availability Zones (AZs) are managed by
+ # the DB cluster.
+ #
# @option params [String] :engine_version
# The version number of the database engine to use.
#
# For a list of valid engine versions, use the
- # `DescribeDBEngineVersions` action.
+ # `DescribeDBEngineVersions` operation.
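+ #
+ # For instance, a short sketch that lists the versions for one engine:
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1")
+ #     resp = client.describe_db_engine_versions(engine: "mysql")
+ #     resp.db_engine_versions.each { |v| puts v.engine_version }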
#
# The following are the database engines and links to information about
# the major and minor versions that are available with Amazon RDS. Not
@@ -3965,6 +3975,10 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable.
+ #
# @option params [Integer] :iops
# The amount of Provisioned IOPS (input/output operations per second) to
# be initially allocated for the DB instance. For information about
@@ -3976,6 +3990,10 @@ def create_db_cluster_snapshot(params = {}, options = {})
# instance. For SQL Server DB instances, must be a multiple between 1
# and 50 of the storage amount for the DB instance.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS
@@ -3991,6 +4009,10 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable.
+ #
# @option params [String] :character_set_name
# For supported engines, this value indicates that the DB instance
# should be associated with the specified `CharacterSet`.
@@ -4061,12 +4083,20 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# Default: `io1` if the `Iops` parameter is specified, otherwise `gp2`
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
# @option params [String] :tde_credential_arn
# The ARN from the key store with which to associate the instance for
# TDE encryption.
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable.
+ #
# @option params [String] :tde_credential_password
# The password for the given ARN from the key store in order to access
# the device.
@@ -4123,6 +4153,10 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. The domain is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html
@@ -4171,6 +4205,10 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. The domain is managed by the DB cluster.
+ #
# @option params [Integer] :promotion_tier
# A value that specifies the order in which an Aurora Replica is
# promoted to the primary instance after a failure of the existing
@@ -4200,13 +4238,16 @@ def create_db_cluster_snapshot(params = {}, options = {})
# Services Identity and Access Management (IAM) accounts to database
# accounts. By default, mapping isn't enabled.
#
- # This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora,
- # mapping Amazon Web Services IAM accounts to database accounts is
- # managed by the DB cluster.
- #
# For more information, see [ IAM Database Authentication for MySQL and
# PostgreSQL][1] in the *Amazon RDS User Guide*.
#
+ # This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. Mapping Amazon Web Services IAM accounts to database
+ # accounts is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html
@@ -4237,8 +4278,27 @@ def create_db_cluster_snapshot(params = {}, options = {})
# This setting doesn't apply to RDS Custom.
#
# @option params [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
#
@@ -4287,6 +4347,10 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable.
+ #
# @option params [Boolean] :deletion_protection
# A value that indicates whether the DB instance has deletion protection
# enabled. The database can't be deleted when deletion protection is
@@ -4314,6 +4378,10 @@ def create_db_cluster_snapshot(params = {}, options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling
@@ -4641,8 +4709,8 @@ def create_db_instance(params = {}, options = {})
# information, see [Working with Read Replicas][1] in the *Amazon RDS
# User Guide*.
#
- # Amazon Aurora doesn't support this action. Call the
- # `CreateDBInstance` action to create a DB instance for an Aurora DB
+ # Amazon Aurora doesn't support this operation. Call the
+ # `CreateDBInstance` operation to create a DB instance for an Aurora DB
# cluster.
#
# All read replica DB instances are created with backups disabled. All
@@ -4774,7 +4842,7 @@ def create_db_instance(params = {}, options = {})
# specified DB engine for a cross-Region read replica.
#
# Specifying a parameter group for this operation is only supported for
- # Oracle DB instances. It isn't supported for RDS Custom.
+ # MySQL and Oracle DB instances. It isn't supported for RDS Custom.
#
# Constraints:
#
@@ -4913,9 +4981,16 @@ def create_db_instance(params = {}, options = {})
# as the primary replica.
#
# @option params [String] :pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CreateDBInstanceReadReplica` API action in the source Amazon Web
- # Services Region that contains the source DB instance.
+ # When you are creating a read replica from one Amazon Web Services
+ # GovCloud (US) Region to another or from one China Amazon Web Services
+ # Region to another, the URL that contains a Signature Version 4 signed
+ # request for the `CreateDBInstanceReadReplica` API operation in the
+ # source Amazon Web Services Region that contains the source DB
+ # instance.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US) Regions
+ # and China Amazon Web Services Regions. It's ignored in other Amazon
+ # Web Services Regions.
#
# You must specify this parameter when you create an encrypted read
# replica from another Amazon Web Services Region by using the Amazon
@@ -4923,32 +4998,31 @@ def create_db_instance(params = {}, options = {})
# encrypted read replica in the same Amazon Web Services Region.
#
# The presigned URL must be a valid request for the
- # `CreateDBInstanceReadReplica` API action that can be executed in the
- # source Amazon Web Services Region that contains the encrypted source
- # DB instance. The presigned URL request must contain the following
+ # `CreateDBInstanceReadReplica` API operation that can run in the source
+ # Amazon Web Services Region that contains the encrypted source DB
+ # instance. The presigned URL request must contain the following
# parameter values:
#
# * `DestinationRegion` - The Amazon Web Services Region that the
# encrypted read replica is created in. This Amazon Web Services
# Region is the same one where the `CreateDBInstanceReadReplica`
- # action is called that contains this presigned URL.
+ # operation is called that contains this presigned URL.
#
# For example, if you create an encrypted read replica in the us-east-1
# Amazon Web Services Region, from a source DB instance in the
# us-west-2 Amazon Web Services Region, then you call the
- # `CreateDBInstanceReadReplica` action in the us-east-1 Amazon Web
+ # `CreateDBInstanceReadReplica` operation in the us-east-1 Amazon Web
# Services Region and provide a presigned URL that contains a call to
- # the `CreateDBInstanceReadReplica` action in the us-west-2 Amazon Web
- # Services Region. For this example, the `DestinationRegion` in the
- # presigned URL must be set to the us-east-1 Amazon Web Services
+ # the `CreateDBInstanceReadReplica` operation in the us-west-2 Amazon
+ # Web Services Region. For this example, the `DestinationRegion` in
+ # the presigned URL must be set to the us-east-1 Amazon Web Services
# Region.
#
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the key
- # to use to encrypt the read replica in the destination Amazon Web
- # Services Region. This is the same identifier for both the
- # `CreateDBInstanceReadReplica` action that is called in the
- # destination Amazon Web Services Region, and the action contained in
- # the presigned URL.
+ # * `KmsKeyId` - The KMS key identifier for the key to use to encrypt
+ # the read replica in the destination Amazon Web Services Region. This
+ # is the same identifier for both the `CreateDBInstanceReadReplica`
+ # operation that is called in the destination Amazon Web Services
+ # Region, and the operation contained in the presigned URL.
#
# * `SourceDBInstanceIdentifier` - The DB instance identifier for the
# encrypted DB instance to be replicated. This identifier must be in
@@ -4967,11 +5041,10 @@ def create_db_instance(params = {}, options = {})
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
# autogenerates a presigned URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # operation that can run in the source Amazon Web Services Region.
#
- # `SourceRegion` isn't supported for SQL Server, because SQL Server on
- # Amazon RDS doesn't support cross-Region read replicas.
+ # `SourceRegion` isn't supported for SQL Server, because Amazon RDS for
+ # SQL Server doesn't support cross-Region read replicas.
#
#
#
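+ #
+ # A minimal cross-Region encrypted read replica sketch (identifiers,
+ # Regions, and the KMS key are placeholders):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1") # destination Region
+ #     client.create_db_instance_read_replica({
+ #       db_instance_identifier: "my-replica",
+ #       source_db_instance_identifier: "arn:aws:rds:us-west-2:123456789012:db:my-source-db",
+ #       kms_key_id: "alias/my-destination-key",
+ #       source_region: "us-west-2", # SDK generates PreSignedUrl from this
+ #     })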
@@ -5025,8 +5098,27 @@ def create_db_instance(params = {}, options = {})
# This setting doesn't apply to RDS Custom.
#
# @option params [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
#
@@ -5568,8 +5660,10 @@ def create_db_parameter_group(params = {}, options = {})
# @option params [required, String] :engine_family
# The kinds of databases that the proxy can connect to. This value
# determines which database network protocol the proxy recognizes when
- # it interprets network traffic to and from the database. The engine
- # family applies to MySQL and PostgreSQL for both RDS and Aurora.
+ # it interprets network traffic to and from the database. For Aurora
+ # MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL`.
+ # For Aurora PostgreSQL and RDS for PostgreSQL databases, specify
+ # `POSTGRESQL`.
#
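+ # For instance (the names, ARNs, and subnet IDs below are placeholders),
+ # a proxy in front of an RDS for MySQL database uses `MYSQL`:
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1")
+ #     client.create_db_proxy({
+ #       db_proxy_name: "my-proxy",
+ #       engine_family: "MYSQL", # "POSTGRESQL" for PostgreSQL databases
+ #       auth: [{
+ #         auth_scheme: "SECRETS",
+ #         secret_arn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:my-db-secret",
+ #       }],
+ #       role_arn: "arn:aws:iam::123456789012:role/my-proxy-role",
+ #       vpc_subnet_ids: ["subnet-0abc1234", "subnet-0def5678"],
+ #     })
+ #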
# @option params [required, Array&lt;Types::UserAuthConfig&gt;] :auth
# The authorization mechanism that the proxy uses.
@@ -6046,11 +6140,11 @@ def create_db_subnet_group(params = {}, options = {})
req.send_request(options)
end
- # Creates an RDS event notification subscription. This action requires a
- # topic Amazon Resource Name (ARN) created by either the RDS console,
- # the SNS console, or the SNS API. To obtain an ARN with SNS, you must
- # create a topic in Amazon SNS and subscribe to the topic. The ARN is
- # displayed in the SNS console.
+ # Creates an RDS event notification subscription. This operation
+ # requires a topic Amazon Resource Name (ARN) created by either the RDS
+ # console, the SNS console, or the SNS API. To obtain an ARN with SNS,
+ # you must create a topic in Amazon SNS and subscribe to the topic. The
+ # ARN is displayed in the SNS console.
#
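+ # A short sketch (the topic ARN and source ID are placeholders):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1")
+ #     client.create_event_subscription({
+ #       subscription_name: "my-instance-events",
+ #       sns_topic_arn: "arn:aws:sns:us-east-1:123456789012:my-rds-topic",
+ #       source_type: "db-instance",
+ #       source_ids: ["my-db-instance"],
+ #       event_categories: ["availability", "failure"],
+ #     })
+ #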
# You can specify the type of source (`SourceType`) that you want to be
# notified of and provide a list of RDS sources (`SourceIds`) that
@@ -6233,7 +6327,7 @@ def create_event_subscription(params = {}, options = {})
# existing Aurora cluster during the create operation, and this cluster
# becomes the primary cluster of the global database.
#
- # This action only applies to Aurora DB clusters.
+ # This action applies only to Aurora DB clusters.
#
#
#
@@ -6255,9 +6349,9 @@ def create_event_subscription(params = {}, options = {})
# global database can't be deleted when deletion protection is enabled.
#
# @option params [String] :database_name
- # The name for your database of up to 64 alpha-numeric characters. If
- # you do not provide a name, Amazon Aurora will not create a database in
- # the global database cluster you are creating.
+ # The name for your database of up to 64 alphanumeric characters. If you
+ # do not provide a name, Amazon Aurora will not create a database in the
+ # global database cluster you are creating.
#
# @option params [Boolean] :storage_encrypted
# The storage encryption setting for the new global database cluster.
@@ -10211,8 +10305,7 @@ def describe_db_snapshot_attributes(params = {}, options = {})
#
# @option params [String] :db_instance_identifier
# The ID of the DB instance to retrieve the list of DB snapshots for.
- # This parameter can't be used in conjunction with
- # `DBSnapshotIdentifier`. This parameter isn't case-sensitive.
+ # This parameter isn't case-sensitive.
#
# Constraints:
#
@@ -10221,9 +10314,8 @@ def describe_db_snapshot_attributes(params = {}, options = {})
# ^
#
# @option params [String] :db_snapshot_identifier
- # A specific DB snapshot identifier to describe. This parameter can't
- # be used in conjunction with `DBInstanceIdentifier`. This value is
- # stored as a lowercase string.
+ # A specific DB snapshot identifier to describe. This value is stored as
+ # a lowercase string.
#
# Constraints:
#
@@ -10943,10 +11035,19 @@ def describe_event_subscriptions(params = {}, options = {})
# snapshot, DB cluster snapshot group, or RDS Proxy can be obtained by
# providing the name as a parameter.
#
+ # For more information on working with events, see [Monitoring Amazon
+ # RDS events][1] in the *Amazon RDS User Guide* and [Monitoring Amazon
+ # Aurora events][2] in the *Amazon Aurora User Guide*.
+ #
# By default, RDS returns events that were generated in the past hour.
#
#
#
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/working-with-events.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/working-with-events.html
+ #
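+ # A small sketch that widens the default one-hour window (the instance
+ # identifier is a placeholder):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1")
+ #     resp = client.describe_events({
+ #       source_type: "db-instance",
+ #       source_identifier: "my-db-instance",
+ #       duration: 1440, # minutes, i.e. the past 24 hours
+ #     })
+ #     resp.events.each { |e| puts "#{e.date} #{e.message}" }
+ #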
# @option params [String] :source_identifier
# The identifier of the event source for which events are returned. If
# not specified, then all sources are included in the response.
@@ -11652,7 +11753,7 @@ def describe_option_groups(params = {}, options = {})
#
# Default: 100
#
- # Constraints: Minimum 20, maximum 100.
+ # Constraints: Minimum 20, maximum 10000.
#
# @option params [String] :marker
# An optional pagination token provided by a previous
@@ -12407,7 +12508,7 @@ def download_db_log_file_portion(params = {}, options = {})
#
# An Amazon Aurora DB cluster automatically fails over to an Aurora
# Replica, if one exists, when the primary DB instance fails. A Multi-AZ
- # DB cluster automatically fails over to a readbable standby DB instance
+ # DB cluster automatically fails over to a readable standby DB instance
# when the primary DB instance fails.
#
# To simulate a failure of a primary instance for testing, you can force
@@ -13560,8 +13661,27 @@ def modify_custom_db_engine_version(params = {}, options = {})
# Valid for: Multi-AZ DB clusters only
#
# @option params [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# Valid for: Multi-AZ DB clusters only
#
@@ -13849,7 +13969,7 @@ def modify_db_cluster_endpoint(params = {}, options = {})
# database for a DB cluster, such as the character set for the default
# database defined by the `character_set_database` parameter. You can
# use the *Parameter Groups* option of the [Amazon RDS console][1] or
- # the `DescribeDBClusterParameters` action to verify that your DB
+ # the `DescribeDBClusterParameters` operation to verify that your DB
# cluster parameter group has been created or modified.
#
# If the modified DB cluster parameter group is used by an Aurora
@@ -13976,7 +14096,7 @@ def modify_db_cluster_parameter_group(params = {}, options = {})
# To view which Amazon Web Services accounts have access to copy or
# restore a manual DB cluster snapshot, or whether a manual DB cluster
# snapshot is public or private, use the
- # DescribeDBClusterSnapshotAttributes API action. The accounts are
+ # DescribeDBClusterSnapshotAttributes API operation. The accounts are
# returned as values for the `restore` attribute.
#
# @option params [required, String] :db_cluster_snapshot_identifier
@@ -13990,7 +14110,7 @@ def modify_db_cluster_parameter_group(params = {}, options = {})
# or restore a manual DB cluster snapshot, set this value to `restore`.
#
# To view the list of attributes available to modify, use the
- # DescribeDBClusterSnapshotAttributes API action.
+ # DescribeDBClusterSnapshotAttributes API operation.
#
#
#
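+ #
+ # For example, a sketch that shares a manual DB cluster snapshot with
+ # another account (both identifiers are placeholders):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1")
+ #     client.modify_db_cluster_snapshot_attribute({
+ #       db_cluster_snapshot_identifier: "my-cluster-snapshot",
+ #       attribute_name: "restore",
+ #       values_to_add: ["210987654321"], # account ID to grant access to
+ #     })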
@@ -14102,10 +14222,11 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
#
# @option params [String] :db_instance_class
# The new compute and memory capacity of the DB instance, for example
- # db.m4.large. Not all DB instance classes are available in all Amazon
+ # db.m5.large. Not all DB instance classes are available in all Amazon
# Web Services Regions, or for all database engines. For the full list
# of DB instance classes, and availability for your engine, see [DB
- # Instance Class][1] in the *Amazon RDS User Guide*.
+ # instance classes][1] in the *Amazon RDS User Guide* or [Aurora DB
+ # instance classes][2] in the *Amazon Aurora User Guide*.
#
# If you modify the DB instance class, an outage occurs during the
# change. The change is applied during the next maintenance window,
@@ -14118,6 +14239,7 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html
#
# @option params [String] :db_subnet_group_name
# The new DB subnet group for the DB instance. You can use this
@@ -14229,7 +14351,7 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
#
# Constraints: Must contain from 8 to 128 characters.
#
- # Amazon RDS API actions never return the password, so this action
+ # Amazon RDS API operations never return the password, so this operation
# provides a way to regain access to a primary instance user if the
# password is lost. This includes restoring privileges that might have
# been accidentally revoked.
@@ -14281,8 +14403,8 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
# Constraints:
#
# * It must be a value from 0 to 35. It can't be set to 0 if the DB
- # instance is a source to read replicas. It can't be set to 0 or 35
- # for an RDS Custom for Oracle DB instance.
+ # instance is a source to read replicas. It can't be set to 0 for an
+ # RDS Custom for Oracle DB instance.
#
# * It can be specified for a MySQL read replica only if the source is
# running MySQL 5.6 or later.
@@ -14694,7 +14816,7 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
# DB instance.
#
# For more information, see [Using Amazon Performance Insights][1] in
- # the *Amazon RDS User Guide.*.
+ # the *Amazon RDS User Guide*.
#
# This setting doesn't apply to RDS Custom.
#
@@ -14717,8 +14839,27 @@ def modify_db_cluster_snapshot_attribute(params = {}, options = {})
# This setting doesn't apply to RDS Custom.
#
# @option params [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
#
@@ -15394,10 +15535,10 @@ def modify_db_proxy_endpoint(params = {}, options = {})
# Modifies the properties of a `DBProxyTargetGroup`.
#
# @option params [required, String] :target_group_name
- # The name of the new target group to assign to the proxy.
+ # The name of the target group to modify.
#
# @option params [required, String] :db_proxy_name
- # The name of the new proxy to which to assign the target group.
+ # The name of the proxy.
#
# @option params [Types::ConnectionPoolConfiguration] :connection_pool_config
# The settings that determine the size and behavior of the connection
@@ -15585,7 +15726,7 @@ def modify_db_snapshot(params = {}, options = {})
#
# To view which Amazon Web Services accounts have access to copy or
# restore a manual DB snapshot, or whether a manual DB snapshot is public
- # or private, use the DescribeDBSnapshotAttributes API action. The
+ # or private, use the DescribeDBSnapshotAttributes API operation. The
# accounts are returned as values for the `restore` attribute.
#
# @option params [required, String] :db_snapshot_identifier
@@ -15598,7 +15739,7 @@ def modify_db_snapshot(params = {}, options = {})
# or restore a manual DB snapshot, set this value to `restore`.
#
# To view the list of attributes available to modify, use the
- # DescribeDBSnapshotAttributes API action.
+ # DescribeDBSnapshotAttributes API operation.
#
#
#
@@ -16739,6 +16880,9 @@ def reboot_db_cluster(params = {}, options = {})
#
# This command doesn't apply to RDS Custom.
#
+ # If your DB instance is part of a Multi-AZ DB cluster, you can reboot
+ # the DB cluster with the `RebootDBCluster` operation.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_RebootInstance.html
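+ #
+ # For example (the cluster identifier is a placeholder):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1")
+ #     client.reboot_db_cluster({ db_cluster_identifier: "my-multi-az-cluster" })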
@@ -17531,9 +17675,9 @@ def reset_db_parameter_group(params = {}, options = {})
# @option params [required, String] :engine
# The name of the database engine to be used for this DB cluster.
#
- # Valid Values: `aurora` (for MySQL 5.6-compatible Aurora),
+ # Valid Values: `aurora` (for MySQL 5.6-compatible Aurora) and
# `aurora-mysql` (for MySQL 5.7-compatible and MySQL 8.0-compatible
- # Aurora), and `aurora-postgresql`
+ # Aurora)
#
# @option params [String] :engine_version
# The version number of the database engine to use.
@@ -17551,20 +17695,10 @@ def reset_db_parameter_group(params = {}, options = {})
# `aws rds describe-db-engine-versions --engine aurora-mysql --query
# "DBEngineVersions[].EngineVersion"`
#
- # To list all of the available engine versions for `aurora-postgresql`,
- # use the following command:
- #
- # `aws rds describe-db-engine-versions --engine aurora-postgresql
- # --query "DBEngineVersions[].EngineVersion"`
- #
# **Aurora MySQL**
#
- # Example: `5.6.10a`, `5.6.mysql_aurora.1.19.2`, `5.7.12`,
- # `5.7.mysql_aurora.2.04.5`, `8.0.mysql_aurora.3.01.0`
- #
- # **Aurora PostgreSQL**
- #
- # Example: `9.6.3`, `10.7`
+ # Example: `5.6.10a`, `5.6.mysql_aurora.1.19.2`,
+ # `5.7.mysql_aurora.2.07.1`, `8.0.mysql_aurora.3.02.0`
#
# @option params [Integer] :port
# The port number on which the instances in the restored DB cluster
@@ -18198,7 +18332,7 @@ def restore_db_cluster_from_s3(params = {}, options = {})
#
# For more information about exporting CloudWatch Logs for Amazon RDS,
# see [Publishing Database Logs to Amazon CloudWatch Logs][1] in the
- # *Amazon RDS User Guide.*.
+ # *Amazon RDS User Guide*.
#
# For more information about exporting CloudWatch Logs for Amazon
# Aurora, see [Publishing Database Logs to Amazon CloudWatch Logs][2] in
@@ -18780,7 +18914,7 @@ def restore_db_cluster_from_snapshot(params = {}, options = {})
#
# For more information about exporting CloudWatch Logs for Amazon RDS,
# see [Publishing Database Logs to Amazon CloudWatch Logs][1] in the
- # *Amazon RDS User Guide.*.
+ # *Amazon RDS User Guide*.
#
# For more information about exporting CloudWatch Logs for Amazon
# Aurora, see [Publishing Database Logs to Amazon CloudWatch Logs][2] in
@@ -20187,7 +20321,7 @@ def restore_db_instance_from_db_snapshot(params = {}, options = {})
# DB instance.
#
# For more information, see [Using Amazon Performance Insights][1] in
- # the *Amazon RDS User Guide.*.
+ # the *Amazon RDS User Guide*.
#
#
#
@@ -20206,8 +20340,27 @@ def restore_db_instance_from_db_snapshot(params = {}, options = {})
# a different default KMS key for each Amazon Web Services Region.
#
# @option params [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# @option params [Array&lt;String&gt;] :enable_cloudwatch_logs_exports
# The list of logs that the restored DB instance is to export to
@@ -21733,14 +21886,36 @@ def start_db_instance(params = {}, options = {})
# `arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE`.
#
# @option params [String] :pre_signed_url
- # A URL that contains a Signature Version 4 signed request for the
- # StartDBInstanceAutomatedBackupsReplication action to be called in the
+ # In an Amazon Web Services GovCloud (US) Region, a URL that contains a
+ # Signature Version 4 signed request for the
+ # `StartDBInstanceAutomatedBackupsReplication` operation to call in the
# Amazon Web Services Region of the source DB instance. The presigned
# URL must be a valid request for the
- # StartDBInstanceAutomatedBackupsReplication API action that can be
- # executed in the Amazon Web Services Region that contains the source DB
+ # `StartDBInstanceAutomatedBackupsReplication` API operation that can
+ # run in the Amazon Web Services Region that contains the source DB
# instance.
#
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions. It's ignored in other Amazon Web Services Regions.
+ #
+ # To learn how to generate a Signature Version 4 signed request, see [
+ # Authenticating Requests: Using Query Parameters (Amazon Web Services
+ # Signature Version 4)][1] and [ Signature Version 4 Signing
+ # Process][2].
+ #
+ # If you are using an Amazon Web Services SDK tool or the CLI, you can
+ # specify `SourceRegion` (or `--source-region` for the CLI) instead of
+ # specifying `PreSignedUrl` manually. Specifying `SourceRegion`
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
+ #
+ #
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+ # [2]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+ #
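+ # A minimal sketch using `SourceRegion` (the ARN and Regions are
+ # placeholders):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-west-1") # replication destination
+ #     client.start_db_instance_automated_backups_replication({
+ #       source_db_instance_arn: "arn:aws:rds:us-east-1:123456789012:db:my-db",
+ #       backup_retention_period: 7,
+ #       source_region: "us-east-1",
+ #     })
+ #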
# @option params [String] :source_region
# The source region of the snapshot. This is only needed when the
# snapshot is encrypted and in a different region.
@@ -22309,7 +22484,8 @@ def stop_db_instance(params = {}, options = {})
# Stops automated backup replication for a DB instance.
#
- # This command doesn't apply to RDS Custom.
+ # This command doesn't apply to RDS Custom, Aurora MySQL, or Aurora
+ # PostgreSQL.
#
# For more information, see [ Replicating Automated Backups to Another
# Amazon Web Services Region][1] in the *Amazon RDS User Guide*.
@@ -22387,7 +22563,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-rds'
- context[:gem_version] = '1.148.0'
+ context[:gem_version] = '1.149.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb
index e6b3eda425b..4db7a86758c 100644
--- a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb
+++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb
@@ -579,8 +579,24 @@ def performance_insights_kms_key_id
data[:performance_insights_kms_key_id]
end
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
#
# This setting is only for non-Aurora Multi-AZ DB clusters.
# @return [Integer]
@@ -1075,25 +1091,25 @@ def wait_until(options = {}, &block)
#
# Valid for: Aurora DB clusters and Multi-AZ DB clusters
# @option options [String] :pre_signed_url
- # A URL that contains a Signature Version 4 signed request for the
- # `CreateDBCluster` action to be called in the source Amazon Web
- # Services Region where the DB cluster is replicated from. Specify
- # `PreSignedUrl` only when you are performing cross-Region replication
- # from an encrypted DB cluster.
- #
- # The pre-signed URL must be a valid request for the `CreateDBCluster`
- # API action that can be executed in the source Amazon Web Services
- # Region that contains the encrypted DB cluster to be copied.
- #
- # The pre-signed URL request must contain the following parameter
- # values:
- #
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the KMS
- # key to use to encrypt the copy of the DB cluster in the destination
- # Amazon Web Services Region. This should refer to the same KMS key
- # for both the `CreateDBCluster` action that is called in the
- # destination Amazon Web Services Region, and the action contained in
- # the pre-signed URL.
+ # When you are replicating a DB cluster from one Amazon Web Services
+ # GovCloud (US) Region to another, a URL that contains a Signature
+ # Version 4 signed request for the `CreateDBCluster` operation to be
+ # called in the source Amazon Web Services Region where the DB cluster
+ # is replicated from. Specify `PreSignedUrl` only when you are
+ # performing cross-Region replication from an encrypted DB cluster.
+ #
+ # The presigned URL must be a valid request for the `CreateDBCluster`
+ # API operation that can run in the source Amazon Web Services Region
+ # that contains the encrypted DB cluster to copy.
+ #
+ # The presigned URL request must contain the following parameter values:
+ #
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB cluster in the destination Amazon Web
+ # Services Region. This should refer to the same KMS key for both the
+ # `CreateDBCluster` operation that is called in the destination Amazon
+ # Web Services Region, and the operation contained in the presigned
+ # URL.
#
# * `DestinationRegion` - The name of the Amazon Web Services Region
# that the Aurora read replica will be created in.
@@ -1114,9 +1130,8 @@ def wait_until(options = {}, &block)
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
@@ -1132,7 +1147,7 @@ def wait_until(options = {}, &block)
# accounts. By default, mapping isn't enabled.
#
# For more information, see [ IAM Database Authentication][1] in the
- # *Amazon Aurora User Guide.*.
+ # *Amazon Aurora User Guide*.
#
# Valid for: Aurora DB clusters only
#
@@ -1203,6 +1218,9 @@ def wait_until(options = {}, &block)
# The `multimaster` engine mode only applies for DB clusters created
# with Aurora MySQL version 5.6.10a.
#
+ # The `serverless` engine mode only applies for Aurora Serverless v1 DB
+ # clusters.
+ #
# For Aurora PostgreSQL, the `global` engine mode isn't required, and
# both the `parallelquery` and the `multimaster` engine modes currently
# aren't supported.
@@ -1211,22 +1229,25 @@ def wait_until(options = {}, &block)
# information, see the following sections in the *Amazon Aurora User
# Guide*\:
#
- # * [ Limitations of Aurora Serverless v1][1]
+ # * [Limitations of Aurora Serverless v1][1]
#
- # * [ Limitations of Parallel Query][2]
+ # * [Requirements for Aurora Serverless v2][2]
#
- # * [ Limitations of Aurora Global Databases][3]
+ # * [Limitations of Parallel Query][3]
#
- # * [ Limitations of Multi-Master Clusters][4]
+ # * [Limitations of Aurora Global Databases][4]
+ #
+ # * [Limitations of Multi-Master Clusters][5]
#
# Valid for: Aurora DB clusters only
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations
- # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations
- # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations
- # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html
+ # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations
+ # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations
+ # [5]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations
# @option options [Types::ScalingConfiguration] :scaling_configuration
# For DB clusters in `serverless` DB engine mode, the scaling properties
# of the DB cluster.
@@ -1449,8 +1470,27 @@ def wait_until(options = {}, &block)
#
# Valid for: Multi-AZ DB clusters only
# @option options [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# Valid for: Multi-AZ DB clusters only
# @option options [Types::ServerlessV2ScalingConfiguration] :serverless_v2_scaling_configuration
@@ -2066,8 +2106,27 @@ def failover(options = {})
#
# Valid for: Multi-AZ DB clusters only
# @option options [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# Valid for: Multi-AZ DB clusters only
# @option options [Types::ServerlessV2ScalingConfiguration] :serverless_v2_scaling_configuration
@@ -2307,7 +2366,7 @@ def modify(options = {})
#
# For more information about exporting CloudWatch Logs for Amazon RDS,
# see [Publishing Database Logs to Amazon CloudWatch Logs][1] in the
- # *Amazon RDS User Guide.*.
+ # *Amazon RDS User Guide*.
#
# For more information about exporting CloudWatch Logs for Amazon
# Aurora, see [Publishing Database Logs to Amazon CloudWatch Logs][2] in
diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb
index 25afbc872b4..fdfa61b9728 100644
--- a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb
+++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb
@@ -408,26 +408,30 @@ def create(options = {})
# If you copy an unencrypted DB cluster snapshot and specify a value for
# the `KmsKeyId` parameter, an error is returned.
# @option options [String] :pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CopyDBClusterSnapshot` API action in the Amazon Web Services Region
- # that contains the source DB cluster snapshot to copy. The
- # `PreSignedUrl` parameter must be used when copying an encrypted DB
- # cluster snapshot from another Amazon Web Services Region. Don't
- # specify `PreSignedUrl` when you are copying an encrypted DB cluster
- # snapshot in the same Amazon Web Services Region.
- #
- # The pre-signed URL must be a valid request for the
- # `CopyDBClusterSnapshot` API action that can be executed in the source
+ # When you are copying a DB cluster snapshot from one Amazon Web
+ # Services GovCloud (US) Region to another, the URL that contains a
+ # Signature Version 4 signed request for the `CopyDBClusterSnapshot` API
+ # operation in the Amazon Web Services Region that contains the source
+ # DB cluster snapshot to copy. Use the `PreSignedUrl` parameter when
+ # copying an encrypted DB cluster snapshot from another Amazon Web
+ # Services Region. Don't specify `PreSignedUrl` when copying an
+ # encrypted DB cluster snapshot in the same Amazon Web Services Region.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions. It's ignored in other Amazon Web Services Regions.
+ #
+ # The presigned URL must be a valid request for the
+ # `CopyDBClusterSnapshot` API operation that can run in the source
# Amazon Web Services Region that contains the encrypted DB cluster
- # snapshot to be copied. The pre-signed URL request must contain the
- # following parameter values:
+ # snapshot to copy. The presigned URL request must contain the following
+ # parameter values:
#
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the KMS
- # key to use to encrypt the copy of the DB cluster snapshot in the
- # destination Amazon Web Services Region. This is the same identifier
- # for both the `CopyDBClusterSnapshot` action that is called in the
- # destination Amazon Web Services Region, and the action contained in
- # the pre-signed URL.
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB cluster snapshot in the destination
+ # Amazon Web Services Region. This is the same identifier for both the
+ # `CopyDBClusterSnapshot` operation that is called in the destination
+ # Amazon Web Services Region, and the operation contained in the
+ # presigned URL.
#
# * `DestinationRegion` - The name of the Amazon Web Services Region
# that the DB cluster snapshot is to be created in.
@@ -449,9 +453,8 @@ def create(options = {})
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
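+ #
+ # A client-level sketch of such a cross-Region copy (identifiers,
+ # Regions, and the KMS key are placeholders):
+ #
+ #     require "aws-sdk-rds"
+ #
+ #     client = Aws::RDS::Client.new(region: "us-east-1") # destination Region
+ #     client.copy_db_cluster_snapshot({
+ #       source_db_cluster_snapshot_identifier: "arn:aws:rds:us-west-2:123456789012:cluster-snapshot:my-snapshot",
+ #       target_db_cluster_snapshot_identifier: "my-snapshot-copy",
+ #       kms_key_id: "alias/my-destination-key",
+ #       source_region: "us-west-2",
+ #     })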
@@ -743,7 +746,7 @@ def delete(options = {})
#
# For more information about exporting CloudWatch Logs for Amazon RDS,
# see [Publishing Database Logs to Amazon CloudWatch Logs][1] in the
- # *Amazon RDS User Guide.*.
+ # *Amazon RDS User Guide*.
#
# For more information about exporting CloudWatch Logs for Amazon
# Aurora, see [Publishing Database Logs to Amazon CloudWatch Logs][2] in
diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_instance.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_instance.rb
index a3ee64342dc..56158e79a11 100644
--- a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_instance.rb
+++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_instance.rb
@@ -483,8 +483,24 @@ def performance_insights_kms_key_id
data[:performance_insights_kms_key_id]
end
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
# @return [Integer]
def performance_insights_retention_period
data[:performance_insights_retention_period]
@@ -1119,14 +1135,16 @@ def wait_until(options = {}, &block)
# * Web and Express editions: Must be an integer from 20 to 1024.
# @option options [required, String] :db_instance_class
# The compute and memory capacity of the DB instance, for example
- # db.m4.large. Not all DB instance classes are available in all Amazon
+ # db.m5.large. Not all DB instance classes are available in all Amazon
# Web Services Regions, or for all database engines. For the full list
# of DB instance classes, and availability for your engine, see [DB
- # Instance Class][1] in the *Amazon RDS User Guide*.
+ # instance classes][1] in the *Amazon RDS User Guide* or [Aurora DB
+ # instance classes][2] in the *Amazon Aurora User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html
# @option options [required, String] :engine
# The name of the database engine to be used for this instance.
#
@@ -1221,7 +1239,9 @@ def wait_until(options = {}, &block)
# @option options [Array&lt;String&gt;] :db_security_groups
# A list of DB security groups to associate with this DB instance.
#
- # Default: The default DB security group for the database engine.
+ # This setting applies to the legacy EC2-Classic platform, which is no
+ # longer used to create new DB instances. Use the `VpcSecurityGroupIds`
+ # setting instead.
# @option options [Array&lt;String&gt;] :vpc_security_group_ids
# A list of Amazon EC2 VPC security groups to associate with this DB
# instance.
@@ -1315,7 +1335,7 @@ def wait_until(options = {}, &block)
#
# * Can't be set to 0 if the DB instance is a source to read replicas
#
- # * Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance
+ # * Can't be set to 0 for an RDS Custom for Oracle DB instance
# @option options [String] :preferred_backup_window
# The daily time range during which automated backups are created if
# automated backups are enabled, using the `BackupRetentionPeriod`
@@ -1394,11 +1414,16 @@ def wait_until(options = {}, &block)
# instance is a Multi-AZ deployment.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. DB instance Availability Zones (AZs) are managed by
+ # the DB cluster.
# @option options [String] :engine_version
# The version number of the database engine to use.
#
# For a list of valid engine versions, use the
- # `DescribeDBEngineVersions` action.
+ # `DescribeDBEngineVersions` operation.
#
# The following are the database engines and links to information about
# the major and minor versions that are available with Amazon RDS. Not
@@ -1471,6 +1496,10 @@ def wait_until(options = {}, &block)
# `general-public-license`
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @option options [Integer] :iops
# The amount of Provisioned IOPS (input/output operations per second) to
# be initially allocated for the DB instance. For information about
@@ -1482,6 +1511,10 @@ def wait_until(options = {}, &block)
# instance. For SQL Server DB instances, must be a multiple between 1
# and 50 of the storage amount for the DB instance.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS
@@ -1495,6 +1528,10 @@ def wait_until(options = {}, &block)
# instance.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @option options [String] :character_set_name
# For supported engines, this value indicates that the DB instance
# should be associated with the specified `CharacterSet`.
@@ -1559,11 +1596,19 @@ def wait_until(options = {}, &block)
# parameter.
#
# Default: `io1` if the `Iops` parameter is specified, otherwise `gp2`
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
# @option options [String] :tde_credential_arn
# The ARN from the key store with which to associate the instance for
# TDE encryption.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @option options [String] :tde_credential_password
# The password for the given ARN from the key store in order to access
# the device.
@@ -1617,6 +1662,10 @@ def wait_until(options = {}, &block)
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. The domain is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html
@@ -1660,6 +1709,10 @@ def wait_until(options = {}, &block)
# the Directory Service.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. The domain is managed by the DB cluster.
# @option options [Integer] :promotion_tier
# A value that specifies the order in which an Aurora Replica is
# promoted to the primary instance after a failure of the existing
@@ -1687,13 +1740,16 @@ def wait_until(options = {}, &block)
# Services Identity and Access Management (IAM) accounts to database
# accounts. By default, mapping isn't enabled.
#
- # This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora,
- # mapping Amazon Web Services IAM accounts to database accounts is
- # managed by the DB cluster.
- #
# For more information, see [ IAM Database Authentication for MySQL and
# PostgreSQL][1] in the *Amazon RDS User Guide*.
#
+ # This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. Mapping Amazon Web Services IAM accounts to database
+ # accounts is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html
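
As a sketch, turning the IAM mapping on for an existing RDS for PostgreSQL instance (on Aurora this is controlled at the cluster instead); the identifier is a placeholder:

```ruby
require "aws-sdk-rds"

client = Aws::RDS::Client.new(region: "us-east-1")

client.modify_db_instance(
  db_instance_identifier:             "example-postgres", # placeholder
  enable_iam_database_authentication: true,
  apply_immediately:                  true
)
```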
@@ -1721,8 +1777,27 @@ def wait_until(options = {}, &block)
#
# This setting doesn't apply to RDS Custom.
# @option options [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
# @option options [Array] :enable_cloudwatch_logs_exports
@@ -1768,6 +1843,10 @@ def wait_until(options = {}, &block)
# instance class of the DB instance.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @option options [Boolean] :deletion_protection
# A value that indicates whether the DB instance has deletion protection
# enabled. The database can't be deleted when deletion protection is
@@ -1794,6 +1873,10 @@ def wait_until(options = {}, &block)
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling
@@ -1998,7 +2081,7 @@ def create(options = {})
# specified DB engine for a cross-Region read replica.
#
# Specifying a parameter group for this operation is only supported for
- # Oracle DB instances. It isn't supported for RDS Custom.
+ # MySQL and Oracle DB instances. It isn't supported for RDS Custom.
#
# Constraints:
#
@@ -2127,9 +2210,16 @@ def create(options = {})
# This setting doesn't apply to RDS Custom, which uses the same KMS key
# as the primary replica.
# @option options [String] :pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CreateDBInstanceReadReplica` API action in the source Amazon Web
- # Services Region that contains the source DB instance.
+ # When you are creating a read replica from one Amazon Web Services
+ # GovCloud (US) Region to another or from one China Amazon Web Services
+ # Region to another, the URL that contains a Signature Version 4 signed
+ # request for the `CreateDBInstanceReadReplica` API operation in the
+ # source Amazon Web Services Region that contains the source DB
+ # instance.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US) Regions
+ # and China Amazon Web Services Regions. It's ignored in other Amazon
+ # Web Services Regions.
#
# You must specify this parameter when you create an encrypted read
# replica from another Amazon Web Services Region by using the Amazon
@@ -2137,32 +2227,31 @@ def create(options = {})
# encrypted read replica in the same Amazon Web Services Region.
#
# The presigned URL must be a valid request for the
- # `CreateDBInstanceReadReplica` API action that can be executed in the
- # source Amazon Web Services Region that contains the encrypted source
- # DB instance. The presigned URL request must contain the following
+ # `CreateDBInstanceReadReplica` API operation that can run in the source
+ # Amazon Web Services Region that contains the encrypted source DB
+ # instance. The presigned URL request must contain the following
# parameter values:
#
# * `DestinationRegion` - The Amazon Web Services Region that the
# encrypted read replica is created in. This Amazon Web Services
# Region is the same one where the `CreateDBInstanceReadReplica`
- # action is called that contains this presigned URL.
+ # operation is called that contains this presigned URL.
#
# For example, if you create an encrypted DB instance in the us-east-1
# Amazon Web Services Region, from a source DB instance in the
# us-west-2 Amazon Web Services Region, then you call the
- # `CreateDBInstanceReadReplica` action in the us-east-1 Amazon Web
+ # `CreateDBInstanceReadReplica` operation in the us-east-1 Amazon Web
# Services Region and provide a presigned URL that contains a call to
- # the `CreateDBInstanceReadReplica` action in the us-west-2 Amazon Web
- # Services Region. For this example, the `DestinationRegion` in the
- # presigned URL must be set to the us-east-1 Amazon Web Services
+ # the `CreateDBInstanceReadReplica` operation in the us-west-2 Amazon
+ # Web Services Region. For this example, the `DestinationRegion` in
+ # the presigned URL must be set to the us-east-1 Amazon Web Services
# Region.
#
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the key
- # to use to encrypt the read replica in the destination Amazon Web
- # Services Region. This is the same identifier for both the
- # `CreateDBInstanceReadReplica` action that is called in the
- # destination Amazon Web Services Region, and the action contained in
- # the presigned URL.
+ # * `KmsKeyId` - The KMS key identifier for the key to use to encrypt
+ # the read replica in the destination Amazon Web Services Region. This
+ # is the same identifier for both the `CreateDBInstanceReadReplica`
+ # operation that is called in the destination Amazon Web Services
+ # Region, and the operation contained in the presigned URL.
#
# * `SourceDBInstanceIdentifier` - The DB instance identifier for the
# encrypted DB instance to be replicated. This identifier must be in
@@ -2181,11 +2270,10 @@ def create(options = {})
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
# autogenerates a presigned URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # operation that can run in the source Amazon Web Services Region.
#
- # `SourceRegion` isn't supported for SQL Server, because SQL Server on
- # Amazon RDS doesn't support cross-Region read replicas.
+ # `SourceRegion` isn't supported for SQL Server, because Amazon RDS for
+ # SQL Server doesn't support cross-Region read replicas.
#
#
#
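
A sketch of the `SourceRegion` shortcut described above: call the API from the destination Region and let the SDK autogenerate the presigned URL. The ARN, identifiers, and KMS alias are placeholders:

```ruby
require "aws-sdk-rds"

# Run in the destination Region; setting :source_region makes the SDK
# build the presigned URL for the source Region automatically.
client = Aws::RDS::Client.new(region: "us-east-1")

client.create_db_instance_read_replica(
  db_instance_identifier:        "example-replica",
  source_db_instance_identifier: "arn:aws:rds:us-west-2:123456789012:db:example-source",
  kms_key_id:                    "alias/example-key", # KMS key in the destination Region
  source_region:                 "us-west-2"
)
```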
@@ -2235,8 +2323,27 @@ def create(options = {})
#
# This setting doesn't apply to RDS Custom.
# @option options [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
# @option options [Array] :enable_cloudwatch_logs_exports
@@ -2561,10 +2668,11 @@ def delete(options = {})
# `CreateDBInstance`.
# @option options [String] :db_instance_class
# The new compute and memory capacity of the DB instance, for example
- # db.m4.large. Not all DB instance classes are available in all Amazon
+ # db.m5.large. Not all DB instance classes are available in all Amazon
# Web Services Regions, or for all database engines. For the full list
# of DB instance classes, and availability for your engine, see [DB
- # Instance Class][1] in the *Amazon RDS User Guide*.
+ # instance classes][1] in the *Amazon RDS User Guide* or [Aurora DB
+ # instance classes][2] in the *Amazon Aurora User Guide*.
#
# If you modify the DB instance class, an outage occurs during the
# change. The change is applied during the next maintenance window,
@@ -2577,6 +2685,7 @@ def delete(options = {})
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html
# @option options [String] :db_subnet_group_name
# The new DB subnet group for the DB instance. You can use this
# parameter to move your DB instance to a different VPC. If your DB
@@ -2683,7 +2792,7 @@ def delete(options = {})
#
# Constraints: Must contain from 8 to 128 characters.
#
- # Amazon RDS API actions never return the password, so this action
+ # Amazon RDS API operations never return the password, so this operation
# provides a way to regain access to a primary instance user if the
# password is lost. This includes restoring privileges that might have
# been accidentally revoked.
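
Because the API never returns the password, regaining access is a modify call; a minimal sketch with a placeholder identifier and password:

```ruby
require "aws-sdk-rds"

client = Aws::RDS::Client.new(region: "us-east-1")

client.modify_db_instance(
  db_instance_identifier: "example-mysql",             # placeholder
  master_user_password:   "replace-with-a-new-secret",
  apply_immediately:      true  # don't wait for the maintenance window
)
```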
@@ -2733,8 +2842,8 @@ def delete(options = {})
# Constraints:
#
# * It must be a value from 0 to 35. It can't be set to 0 if the DB
- # instance is a source to read replicas. It can't be set to 0 or 35
- # for an RDS Custom for Oracle DB instance.
+ # instance is a source to read replicas. It can't be set to 0 for an
+ # RDS Custom for Oracle DB instance.
#
# * It can be specified for a MySQL read replica only if the source is
# running MySQL 5.6 or later.
@@ -3122,7 +3231,7 @@ def delete(options = {})
# DB instance.
#
# For more information, see [Using Amazon Performance Insights][1] in
- # the *Amazon RDS User Guide.*.
+ # the *Amazon RDS User Guide*.
#
# This setting doesn't apply to RDS Custom.
#
@@ -3143,8 +3252,27 @@ def delete(options = {})
#
# This setting doesn't apply to RDS Custom.
# @option options [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
# @option options [Types::CloudwatchLogsExportConfiguration] :cloudwatch_logs_export_configuration
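
The `CloudwatchLogsExportConfiguration` option referenced above takes enable/disable lists of log types. A minimal sketch, assuming a MySQL instance (valid log types vary by engine; the identifier is a placeholder):

```ruby
require "aws-sdk-rds"

client = Aws::RDS::Client.new(region: "us-east-1")

client.modify_db_instance(
  db_instance_identifier: "example-mysql", # placeholder
  cloudwatch_logs_export_configuration: {
    enable_log_types: ["error", "general", "slowquery"]
  },
  apply_immediately: true
)
```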
@@ -4107,9 +4235,8 @@ def security_groups
# })
# @param [Hash] options ({})
# @option options [String] :db_snapshot_identifier
- # A specific DB snapshot identifier to describe. This parameter can't
- # be used in conjunction with `DBInstanceIdentifier`. This value is
- # stored as a lowercase string.
+ # A specific DB snapshot identifier to describe. This value is stored as
+ # a lowercase string.
#
# Constraints:
#
diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_snapshot.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_snapshot.rb
index 979a8f277b4..c31b602cfc9 100644
--- a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_snapshot.rb
+++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_snapshot.rb
@@ -478,39 +478,46 @@ def create(options = {})
# A value that indicates whether to copy all tags from the source DB
# snapshot to the target DB snapshot. By default, tags are not copied.
# @option options [String] :pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CopyDBSnapshot` API action in the source Amazon Web Services Region
- # that contains the source DB snapshot to copy.
+ # When you are copying a snapshot from one Amazon Web Services GovCloud
+ # (US) Region to another, the URL that contains a Signature Version 4
+ # signed request for the `CopyDBSnapshot` API operation in the source
+ # Amazon Web Services Region that contains the source DB snapshot to
+ # copy.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions. It's ignored in other Amazon Web Services Regions.
#
# You must specify this parameter when you copy an encrypted DB snapshot
# from another Amazon Web Services Region by using the Amazon RDS API.
# Don't specify `PreSignedUrl` when you are copying an encrypted DB
# snapshot in the same Amazon Web Services Region.
#
- # The presigned URL must be a valid request for the `CopyDBSnapshot` API
- # action that can be executed in the source Amazon Web Services Region
- # that contains the encrypted DB snapshot to be copied. The presigned
- # URL request must contain the following parameter values:
+ # The presigned URL must be a valid request for the `CopyDBSnapshot`
+ # API operation that can run in the source Amazon Web Services Region
+ # that contains the encrypted DB snapshot to copy. The presigned URL
+ # request must contain the following parameter values:
#
# * `DestinationRegion` - The Amazon Web Services Region that the
# encrypted DB snapshot is copied to. This Amazon Web Services Region
- # is the same one where the `CopyDBSnapshot` action is called that
+ # is the same one where the `CopyDBSnapshot` operation is called that
# contains this presigned URL.
#
# For example, if you copy an encrypted DB snapshot from the us-west-2
# Amazon Web Services Region to the us-east-1 Amazon Web Services
- # Region, then you call the `CopyDBSnapshot` action in the us-east-1
- # Amazon Web Services Region and provide a presigned URL that contains
- # a call to the `CopyDBSnapshot` action in the us-west-2 Amazon Web
- # Services Region. For this example, the `DestinationRegion` in the
- # presigned URL must be set to the us-east-1 Amazon Web Services
- # Region.
- #
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the KMS
- # key to use to encrypt the copy of the DB snapshot in the destination
- # Amazon Web Services Region. This is the same identifier for both the
- # `CopyDBSnapshot` action that is called in the destination Amazon Web
- # Services Region, and the action contained in the presigned URL.
+ # Region, then you call the `CopyDBSnapshot` operation in the
+ # us-east-1 Amazon Web Services Region and provide a presigned URL
+ # that contains a call to the `CopyDBSnapshot` operation in the
+ # us-west-2 Amazon Web Services Region. For this example, the
+ # `DestinationRegion` in the presigned URL must be set to the
+ # us-east-1 Amazon Web Services Region.
+ #
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB snapshot in the destination Amazon Web
+ # Services Region. This is the same identifier for both the
+ # `CopyDBSnapshot` operation that is called in the destination Amazon
+ # Web Services Region, and the operation contained in the presigned
+ # URL.
#
# * `SourceDBSnapshotIdentifier` - The DB snapshot identifier for the
# encrypted snapshot to be copied. This identifier must be in the
@@ -527,9 +534,8 @@ def create(options = {})
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
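
The same `SourceRegion` shortcut applies to snapshot copies; a sketch run from the destination Region, with placeholder ARNs and KMS alias:

```ruby
require "aws-sdk-rds"

client = Aws::RDS::Client.new(region: "us-east-1")

client.copy_db_snapshot(
  source_db_snapshot_identifier: "arn:aws:rds:us-west-2:123456789012:snapshot:example-snap",
  target_db_snapshot_identifier: "example-snap-copy",
  kms_key_id:                    "alias/example-key", # KMS key in the destination Region
  source_region:                 "us-west-2"          # SDK autogenerates the presigned URL
)
```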
diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb
index d1746c8488c..c13729eb662 100644
--- a/gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb
+++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/resource.rb
@@ -390,25 +390,25 @@ def client
#
# Valid for: Aurora DB clusters and Multi-AZ DB clusters
# @option options [String] :pre_signed_url
- # A URL that contains a Signature Version 4 signed request for the
- # `CreateDBCluster` action to be called in the source Amazon Web
- # Services Region where the DB cluster is replicated from. Specify
- # `PreSignedUrl` only when you are performing cross-Region replication
- # from an encrypted DB cluster.
- #
- # The pre-signed URL must be a valid request for the `CreateDBCluster`
- # API action that can be executed in the source Amazon Web Services
- # Region that contains the encrypted DB cluster to be copied.
- #
- # The pre-signed URL request must contain the following parameter
- # values:
- #
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the KMS
- # key to use to encrypt the copy of the DB cluster in the destination
- # Amazon Web Services Region. This should refer to the same KMS key
- # for both the `CreateDBCluster` action that is called in the
- # destination Amazon Web Services Region, and the action contained in
- # the pre-signed URL.
+ # When you are replicating a DB cluster from one Amazon Web Services
+ # GovCloud (US) Region to another, a URL that contains a Signature
+ # Version 4 signed request for the `CreateDBCluster` operation to be
+ # called in the source Amazon Web Services Region where the DB cluster
+ # is replicated from. Specify `PreSignedUrl` only when you are
+ # performing cross-Region replication from an encrypted DB cluster.
+ #
+ # The presigned URL must be a valid request for the `CreateDBCluster`
+ # API operation that can run in the source Amazon Web Services Region
+ # that contains the encrypted DB cluster to copy.
+ #
+ # The presigned URL request must contain the following parameter values:
+ #
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB cluster in the destination Amazon Web
+ # Services Region. This should refer to the same KMS key for both the
+ # `CreateDBCluster` operation that is called in the destination Amazon
+ # Web Services Region, and the operation contained in the presigned
+ # URL.
#
# * `DestinationRegion` - The name of the Amazon Web Services Region
# that the Aurora read replica will be created in.
@@ -429,9 +429,8 @@ def client
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
@@ -447,7 +446,7 @@ def client
# accounts. By default, mapping isn't enabled.
#
# For more information, see [ IAM Database Authentication][1] in the
- # *Amazon Aurora User Guide.*.
+ # *Amazon Aurora User Guide*.
#
# Valid for: Aurora DB clusters only
#
@@ -518,6 +517,9 @@ def client
# The `multimaster` engine mode only applies for DB clusters created
# with Aurora MySQL version 5.6.10a.
#
+ # The `serverless` engine mode only applies for Aurora Serverless v1 DB
+ # clusters.
+ #
# For Aurora PostgreSQL, the `global` engine mode isn't required, and
# both the `parallelquery` and the `multimaster` engine modes currently
# aren't supported.
@@ -526,22 +528,25 @@ def client
# information, see the following sections in the *Amazon Aurora User
# Guide*\:
#
- # * [ Limitations of Aurora Serverless v1][1]
+ # * [Limitations of Aurora Serverless v1][1]
+ #
+ # * [Requirements for Aurora Serverless v2][2]
#
- # * [ Limitations of Parallel Query][2]
+ # * [Limitations of Parallel Query][3]
#
- # * [ Limitations of Aurora Global Databases][3]
+ # * [Limitations of Aurora Global Databases][4]
#
- # * [ Limitations of Multi-Master Clusters][4]
+ # * [Limitations of Multi-Master Clusters][5]
#
# Valid for: Aurora DB clusters only
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations
- # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations
- # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations
- # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html
+ # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations
+ # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations
+ # [5]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations
# @option options [Types::ScalingConfiguration] :scaling_configuration
# For DB clusters in `serverless` DB engine mode, the scaling properties
# of the DB cluster.
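
A sketch of an Aurora Serverless v1 cluster using the `serverless` engine mode and the scaling properties mentioned above; engine version availability varies, and identifiers and credentials are placeholders:

```ruby
require "aws-sdk-rds"

rds = Aws::RDS::Resource.new(region: "us-east-1")

rds.create_db_cluster(
  db_cluster_identifier: "example-serverless-v1",   # placeholder
  engine:                "aurora-mysql",
  engine_mode:           "serverless",
  master_username:       "admin",
  master_user_password:  "replace-with-a-secret",
  scaling_configuration: {
    min_capacity: 1,
    max_capacity: 8,
    auto_pause:   true
  }
)
```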
@@ -764,8 +769,27 @@ def client
#
# Valid for: Multi-AZ DB clusters only
# @option options [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# Valid for: Multi-AZ DB clusters only
# @option options [Types::ServerlessV2ScalingConfiguration] :serverless_v2_scaling_configuration
@@ -1179,14 +1203,16 @@ def create_db_cluster_parameter_group(options = {})
# * Web and Express editions: Must be an integer from 20 to 1024.
# @option options [required, String] :db_instance_class
# The compute and memory capacity of the DB instance, for example
- # db.m4.large. Not all DB instance classes are available in all Amazon
+ # db.m5.large. Not all DB instance classes are available in all Amazon
# Web Services Regions, or for all database engines. For the full list
# of DB instance classes, and availability for your engine, see [DB
- # Instance Class][1] in the *Amazon RDS User Guide*.
+ # instance classes][1] in the *Amazon RDS User Guide* or [Aurora DB
+ # instance classes][2] in the *Amazon Aurora User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html
# @option options [required, String] :engine
# The name of the database engine to be used for this instance.
#
@@ -1281,7 +1307,9 @@ def create_db_cluster_parameter_group(options = {})
# @option options [Array] :db_security_groups
# A list of DB security groups to associate with this DB instance.
#
- # Default: The default DB security group for the database engine.
+ # This setting applies to the legacy EC2-Classic platform, which is no
+ # longer used to create new DB instances. Use the `VpcSecurityGroupIds`
+ # setting instead.
# @option options [Array] :vpc_security_group_ids
# A list of Amazon EC2 VPC security groups to associate with this DB
# instance.
@@ -1375,7 +1403,7 @@ def create_db_cluster_parameter_group(options = {})
#
# * Can't be set to 0 if the DB instance is a source to read replicas
#
- # * Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance
+ # * Can't be set to 0 for an RDS Custom for Oracle DB instance
# @option options [String] :preferred_backup_window
# The daily time range during which automated backups are created if
# automated backups are enabled, using the `BackupRetentionPeriod`
@@ -1454,11 +1482,16 @@ def create_db_cluster_parameter_group(options = {})
# instance is a Multi-AZ deployment.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. DB instance Availability Zones (AZs) are managed by
+ # the DB cluster.
# @option options [String] :engine_version
# The version number of the database engine to use.
#
# For a list of valid engine versions, use the
- # `DescribeDBEngineVersions` action.
+ # `DescribeDBEngineVersions` operation.
#
# The following are the database engines and links to information about
# the major and minor versions that are available with Amazon RDS. Not
@@ -1531,6 +1564,10 @@ def create_db_cluster_parameter_group(options = {})
# `general-public-license`
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @option options [Integer] :iops
# The amount of Provisioned IOPS (input/output operations per second) to
# be initially allocated for the DB instance. For information about
@@ -1542,6 +1579,10 @@ def create_db_cluster_parameter_group(options = {})
# instance. For SQL Server DB instances, must be a multiple between 1
# and 50 of the storage amount for the DB instance.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS
@@ -1555,6 +1596,10 @@ def create_db_cluster_parameter_group(options = {})
# instance.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @option options [String] :character_set_name
# For supported engines, this value indicates that the DB instance
# should be associated with the specified `CharacterSet`.
@@ -1619,11 +1664,19 @@ def create_db_cluster_parameter_group(options = {})
# parameter.
#
# Default: `io1` if the `Iops` parameter is specified, otherwise `gp2`
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
# @option options [String] :tde_credential_arn
# The ARN from the key store with which to associate the instance for
# TDE encryption.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @option options [String] :tde_credential_password
# The password for the given ARN from the key store in order to access
# the device.
@@ -1677,6 +1730,10 @@ def create_db_cluster_parameter_group(options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. The domain is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html
@@ -1720,6 +1777,10 @@ def create_db_cluster_parameter_group(options = {})
# the Directory Service.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. The domain is managed by the DB cluster.
# @option options [Integer] :promotion_tier
# A value that specifies the order in which an Aurora Replica is
# promoted to the primary instance after a failure of the existing
@@ -1747,13 +1808,16 @@ def create_db_cluster_parameter_group(options = {})
# Services Identity and Access Management (IAM) accounts to database
# accounts. By default, mapping isn't enabled.
#
- # This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora,
- # mapping Amazon Web Services IAM accounts to database accounts is
- # managed by the DB cluster.
- #
# For more information, see [ IAM Database Authentication for MySQL and
# PostgreSQL][1] in the *Amazon RDS User Guide*.
#
+ # This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. Mapping Amazon Web Services IAM accounts to database
+ # accounts is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html
@@ -1781,8 +1845,27 @@ def create_db_cluster_parameter_group(options = {})
#
# This setting doesn't apply to RDS Custom.
# @option options [Integer] :performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default is
+ # 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
# @option options [Array] :enable_cloudwatch_logs_exports
@@ -1828,6 +1911,10 @@ def create_db_cluster_parameter_group(options = {})
# instance class of the DB instance.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @option options [Boolean] :deletion_protection
# A value that indicates whether the DB instance has deletion protection
# enabled. The database can't be deleted when deletion protection is
@@ -1854,6 +1941,10 @@ def create_db_cluster_parameter_group(options = {})
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling
@@ -2856,8 +2947,7 @@ def db_security_groups(options = {})
# @param [Hash] options ({})
# @option options [String] :db_instance_identifier
# The ID of the DB instance to retrieve the list of DB snapshots for.
- # This parameter can't be used in conjunction with
- # `DBSnapshotIdentifier`. This parameter isn't case-sensitive.
+ # This parameter isn't case-sensitive.
#
# Constraints:
#
@@ -2865,9 +2955,8 @@ def db_security_groups(options = {})
#
# ^
# @option options [String] :db_snapshot_identifier
- # A specific DB snapshot identifier to describe. This parameter can't
- # be used in conjunction with `DBInstanceIdentifier`. This value is
- # stored as a lowercase string.
+ # A specific DB snapshot identifier to describe. This value is stored as
+ # a lowercase string.
#
# Constraints:
#
diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb
index cde03f7ba9b..dfa2a7e4d9e 100644
--- a/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb
+++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb
@@ -1116,26 +1116,31 @@ class CopyDBClusterParameterGroupResult < Struct.new(
# @return [String]
#
# @!attribute [rw] pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CopyDBClusterSnapshot` API action in the Amazon Web Services Region
- # that contains the source DB cluster snapshot to copy. The
- # `PreSignedUrl` parameter must be used when copying an encrypted DB
- # cluster snapshot from another Amazon Web Services Region. Don't
- # specify `PreSignedUrl` when you are copying an encrypted DB cluster
- # snapshot in the same Amazon Web Services Region.
- #
- # The pre-signed URL must be a valid request for the
- # `CopyDBClusterSnapshot` API action that can be executed in the
- # source Amazon Web Services Region that contains the encrypted DB
- # cluster snapshot to be copied. The pre-signed URL request must
- # contain the following parameter values:
- #
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the
- # KMS key to use to encrypt the copy of the DB cluster snapshot in
- # the destination Amazon Web Services Region. This is the same
- # identifier for both the `CopyDBClusterSnapshot` action that is
- # called in the destination Amazon Web Services Region, and the
- # action contained in the pre-signed URL.
+ # When you are copying a DB cluster snapshot from one Amazon Web
+ # Services GovCloud (US) Region to another, the URL that contains a
+ # Signature Version 4 signed request for the `CopyDBClusterSnapshot`
+ # API operation in the Amazon Web Services Region that contains the
+ # source DB cluster snapshot to copy. Use the `PreSignedUrl` parameter
+ # when copying an encrypted DB cluster snapshot from another Amazon
+ # Web Services Region. Don't specify `PreSignedUrl` when copying an
+ # encrypted DB cluster snapshot in the same Amazon Web Services
+ # Region.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions. It's ignored in other Amazon Web Services Regions.
+ #
+ # The presigned URL must be a valid request for the
+ # `CopyDBClusterSnapshot` API operation that can run in the source
+ # Amazon Web Services Region that contains the encrypted DB cluster
+ # snapshot to copy. The presigned URL request must contain the
+ # following parameter values:
+ #
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB cluster snapshot in the destination
+ # Amazon Web Services Region. This is the same identifier for both
+ # the `CopyDBClusterSnapshot` operation that is called in the
+ # destination Amazon Web Services Region, and the operation
+ # contained in the presigned URL.
#
# * `DestinationRegion` - The name of the Amazon Web Services Region
# that the DB cluster snapshot is to be created in.
@@ -1157,9 +1162,8 @@ class CopyDBClusterParameterGroupResult < Struct.new(
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
@@ -1340,9 +1344,7 @@ class CopyDBParameterGroupResult < Struct.new(
# must be the Amazon Resource Name (ARN) of the shared DB snapshot.
#
# If you are copying an encrypted snapshot, this parameter must be in
- # the ARN format for the source Amazon Web Services Region, and must
- # match the `SourceDBSnapshotIdentifier` in the `PreSignedUrl`
- # parameter.
+ # the ARN format for the source Amazon Web Services Region.
#
# Constraints:
#
@@ -1413,40 +1415,46 @@ class CopyDBParameterGroupResult < Struct.new(
# @return [Boolean]
#
# @!attribute [rw] pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CopyDBSnapshot` API action in the source Amazon Web Services Region
- # that contains the source DB snapshot to copy.
+ # When you are copying a snapshot from one Amazon Web Services
+ # GovCloud (US) Region to another, the URL that contains a Signature
+ # Version 4 signed request for the `CopyDBSnapshot` API operation in
+ # the source Amazon Web Services Region that contains the source DB
+ # snapshot to copy.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions. It's ignored in other Amazon Web Services Regions.
#
# You must specify this parameter when you copy an encrypted DB
# snapshot from another Amazon Web Services Region by using the Amazon
# RDS API. Don't specify `PreSignedUrl` when you are copying an
# encrypted DB snapshot in the same Amazon Web Services Region.
#
- # The presigned URL must be a valid request for the `CopyDBSnapshot`
- # API action that can be executed in the source Amazon Web Services
- # Region that contains the encrypted DB snapshot to be copied. The
- # presigned URL request must contain the following parameter values:
+ # The presigned URL must be a valid request for the `CopyDBSnapshot`
+ # API operation that can run in the source Amazon Web Services
+ # Region that contains the encrypted DB snapshot to copy. The
+ # presigned URL request must contain the following parameter values:
#
# * `DestinationRegion` - The Amazon Web Services Region that the
# encrypted DB snapshot is copied to. This Amazon Web Services
- # Region is the same one where the `CopyDBSnapshot` action is called
- # that contains this presigned URL.
+ # Region is the same one where the `CopyDBSnapshot` operation is
+ # called that contains this presigned URL.
#
# For example, if you copy an encrypted DB snapshot from the
# us-west-2 Amazon Web Services Region to the us-east-1 Amazon Web
- # Services Region, then you call the `CopyDBSnapshot` action in the
- # us-east-1 Amazon Web Services Region and provide a presigned URL
- # that contains a call to the `CopyDBSnapshot` action in the
+ # Services Region, then you call the `CopyDBSnapshot` operation in
+ # the us-east-1 Amazon Web Services Region and provide a presigned
+ # URL that contains a call to the `CopyDBSnapshot` operation in the
# us-west-2 Amazon Web Services Region. For this example, the
# `DestinationRegion` in the presigned URL must be set to the
# us-east-1 Amazon Web Services Region.
#
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the
- # KMS key to use to encrypt the copy of the DB snapshot in the
- # destination Amazon Web Services Region. This is the same
- # identifier for both the `CopyDBSnapshot` action that is called in
- # the destination Amazon Web Services Region, and the action
- # contained in the presigned URL.
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB snapshot in the destination Amazon Web
+ # Services Region. This is the same identifier for both the
+ # `CopyDBSnapshot` operation that is called in the destination
+ # Amazon Web Services Region, and the operation contained in the
+ # presigned URL.
#
# * `SourceDBSnapshotIdentifier` - The DB snapshot identifier for the
# encrypted snapshot to be copied. This identifier must be in the
@@ -1464,9 +1472,8 @@ class CopyDBParameterGroupResult < Struct.new(
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
@@ -2199,25 +2206,26 @@ class CreateDBClusterEndpointMessage < Struct.new(
# @return [String]
#
# @!attribute [rw] pre_signed_url
- # A URL that contains a Signature Version 4 signed request for the
- # `CreateDBCluster` action to be called in the source Amazon Web
- # Services Region where the DB cluster is replicated from. Specify
- # `PreSignedUrl` only when you are performing cross-Region replication
- # from an encrypted DB cluster.
- #
- # The pre-signed URL must be a valid request for the `CreateDBCluster`
- # API action that can be executed in the source Amazon Web Services
- # Region that contains the encrypted DB cluster to be copied.
- #
- # The pre-signed URL request must contain the following parameter
+ # When you are replicating a DB cluster from one Amazon Web Services
+ # GovCloud (US) Region to another, a URL that contains a Signature
+ # Version 4 signed request for the `CreateDBCluster` operation to be
+ # called in the source Amazon Web Services Region where the DB cluster
+ # is replicated from. Specify `PreSignedUrl` only when you are
+ # performing cross-Region replication from an encrypted DB cluster.
+ #
+ # The presigned URL must be a valid request for the `CreateDBCluster`
+ # API operation that can run in the source Amazon Web Services Region
+ # that contains the encrypted DB cluster to copy.
+ #
+ # The presigned URL request must contain the following parameter
# values:
#
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the
- # KMS key to use to encrypt the copy of the DB cluster in the
- # destination Amazon Web Services Region. This should refer to the
- # same KMS key for both the `CreateDBCluster` action that is called
- # in the destination Amazon Web Services Region, and the action
- # contained in the pre-signed URL.
+ # * `KmsKeyId` - The KMS key identifier for the KMS key to use to
+ # encrypt the copy of the DB cluster in the destination Amazon Web
+ # Services Region. This should refer to the same KMS key for both
+ # the `CreateDBCluster` operation that is called in the destination
+ # Amazon Web Services Region, and the operation contained in the
+ # presigned URL.
#
# * `DestinationRegion` - The name of the Amazon Web Services Region
# that the Aurora read replica will be created in.
@@ -2238,9 +2246,8 @@ class CreateDBClusterEndpointMessage < Struct.new(
# If you are using an Amazon Web Services SDK tool or the CLI, you can
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
- # autogenerates a pre-signed URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
#
#
#
@@ -2258,7 +2265,7 @@ class CreateDBClusterEndpointMessage < Struct.new(
# accounts. By default, mapping isn't enabled.
#
# For more information, see [ IAM Database Authentication][1] in the
- # *Amazon Aurora User Guide.*.
+ # *Amazon Aurora User Guide*.
#
# Valid for: Aurora DB clusters only
#
@@ -2335,6 +2342,9 @@ class CreateDBClusterEndpointMessage < Struct.new(
# The `multimaster` engine mode only applies for DB clusters created
# with Aurora MySQL version 5.6.10a.
#
+ # The `serverless` engine mode only applies for Aurora Serverless v1
+ # DB clusters.
+ #
# For Aurora PostgreSQL, the `global` engine mode isn't required, and
# both the `parallelquery` and the `multimaster` engine modes
# currently aren't supported.
@@ -2343,22 +2353,25 @@ class CreateDBClusterEndpointMessage < Struct.new(
# information, see the following sections in the *Amazon Aurora User
# Guide*\:
#
- # * [ Limitations of Aurora Serverless v1][1]
+ # * [Limitations of Aurora Serverless v1][1]
#
- # * [ Limitations of Parallel Query][2]
+ # * [Requirements for Aurora Serverless v2][2]
#
- # * [ Limitations of Aurora Global Databases][3]
+ # * [Limitations of Parallel Query][3]
#
- # * [ Limitations of Multi-Master Clusters][4]
+ # * [Limitations of Aurora Global Databases][4]
+ #
+ # * [Limitations of Multi-Master Clusters][5]
#
# Valid for: Aurora DB clusters only
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations
- # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations
- # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations
- # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html
+ # [3]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations
+ # [4]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations
+ # [5]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations
# @return [String]
#
# @!attribute [rw] scaling_configuration
@@ -2622,8 +2635,27 @@ class CreateDBClusterEndpointMessage < Struct.new(
# @return [String]
#
# @!attribute [rw] performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default
+ # is 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# Valid for: Multi-AZ DB clusters only
# @return [Integer]
@@ -3229,14 +3261,16 @@ class CreateDBClusterSnapshotResult < Struct.new(
#
# @!attribute [rw] db_instance_class
# The compute and memory capacity of the DB instance, for example
- # db.m4.large. Not all DB instance classes are available in all Amazon
+ # db.m5.large. Not all DB instance classes are available in all Amazon
# Web Services Regions, or for all database engines. For the full list
# of DB instance classes, and availability for your engine, see [DB
- # Instance Class][1] in the *Amazon RDS User Guide*.
+ # instance classes][1] in the *Amazon RDS User Guide* or [Aurora DB
+ # instance classes][2] in the *Amazon Aurora User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html
# @return [String]
#
# @!attribute [rw] engine
@@ -3339,7 +3373,9 @@ class CreateDBClusterSnapshotResult < Struct.new(
# @!attribute [rw] db_security_groups
# A list of DB security groups to associate with this DB instance.
#
- # Default: The default DB security group for the database engine.
+ # This setting applies to the legacy EC2-Classic platform, which is no
+ # longer used to create new DB instances. Use the
+ # `VpcSecurityGroupIds` setting instead.
# @return [Array]
#
# @!attribute [rw] vpc_security_group_ids
@@ -3445,7 +3481,7 @@ class CreateDBClusterSnapshotResult < Struct.new(
#
# * Can't be set to 0 if the DB instance is a source to read replicas
#
- # * Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance
+ # * Can't be set to 0 for an RDS Custom for Oracle DB instance
# @return [Integer]
#
# @!attribute [rw] preferred_backup_window
@@ -3531,13 +3567,18 @@ class CreateDBClusterSnapshotResult < Struct.new(
# DB instance is a Multi-AZ deployment.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. DB instance Availability Zones (AZs) are managed by
+ # the DB cluster.
# @return [Boolean]
#
# @!attribute [rw] engine_version
# The version number of the database engine to use.
#
# For a list of valid engine versions, use the
- # `DescribeDBEngineVersions` action.
+ # `DescribeDBEngineVersions` operation.
#
# The following are the database engines and links to information
# about the major and minor versions that are available with Amazon
@@ -3614,6 +3655,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
# `general-public-license`
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @return [String]
#
# @!attribute [rw] iops
@@ -3627,6 +3672,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
# amount for the DB instance. For SQL Server DB instances, must be a
# multiple between 1 and 50 of the storage amount for the DB instance.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS
@@ -3642,6 +3691,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
# associated with a DB instance.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @return [String]
#
# @!attribute [rw] character_set_name
@@ -3719,6 +3772,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
# parameter.
#
# Default: `io1` if the `Iops` parameter is specified, otherwise `gp2`
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
# @return [String]
#
# @!attribute [rw] tde_credential_arn
@@ -3726,6 +3783,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
# TDE encryption.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @return [String]
#
# @!attribute [rw] tde_credential_password
@@ -3789,6 +3850,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. The domain is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html
@@ -3840,6 +3905,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
# the Directory Service.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. The domain is managed by the DB cluster.
# @return [String]
#
# @!attribute [rw] promotion_tier
@@ -3873,13 +3942,16 @@ class CreateDBClusterSnapshotResult < Struct.new(
# Services Identity and Access Management (IAM) accounts to database
# accounts. By default, mapping isn't enabled.
#
- # This setting doesn't apply to RDS Custom or Amazon Aurora. In
- # Aurora, mapping Amazon Web Services IAM accounts to database
- # accounts is managed by the DB cluster.
- #
# For more information, see [ IAM Database Authentication for MySQL
# and PostgreSQL][1] in the *Amazon RDS User Guide*.
#
+ # This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable. Mapping Amazon Web Services IAM accounts to database
+ # accounts is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html
@@ -3914,8 +3986,27 @@ class CreateDBClusterSnapshotResult < Struct.new(
# @return [String]
#
# @!attribute [rw] performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default
+ # is 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
# @return [Integer]
@@ -3966,6 +4057,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
# DB instance class of the DB instance.
#
# This setting doesn't apply to RDS Custom.
+ #
+ # **Amazon Aurora**
+ #
+ # Not applicable.
# @return [Array]
#
# @!attribute [rw] deletion_protection
@@ -3996,6 +4091,10 @@ class CreateDBClusterSnapshotResult < Struct.new(
#
# This setting doesn't apply to RDS Custom.
#
+ # **Amazon Aurora**
+ #
+ # Not applicable. Storage is managed by the DB cluster.
+ #
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling
@@ -4321,7 +4420,8 @@ class CreateDBInstanceMessage < Struct.new(
# specified DB engine for a cross-Region read replica.
#
# Specifying a parameter group for this operation is only supported
- # for Oracle DB instances. It isn't supported for RDS Custom.
+ # for MySQL and Oracle DB instances. It isn't supported for RDS
+ # Custom.
#
# Constraints:
#
@@ -4472,9 +4572,16 @@ class CreateDBInstanceMessage < Struct.new(
# @return [String]
#
# @!attribute [rw] pre_signed_url
- # The URL that contains a Signature Version 4 signed request for the
- # `CreateDBInstanceReadReplica` API action in the source Amazon Web
- # Services Region that contains the source DB instance.
+ # When you are creating a read replica from one Amazon Web Services
+ # GovCloud (US) Region to another or from one China Amazon Web
+ # Services Region to another, the URL that contains a Signature
+ # Version 4 signed request for the `CreateDBInstanceReadReplica` API
+ # operation in the source Amazon Web Services Region that contains the
+ # source DB instance.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions and China Amazon Web Services Regions. It's ignored in
+ # other Amazon Web Services Regions.
#
# You must specify this parameter when you create an encrypted read
# replica from another Amazon Web Services Region by using the Amazon
@@ -4482,7 +4589,7 @@ class CreateDBInstanceMessage < Struct.new(
# encrypted read replica in the same Amazon Web Services Region.
#
# The presigned URL must be a valid request for the
- # `CreateDBInstanceReadReplica` API action that can be executed in the
+ # `CreateDBInstanceReadReplica` API operation that can run in the
# source Amazon Web Services Region that contains the encrypted source
# DB instance. The presigned URL request must contain the following
# parameter values:
@@ -4490,24 +4597,24 @@ class CreateDBInstanceMessage < Struct.new(
# * `DestinationRegion` - The Amazon Web Services Region that the
# encrypted read replica is created in. This Amazon Web Services
# Region is the same one where the `CreateDBInstanceReadReplica`
- # action is called that contains this presigned URL.
+ # operation is called that contains this presigned URL.
#
# For example, if you create an encrypted DB instance in the
# us-east-1 Amazon Web Services Region, from a source DB instance in
# the us-west-2 Amazon Web Services Region, then you call the
- # `CreateDBInstanceReadReplica` action in the us-east-1 Amazon Web
- # Services Region and provide a presigned URL that contains a call
- # to the `CreateDBInstanceReadReplica` action in the us-west-2
- # Amazon Web Services Region. For this example, the
+ # `CreateDBInstanceReadReplica` operation in the us-west-1 Amazon
+ # Web Services Region and provide a presigned URL that contains a
+ # call to the `CreateDBInstanceReadReplica` operation in the
+ # us-east-2 Amazon Web Services Region. For this example, the
# `DestinationRegion` in the presigned URL must be set to the
# us-west-1 Amazon Web Services Region.
#
- # * `KmsKeyId` - The Amazon Web Services KMS key identifier for the
- # key to use to encrypt the read replica in the destination Amazon
- # Web Services Region. This is the same identifier for both the
- # `CreateDBInstanceReadReplica` action that is called in the
- # destination Amazon Web Services Region, and the action contained
- # in the presigned URL.
+ # * `KmsKeyId` - The KMS key identifier for the key to use to encrypt
+ # the read replica in the destination Amazon Web Services Region.
+ # This is the same identifier for both the
+ # `CreateDBInstanceReadReplica` operation that is called in the
+ # destination Amazon Web Services Region, and the operation
+ # contained in the presigned URL.
#
# * `SourceDBInstanceIdentifier` - The DB instance identifier for the
# encrypted DB instance to be replicated. This identifier must be in
@@ -4527,11 +4634,10 @@ class CreateDBInstanceMessage < Struct.new(
# specify `SourceRegion` (or `--source-region` for the CLI) instead of
# specifying `PreSignedUrl` manually. Specifying `SourceRegion`
# autogenerates a presigned URL that is a valid request for the
- # operation that can be executed in the source Amazon Web Services
- # Region.
+ # operation that can run in the source Amazon Web Services Region.
#
- # `SourceRegion` isn't supported for SQL Server, because SQL Server
- # on Amazon RDS doesn't support cross-Region read replicas.
+ # `SourceRegion` isn't supported for SQL Server, because Amazon RDS
+ # for SQL Server doesn't support cross-Region read replicas.
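+ #
+ # As an illustrative sketch only (placeholder identifiers; `client`
+ # is assumed to be an `Aws::RDS::Client` in the destination Region),
+ # letting `SourceRegion` autogenerate the presigned URL might look
+ # like:
+ #
+ #     client.create_db_instance_read_replica(
+ #       db_instance_identifier: "myreplica",
+ #       source_db_instance_identifier:
+ #         "arn:aws:rds:us-east-2:123456789012:db:mydb",
+ #       kms_key_id: "my-destination-region-kms-key",
+ #       source_region: "us-east-2"
+ #     )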
#
#
#
@@ -4590,8 +4696,27 @@ class CreateDBInstanceMessage < Struct.new(
# @return [String]
#
# @!attribute [rw] performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default
+ # is 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
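+ #
+ # A minimal, hypothetical sketch of setting this value at creation
+ # time (`client` is an assumed `Aws::RDS::Client`; placeholders
+ # throughout, and the other parameters that `CreateDBInstance`
+ # requires are omitted):
+ #
+ #     client.create_db_instance(
+ #       db_instance_identifier: "mydb",
+ #       db_instance_class: "db.m5.large",
+ #       engine: "mysql",
+ #       enable_performance_insights: true,
+ #       performance_insights_retention_period: 93 # 3 months * 31
+ #     )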
# @return [Integer]
@@ -5068,8 +5193,10 @@ class CreateDBProxyEndpointResponse < Struct.new(
# @!attribute [rw] engine_family
# The kinds of databases that the proxy can connect to. This value
# determines which database network protocol the proxy recognizes when
- # it interprets network traffic to and from the database. The engine
- # family applies to MySQL and PostgreSQL for both RDS and Aurora.
+ # it interprets network traffic to and from the database. For Aurora
+ # MySQL, RDS for MariaDB, and RDS for MySQL databases, specify
+ # `MYSQL`. For Aurora PostgreSQL and RDS for PostgreSQL databases,
+ # specify `POSTGRESQL`.
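+ #
+ # A hedged sketch of how this choice surfaces in a creation call
+ # (names are placeholders; `secret_arn` and `proxy_role_arn` are
+ # assumed local variables holding the Secrets Manager secret ARN and
+ # the IAM role ARN):
+ #
+ #     client.create_db_proxy(
+ #       db_proxy_name: "my-mysql-proxy",
+ #       engine_family: "MYSQL", # Aurora MySQL, RDS for MariaDB/MySQL
+ #       auth: [{ auth_scheme: "SECRETS", secret_arn: secret_arn }],
+ #       role_arn: proxy_role_arn,
+ #       vpc_subnet_ids: ["subnet-0a1b2c3d", "subnet-4e5f6a7b"]
+ #     )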
# @return [String]
#
# @!attribute [rw] auth
@@ -5522,7 +5649,7 @@ class CreateEventSubscriptionResult < Struct.new(
# @return [Boolean]
#
# @!attribute [rw] database_name
- # The name for your database of up to 64 alpha-numeric characters. If
+ # The name for your database of up to 64 alphanumeric characters. If
# you do not provide a name, Amazon Aurora will not create a database
# in the global database cluster you are creating.
# @return [String]
@@ -6133,8 +6260,24 @@ class CustomDBEngineVersionQuotaExceededFault < Aws::EmptyStructure; end
# @return [String]
#
# @!attribute [rw] performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default
+ # is 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
#
# This setting is only for non-Aurora Multi-AZ DB clusters.
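+ #
+ # For a Multi-AZ DB cluster this might be set at creation time, as
+ # in this sketch (placeholders; other required cluster settings
+ # omitted):
+ #
+ #     client.create_db_cluster(
+ #       db_cluster_identifier: "my-multi-az-cluster",
+ #       engine: "mysql",
+ #       db_cluster_instance_class: "db.r6gd.large",
+ #       enable_performance_insights: true,
+ #       performance_insights_retention_period: 341 # 11 months * 31
+ #     )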
# @return [Integer]
@@ -7546,8 +7689,24 @@ class DBEngineVersionMessage < Struct.new(
# @return [String]
#
# @!attribute [rw] performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default
+ # is 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
# @return [Integer]
#
# @!attribute [rw] enabled_cloudwatch_logs_exports
@@ -8337,8 +8496,12 @@ class DBParameterGroupsMessage < Struct.new(
# @return [String]
#
# @!attribute [rw] engine_family
- # The engine family applies to MySQL and PostgreSQL for both RDS and
- # Aurora.
+ # The kinds of databases that the proxy can connect to. This value
+ # determines which database network protocol the proxy recognizes when
+ # it interprets network traffic to and from the database. `MYSQL`
+ # supports Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases.
+ # `POSTGRESQL` supports Aurora PostgreSQL and RDS for PostgreSQL
+ # databases.
# @return [String]
#
# @!attribute [rw] vpc_id
@@ -11589,8 +11752,7 @@ class DescribeDBSnapshotAttributesResult < Struct.new(
#
# @!attribute [rw] db_instance_identifier
# The ID of the DB instance to retrieve the list of DB snapshots for.
- # This parameter can't be used in conjunction with
- # `DBSnapshotIdentifier`. This parameter isn't case-sensitive.
+ # This parameter isn't case-sensitive.
#
# Constraints:
#
@@ -11600,9 +11762,8 @@ class DescribeDBSnapshotAttributesResult < Struct.new(
# @return [String]
#
# @!attribute [rw] db_snapshot_identifier
- # A specific DB snapshot identifier to describe. This parameter can't
- # be used in conjunction with `DBInstanceIdentifier`. This value is
- # stored as a lowercase string.
+ # A specific DB snapshot identifier to describe. This value is stored
+ # as a lowercase string.
#
# Constraints:
#
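+ #
+ # Because the two identifiers are no longer mutually exclusive, a
+ # combined lookup such as this sketch (placeholder names; `client`
+ # is an assumed `Aws::RDS::Client`) is accepted:
+ #
+ #     client.describe_db_snapshots(
+ #       db_instance_identifier: "mydb",
+ #       db_snapshot_identifier: "mydbsnapshot"
+ #     )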
@@ -12642,7 +12803,7 @@ class DescribeOptionGroupsMessage < Struct.new(
#
# Default: 100
#
- # Constraints: Minimum 20, maximum 100.
+ # Constraints: Minimum 20, maximum 10000.
# @return [Integer]
#
# @!attribute [rw] marker
@@ -13293,8 +13454,12 @@ class EngineDefaults < Struct.new(
include Aws::Structure
end
- # This data type is used as a response element in the `DescribeEvents`
- # action.
+ # This data type is used as a response element in the
+ # [DescribeEvents][1] action.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeEvents.html
#
# @!attribute [rw] source_identifier
# Provides the identifier for the source of the event.
@@ -13334,7 +13499,11 @@ class Event < Struct.new(
end
# Contains the results of a successful invocation of the
- # `DescribeEventCategories` operation.
+ # [DescribeEventCategories][1] operation.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeEventCategories.html
#
# @!attribute [rw] source_type
# The source type that the returned categories belong to
@@ -15075,8 +15244,27 @@ class ModifyDBClusterEndpointMessage < Struct.new(
# @return [String]
#
# @!attribute [rw] performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default
+ # is 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# Valid for: Multi-AZ DB clusters only
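+ #
+ # A sketch of changing the retention on an existing Multi-AZ DB
+ # cluster (identifier is a placeholder):
+ #
+ #     client.modify_db_cluster(
+ #       db_cluster_identifier: "my-multi-az-cluster",
+ #       performance_insights_retention_period: 589, # 19 months * 31
+ #       apply_immediately: true
+ #     )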
# @return [Integer]
@@ -15252,7 +15440,7 @@ class ModifyDBClusterResult < Struct.new(
# `restore`.
#
# To view the list of attributes available to modify, use the
- # DescribeDBClusterSnapshotAttributes API action.
+ # DescribeDBClusterSnapshotAttributes API operation.
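+ #
+ # For example, sharing a manual cluster snapshot with another
+ # account might look like this sketch (placeholder identifiers):
+ #
+ #     client.modify_db_cluster_snapshot_attribute(
+ #       db_cluster_snapshot_identifier: "my-cluster-snapshot",
+ #       attribute_name: "restore",
+ #       values_to_add: ["123456789012"]
+ #     )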
#
#
# @return [String]
@@ -15401,10 +15589,11 @@ class ModifyDBClusterSnapshotAttributeResult < Struct.new(
#
# @!attribute [rw] db_instance_class
# The new compute and memory capacity of the DB instance, for example
- # db.m4.large. Not all DB instance classes are available in all Amazon
+ # db.m5.large. Not all DB instance classes are available in all Amazon
# Web Services Regions, or for all database engines. For the full list
# of DB instance classes, and availability for your engine, see [DB
- # Instance Class][1] in the *Amazon RDS User Guide*.
+ # instance classes][1] in the *Amazon RDS User Guide* or [Aurora DB
+ # instance classes][2] in the *Amazon Aurora User Guide*.
#
# If you modify the DB instance class, an outage occurs during the
# change. The change is applied during the next maintenance window,
@@ -15417,6 +15606,7 @@ class ModifyDBClusterSnapshotAttributeResult < Struct.new(
#
#
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html
+ # [2]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html
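+ #
+ # A sketch (placeholder identifier) that defers the class change to
+ # the next maintenance window:
+ #
+ #     client.modify_db_instance(
+ #       db_instance_identifier: "mydb",
+ #       db_instance_class: "db.m5.large",
+ #       apply_immediately: false
+ #     )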
# @return [String]
#
# @!attribute [rw] db_subnet_group_name
@@ -15534,7 +15724,7 @@ class ModifyDBClusterSnapshotAttributeResult < Struct.new(
#
# Constraints: Must contain from 8 to 128 characters.
#
- # Amazon RDS API actions never return the password, so this action
+ # Amazon RDS API operations never return the password, so this operation
# provides a way to regain access to a primary instance user if the
# password is lost. This includes restoring privileges that might have
# been accidentally revoked.
@@ -15589,8 +15779,8 @@ class ModifyDBClusterSnapshotAttributeResult < Struct.new(
# Constraints:
#
# * It must be a value from 0 to 35. It can't be set to 0 if the DB
- # instance is a source to read replicas. It can't be set to 0 or 35
- # for an RDS Custom for Oracle DB instance.
+ # instance is a source to read replicas. It can't be set to 0 for
+ # an RDS Custom for Oracle DB instance.
#
# * It can be specified for a MySQL read replica only if the source is
# running MySQL 5.6 or later.
@@ -16033,7 +16223,7 @@ class ModifyDBClusterSnapshotAttributeResult < Struct.new(
# the DB instance.
#
# For more information, see [Using Amazon Performance Insights][1] in
- # the *Amazon RDS User Guide.*.
+ # the *Amazon RDS User Guide*.
#
# This setting doesn't apply to RDS Custom.
#
@@ -16059,8 +16249,27 @@ class ModifyDBClusterSnapshotAttributeResult < Struct.new(
# @return [String]
#
# @!attribute [rw] performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default
+ # is 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
#
# This setting doesn't apply to RDS Custom.
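+ #
+ # A sketch of updating the retention on an existing instance
+ # (identifier is a placeholder):
+ #
+ #     client.modify_db_instance(
+ #       db_instance_identifier: "mydb",
+ #       performance_insights_retention_period: 731 # 2 years
+ #     )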
# @return [Integer]
@@ -16549,11 +16758,11 @@ class ModifyDBProxyResponse < Struct.new(
# }
#
# @!attribute [rw] target_group_name
- # The name of the new target group to assign to the proxy.
+ # The name of the target group to modify.
# @return [String]
#
# @!attribute [rw] db_proxy_name
- # The name of the new proxy to which to assign the target group.
+ # The name of the proxy.
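+ #
+ # Both names appear together in the call, as in this sketch
+ # (`default` is the target group name RDS Proxy currently creates;
+ # the proxy name is a placeholder):
+ #
+ #     client.modify_db_proxy_target_group(
+ #       target_group_name: "default",
+ #       db_proxy_name: "my-proxy",
+ #       connection_pool_config: { max_connections_percent: 75 }
+ #     )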
# @return [String]
#
# @!attribute [rw] connection_pool_config
@@ -16612,7 +16821,7 @@ class ModifyDBProxyTargetGroupResponse < Struct.new(
# copy or restore a manual DB snapshot, set this value to `restore`.
#
# To view the list of attributes available to modify, use the
- # DescribeDBSnapshotAttributes API action.
+ # DescribeDBSnapshotAttributes API operation.
#
#
# @return [String]
@@ -19339,9 +19548,9 @@ class ResourcePendingMaintenanceActions < Struct.new(
# @!attribute [rw] engine
# The name of the database engine to be used for this DB cluster.
#
- # Valid Values: `aurora` (for MySQL 5.6-compatible Aurora),
+ # Valid Values: `aurora` (for MySQL 5.6-compatible Aurora) and
# `aurora-mysql` (for MySQL 5.7-compatible and MySQL 8.0-compatible
- # Aurora), and `aurora-postgresql`
+ # Aurora)
# @return [String]
#
# @!attribute [rw] engine_version
@@ -19360,20 +19569,10 @@ class ResourcePendingMaintenanceActions < Struct.new(
# `aws rds describe-db-engine-versions --engine aurora-mysql --query
# "DBEngineVersions[].EngineVersion"`
#
- # To list all of the available engine versions for
- # `aurora-postgresql`, use the following command:
- #
- # `aws rds describe-db-engine-versions --engine aurora-postgresql
- # --query "DBEngineVersions[].EngineVersion"`
- #
# **Aurora MySQL**
#
- # Example: `5.6.10a`, `5.6.mysql_aurora.1.19.2`, `5.7.12`,
- # `5.7.mysql_aurora.2.04.5`, `8.0.mysql_aurora.3.01.0`
- #
- # **Aurora PostgreSQL**
- #
- # Example: `9.6.3`, `10.7`
+ # Example: `5.6.10a`, `5.6.mysql_aurora.1.19.2`,
+ # `5.7.mysql_aurora.2.07.1`, `8.0.mysql_aurora.3.02.0`
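+ #
+ # The same lookup from the Ruby SDK might read (assuming an
+ # `Aws::RDS::Client` named `client`):
+ #
+ #     client.describe_db_engine_versions(engine: "aurora-mysql").
+ #       db_engine_versions.map(&:engine_version)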
# @return [String]
#
# @!attribute [rw] port
@@ -19985,7 +20184,7 @@ class RestoreDBClusterFromS3Result < Struct.new(
#
# For more information about exporting CloudWatch Logs for Amazon RDS,
# see [Publishing Database Logs to Amazon CloudWatch Logs][1] in the
- # *Amazon RDS User Guide.*.
+ # *Amazon RDS User Guide*.
#
# For more information about exporting CloudWatch Logs for Amazon
# Aurora, see [Publishing Database Logs to Amazon CloudWatch Logs][2]
@@ -20504,7 +20703,7 @@ class RestoreDBClusterFromSnapshotResult < Struct.new(
#
# For more information about exporting CloudWatch Logs for Amazon RDS,
# see [Publishing Database Logs to Amazon CloudWatch Logs][1] in the
- # *Amazon RDS User Guide.*.
+ # *Amazon RDS User Guide*.
#
# For more information about exporting CloudWatch Logs for Amazon
# Aurora, see [Publishing Database Logs to Amazon CloudWatch Logs][2]
@@ -21726,7 +21925,7 @@ class RestoreDBInstanceFromDBSnapshotResult < Struct.new(
# the DB instance.
#
# For more information, see [Using Amazon Performance Insights][1] in
- # the *Amazon RDS User Guide.*.
+ # the *Amazon RDS User Guide*.
#
#
#
@@ -21748,8 +21947,27 @@ class RestoreDBInstanceFromDBSnapshotResult < Struct.new(
# @return [String]
#
# @!attribute [rw] performance_insights_retention_period
- # The amount of time, in days, to retain Performance Insights data.
- # Valid values are 7 or 731 (2 years).
+ # The number of days to retain Performance Insights data. The default
+ # is 7 days. The following values are valid:
+ #
+ # * 7
+ #
+ # * *month* * 31, where *month* is a number of months from 1-23
+ #
+ # * 731
+ #
+ # For example, the following values are valid:
+ #
+ # * 93 (3 months * 31)
+ #
+ # * 341 (11 months * 31)
+ #
+ # * 589 (19 months * 31)
+ #
+ # * 731
+ #
+ # If you specify a retention period such as 94, which isn't a valid
+ # value, RDS issues an error.
# @return [Integer]
#
# @!attribute [rw] enable_cloudwatch_logs_exports
@@ -22696,7 +22914,7 @@ class ScalingConfiguration < Struct.new(
# [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html
#
# @!attribute [rw] min_capacity
- # The maximum capacity for the Aurora DB cluster in `serverless` DB
+ # The minimum capacity for an Aurora DB cluster in `serverless` DB
# engine mode.
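+ #
+ # Both capacity bounds travel together in `ScalingConfiguration`,
+ # roughly as in this sketch (values are illustrative):
+ #
+ #     client.create_db_cluster(
+ #       db_cluster_identifier: "my-serverless-cluster",
+ #       engine: "aurora-mysql",
+ #       engine_mode: "serverless",
+ #       scaling_configuration: { min_capacity: 2, max_capacity: 16 }
+ #     )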
# @return [Integer]
#
@@ -23084,13 +23302,35 @@ class StartDBClusterResult < Struct.new(
# @return [String]
#
# @!attribute [rw] pre_signed_url
- # A URL that contains a Signature Version 4 signed request for the
- # StartDBInstanceAutomatedBackupsReplication action to be called in
+ # In an Amazon Web Services GovCloud (US) Region, a URL that contains
+ # a Signature Version 4 signed request for the
+ # `StartDBInstanceAutomatedBackupsReplication` operation to call in
# the Amazon Web Services Region of the source DB instance. The
# presigned URL must be a valid request for the
- # StartDBInstanceAutomatedBackupsReplication API action that can be
- # executed in the Amazon Web Services Region that contains the source
- # DB instance.
+ # `StartDBInstanceAutomatedBackupsReplication` API operation that can
+ # run in the Amazon Web Services Region that contains the source DB
+ # instance.
+ #
+ # This setting applies only to Amazon Web Services GovCloud (US)
+ # Regions. It's ignored in other Amazon Web Services Regions.
+ #
+ # To learn how to generate a Signature Version 4 signed request, see
+ # [Authenticating Requests: Using Query Parameters (Amazon Web
+ # Services Signature Version 4)][1] and [Signature Version 4 Signing
+ # Process][2].
+ #
+ # If you are using an Amazon Web Services SDK tool or the CLI, you can
+ # specify `SourceRegion` (or `--source-region` for the CLI) instead of
+ # specifying `PreSignedUrl` manually. Specifying `SourceRegion`
+ # autogenerates a presigned URL that is a valid request for the
+ # operation that can run in the source Amazon Web Services Region.
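+ #
+ # With `SourceRegion`, the SDK call reduces to something like this
+ # sketch (placeholder ARN; `client` is an assumed `Aws::RDS::Client`
+ # in the destination Region):
+ #
+ #     client.start_db_instance_automated_backups_replication(
+ #       source_db_instance_arn:
+ #         "arn:aws:rds:us-west-2:123456789012:db:mydb",
+ #       backup_retention_period: 7,
+ #       source_region: "us-west-2"
+ #     )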
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+ # [2]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
# @return [String]
#
# @!attribute [rw] source_region
@@ -23856,7 +24096,9 @@ class ValidStorageOptions < Struct.new(
# @return [String]
#
# @!attribute [rw] status
- # The status of the VPC security group.
+ # The membership status of the VPC security group.
+ #
+ # Currently, the only valid status is `active`.
# @return [String]
#
# @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/VpcSecurityGroupMembership AWS API Documentation