From eb54f1307fa48c883b12f1a3a9aeb520f20068cc Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Fri, 23 Feb 2024 14:23:24 -0500 Subject: [PATCH] Release v1.50.25 (2024-02-23) (#5179) Release v1.50.25 (2024-02-23) === ### Service Client Updates * `service/appsync`: Updates service documentation * `service/qldb`: Updates service documentation * `service/rds`: Updates service API, documentation, waiters, paginators, and examples * Add pattern and length based validations for DBShardGroupIdentifier * `service/rum`: Updates service documentation --- CHANGELOG.md | 10 + aws/version.go | 2 +- models/apis/appsync/2017-07-25/docs-2.json | 10 +- models/apis/qldb/2019-01-02/docs-2.json | 4 +- .../qldb/2019-01-02/endpoint-rule-set-1.json | 366 ++++++++---------- models/apis/rds/2014-10-31/api-2.json | 16 +- models/apis/rds/2014-10-31/docs-2.json | 15 +- models/apis/rum/2018-05-10/docs-2.json | 18 +- .../rum/2018-05-10/endpoint-rule-set-1.json | 366 ++++++++---------- .../apis/rum/2018-05-10/endpoint-tests-1.json | 158 +++++--- service/appsync/api.go | 28 +- service/cloudwatchrum/api.go | 77 ++-- service/qldb/api.go | 7 +- service/rds/api.go | 30 +- 14 files changed, 576 insertions(+), 531 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 01553ec4e98..96b3a85ce0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.50.25 (2024-02-23) +=== + +### Service Client Updates +* `service/appsync`: Updates service documentation +* `service/qldb`: Updates service documentation +* `service/rds`: Updates service API, documentation, waiters, paginators, and examples + * Add pattern and length based validations for DBShardGroupIdentifier +* `service/rum`: Updates service documentation + Release v1.50.24 (2024-02-22) === diff --git a/aws/version.go b/aws/version.go index a1b1509c5bc..e0267957552 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = 
"aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.50.24" +const SDKVersion = "1.50.25" diff --git a/models/apis/appsync/2017-07-25/docs-2.json b/models/apis/appsync/2017-07-25/docs-2.json index 05dfc913773..05c77e90fbd 100644 --- a/models/apis/appsync/2017-07-25/docs-2.json +++ b/models/apis/appsync/2017-07-25/docs-2.json @@ -278,8 +278,8 @@ "base": null, "refs": { "ApiCache$healthMetricsConfig": "
Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:
NetworkBandwidthOutAllowanceExceeded: The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.
EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.
Metrics will be recorded by API ID. You can set the value to ENABLED
or DISABLED
.
Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:
NetworkBandwidthOutAllowanceExceeded: The number of times a specified GraphQL operation was called.
EngineCPUUtilization: The number of GraphQL errors that occurred during a specified GraphQL operation.
Metrics will be recorded by API ID. You can set the value to ENABLED
or DISABLED
.
Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:
NetworkBandwidthOutAllowanceExceeded: The number of times a specified GraphQL operation was called.
EngineCPUUtilization: The number of GraphQL errors that occurred during a specified GraphQL operation.
Metrics will be recorded by API ID. You can set the value to ENABLED
or DISABLED
.
Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:
NetworkBandwidthOutAllowanceExceeded: The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.
EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.
Metrics will be recorded by API ID. You can set the value to ENABLED
or DISABLED
.
Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:
NetworkBandwidthOutAllowanceExceeded: The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.
EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.
Metrics will be recorded by API ID. You can set the value to ENABLED
or DISABLED
.
Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:
Requests: The number of invocations that occured during a request.
Latency: The time to complete a data source invocation.
Errors: The number of errors that occurred during a data source invocation.
These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior
accepts one of these values at a time:
FULL_REQUEST_DATA_SOURCE_METRICS
: Records and emits metric data for all data sources in the request.
PER_DATA_SOURCE_METRICS
: Records and emits metric data for data sources that have the metricConfig
value set to ENABLED
.
Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:
Requests: The number of invocations that occurred during a request.
Latency: The time to complete a data source invocation.
Errors: The number of errors that occurred during a data source invocation.
These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior
accepts one of these values at a time:
FULL_REQUEST_DATA_SOURCE_METRICS
: Records and emits metric data for all data sources in the request.
PER_DATA_SOURCE_METRICS
: Records and emits metric data for data sources that have the metricsConfig
value set to ENABLED
.
Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.
Enhanced metrics can be configured at the resolver, data source, and operation levels. EnhancedMetricsConfig
contains three required parameters, each controlling one of these categories:
resolverLevelMetricsBehavior
: Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:
GraphQL errors: The number of GraphQL errors that occurred.
Requests: The number of invocations that occurred during a request.
Latency: The time to complete a resolver invocation.
Cache hits: The number of cache hits during a request.
Cache misses: The number of cache misses during a request.
These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior
accepts one of these values at a time:
FULL_REQUEST_RESOLVER_METRICS
: Records and emits metric data for all resolvers in the request.
PER_RESOLVER_METRICS
: Records and emits metric data for resolvers that have the metricConfig
value set to ENABLED
.
dataSourceLevelMetricsBehavior
: Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:
Requests: The number of invocations that occured during a request.
Latency: The time to complete a data source invocation.
Errors: The number of errors that occurred during a data source invocation.
These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior
accepts one of these values at a time:
FULL_REQUEST_DATA_SOURCE_METRICS
: Records and emits metric data for all data sources in the request.
PER_DATA_SOURCE_METRICS
: Records and emits metric data for data sources that have the metricConfig
value set to ENABLED
.
operationLevelMetricsConfig
: Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:
Requests: The number of times a specified GraphQL operation was called.
GraphQL errors: The number of GraphQL errors that occurred during a specified GraphQL operation.
Metrics will be recorded by API ID and operation name. You can set the value to ENABLED
or DISABLED
.
Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.
Enhanced metrics can be configured at the resolver, data source, and operation levels. EnhancedMetricsConfig
contains three required parameters, each controlling one of these categories:
resolverLevelMetricsBehavior
: Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:
GraphQL errors: The number of GraphQL errors that occurred.
Requests: The number of invocations that occurred during a request.
Latency: The time to complete a resolver invocation.
Cache hits: The number of cache hits during a request.
Cache misses: The number of cache misses during a request.
These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior
accepts one of these values at a time:
FULL_REQUEST_RESOLVER_METRICS
: Records and emits metric data for all resolvers in the request.
PER_RESOLVER_METRICS
: Records and emits metric data for resolvers that have the metricsConfig
value set to ENABLED
.
dataSourceLevelMetricsBehavior
: Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:
Requests: The number of invocations that occurred during a request.
Latency: The time to complete a data source invocation.
Errors: The number of errors that occurred during a data source invocation.
These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior
accepts one of these values at a time:
FULL_REQUEST_DATA_SOURCE_METRICS
: Records and emits metric data for all data sources in the request.
PER_DATA_SOURCE_METRICS
: Records and emits metric data for data sources that have the metricsConfig
value set to ENABLED
.
operationLevelMetricsConfig
: Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:
Requests: The number of times a specified GraphQL operation was called.
GraphQL errors: The number of GraphQL errors that occurred during a specified GraphQL operation.
Metrics will be recorded by API ID and operation name. You can set the value to ENABLED
or DISABLED
.
The enhancedMetricsConfig
object.
The enhancedMetricsConfig
object.
Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:
GraphQL errors: The number of GraphQL errors that occurred.
Requests: The number of invocations that occurred during a request.
Latency: The time to complete a resolver invocation.
Cache hits: The number of cache hits during a request.
Cache misses: The number of cache misses during a request.
These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior
accepts one of these values at a time:
FULL_REQUEST_RESOLVER_METRICS
: Records and emits metric data for all resolvers in the request.
PER_RESOLVER_METRICS
: Records and emits metric data for resolvers that have the metricConfig
value set to ENABLED
.
Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:
GraphQL errors: The number of GraphQL errors that occurred.
Requests: The number of invocations that occurred during a request.
Latency: The time to complete a resolver invocation.
Cache hits: The number of cache hits during a request.
Cache misses: The number of cache misses during a request.
These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior
accepts one of these values at a time:
FULL_REQUEST_RESOLVER_METRICS
: Records and emits metric data for all resolvers in the request.
PER_RESOLVER_METRICS
: Records and emits metric data for resolvers that have the metricsConfig
value set to ENABLED
.
The Amazon Resource Name (ARN) of the QLDB journal stream.
", "JournalS3ExportDescription$RoleArn": "The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:
Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.
(Optional) Use your customer managed key in Key Management Service (KMS) for server-side encryption of your exported data.
The Amazon Resource Name (ARN) of the Kinesis Data Streams resource.
", - "LedgerEncryptionDescription$KmsKeyArn": "The Amazon Resource Name (ARN) of the customer managed KMS key that the ledger uses for encryption at rest. If this parameter is undefined, the ledger uses an Amazon Web Services owned KMS key for encryption.
", + "LedgerEncryptionDescription$KmsKeyArn": "The Amazon Resource Name (ARN) of the customer managed KMS key that the ledger uses for encryption at rest. If this parameter is undefined, the ledger uses an Amazon Web Services owned KMS key for encryption. It will display AWS_OWNED_KMS_KEY
when updating the ledger's encryption configuration to the Amazon Web Services owned KMS key.
The Amazon Resource Name (ARN) for which to list the tags. For example:
arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger
The Amazon Resource Name (ARN) of a symmetric encryption key in Key Management Service (KMS). Amazon S3 does not support asymmetric KMS keys.
You must provide a KmsKeyArn
if you specify SSE_KMS
as the ObjectEncryptionType
.
KmsKeyArn
is not required if you specify SSE_S3
as the ObjectEncryptionType
.
The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource.
To pass a role to QLDB when requesting a journal stream, you must have permissions to perform the iam:PassRole
action on the IAM role resource. This is required for all journal stream requests.
Information about the encryption of data at rest in an Amazon QLDB ledger. This includes the current status, the key in Key Management Service (KMS), and when the key became inaccessible (in the case of an error).
For more information, see Encryption at rest in the Amazon QLDB Developer Guide.
", "refs": { - "DescribeLedgerResponse$EncryptionDescription": "Information about the encryption of data at rest in the ledger. This includes the current status, the KMS key, and when the key became inaccessible (in the case of an error).
", + "DescribeLedgerResponse$EncryptionDescription": "Information about the encryption of data at rest in the ledger. This includes the current status, the KMS key, and when the key became inaccessible (in the case of an error). If this parameter is undefined, the ledger uses an Amazon Web Services owned KMS key for encryption.
", "UpdateLedgerResponse$EncryptionDescription": "Information about the encryption of data at rest in the ledger. This includes the current status, the KMS key, and when the key became inaccessible (in the case of an error).
" } }, diff --git a/models/apis/qldb/2019-01-02/endpoint-rule-set-1.json b/models/apis/qldb/2019-01-02/endpoint-rule-set-1.json index 5782369086d..b0cd79996db 100644 --- a/models/apis/qldb/2019-01-02/endpoint-rule-set-1.json +++ b/models/apis/qldb/2019-01-02/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -58,293 +57,258 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, { - "conditions": [], - "type": "tree", + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": 
"booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://qldb-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://qldb-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, true ] } ], - "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://qldb-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://qldb-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://qldb.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], - "type": "tree", + ], "rules": [ { "conditions": [], "endpoint": { - "url": "https://qldb.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://qldb.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + 
"endpoint": { + "url": "https://qldb.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" + ], + "type": "tree" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index 40c47f91a09..3755b4ad451 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -5385,7 +5385,7 @@ "type":"structure", "members":{ "DBShardGroupResourceId":{"shape":"String"}, - "DBShardGroupIdentifier":{"shape":"String"}, + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"}, "DBClusterIdentifier":{"shape":"String"}, "MaxACU":{"shape":"DoubleOptional"}, "ComputeRedundancy":{"shape":"IntegerOptional"}, @@ -5405,6 +5405,12 @@ }, "exception":true }, + "DBShardGroupIdentifier":{ + "type":"string", + "max":63, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*" + }, "DBShardGroupNotFoundFault":{ "type":"structure", "members":{ @@ -5832,7 +5838,7 @@ "type":"structure", "required":["DBShardGroupIdentifier"], "members":{ - "DBShardGroupIdentifier":{"shape":"String"} + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"} } }, "DeleteDBSnapshotMessage":{ @@ -6229,7 +6235,7 @@ "DescribeDBShardGroupsMessage":{ "type":"structure", "members":{ - "DBShardGroupIdentifier":{"shape":"String"}, + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"}, "Filters":{"shape":"FilterList"}, "Marker":{"shape":"String"}, "MaxRecords":{"shape":"MaxRecords"} @@ -8000,7 +8006,7 @@ "type":"structure", "required":["DBShardGroupIdentifier"], "members":{ - "DBShardGroupIdentifier":{"shape":"String"}, + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"}, "MaxACU":{"shape":"DoubleOptional"} } }, @@ -8703,7 
+8709,7 @@ "type":"structure", "required":["DBShardGroupIdentifier"], "members":{ - "DBShardGroupIdentifier":{"shape":"String"} + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"} } }, "RecommendedAction":{ diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 0b08f91e522..e0570d41b75 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -1821,6 +1821,16 @@ "refs": { } }, + "DBShardGroupIdentifier": { + "base": null, + "refs": { + "DBShardGroup$DBShardGroupIdentifier": "The name of the DB shard group.
", + "DeleteDBShardGroupMessage$DBShardGroupIdentifier": "Teh name of the DB shard group to delete.
", + "DescribeDBShardGroupsMessage$DBShardGroupIdentifier": "The user-supplied DB shard group identifier or the Amazon Resource Name (ARN) of the DB shard group. If this parameter is specified, information for only the specific DB shard group is returned. This parameter isn't case-sensitive.
Constraints:
If supplied, must match an existing DB shard group identifier.
The name of the DB shard group to modify.
", + "RebootDBShardGroupMessage$DBShardGroupIdentifier": "The name of the DB shard group to reboot.
" + } + }, "DBShardGroupNotFoundFault": { "base": "The specified DB shard group name wasn't found.
", "refs": { @@ -4983,7 +4993,6 @@ "DBSecurityGroupMessage$Marker": "An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The Amazon Web Services Region-unique, immutable identifier for the DB shard group.
", - "DBShardGroup$DBShardGroupIdentifier": "The name of the DB shard group.
", "DBShardGroup$DBClusterIdentifier": "The name of the primary DB cluster for the DB shard group.
", "DBShardGroup$Status": "The status of the DB shard group.
", "DBShardGroup$Endpoint": "The connection endpoint for the DB shard group.
", @@ -5042,7 +5051,6 @@ "DeleteDBParameterGroupMessage$DBParameterGroupName": "The name of the DB parameter group.
Constraints:
Must be the name of an existing DB parameter group
You can't delete a default DB parameter group
Can't be associated with any DB instances
The name of the DB proxy to delete.
", "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "The name of the DB security group to delete.
You can't delete the default DB security group.
Constraints:
Must be 1 to 255 letters, numbers, or hyphens.
First character must be a letter
Can't end with a hyphen or contain two consecutive hyphens
Must not be \"Default\"
Teh name of the DB shard group to delete.
", "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "The DB snapshot identifier.
Constraints: Must be the name of an existing DB snapshot in the available
state.
The name of the database subnet group to delete.
You can't delete the default subnet group.
Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.
Example: mydbsubnetgroup
The name of the RDS event notification subscription you want to delete.
", @@ -5116,7 +5124,6 @@ "DescribeDBRecommendationsMessage$Marker": "An optional pagination token provided by a previous DescribeDBRecommendations
request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The name of the DB security group to return details for.
", "DescribeDBSecurityGroupsMessage$Marker": "An optional pagination token provided by a previous DescribeDBSecurityGroups
request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The user-supplied DB shard group identifier or the Amazon Resource Name (ARN) of the DB shard group. If this parameter is specified, information for only the specific DB shard group is returned. This parameter isn't case-sensitive.
Constraints:
If supplied, must match an existing DB shard group identifier.
An optional pagination token provided by a previous DescribeDBShardGroups
request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
A pagination token that can be used in a later DescribeDBClusters
request.
The identifier for the DB snapshot to describe the attributes for.
", @@ -5329,7 +5336,6 @@ "ModifyDBRecommendationMessage$RecommendationId": "The identifier of the recommendation to update.
", "ModifyDBRecommendationMessage$Locale": "The language of the modified recommendation.
", "ModifyDBRecommendationMessage$Status": "The recommendation status to update.
Valid values:
active
dismissed
The name of the DB shard group to modify.
", "ModifyDBSnapshotAttributeMessage$DBSnapshotIdentifier": "The identifier for the DB snapshot to modify the attributes for.
", "ModifyDBSnapshotAttributeMessage$AttributeName": "The name of the DB snapshot attribute to modify.
To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, set this value to restore
.
To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes API operation.
The identifier of the DB snapshot to modify.
", @@ -5432,7 +5438,6 @@ "ReadersArnList$member": null, "RebootDBClusterMessage$DBClusterIdentifier": "The DB cluster identifier. This parameter is stored as a lowercase string.
Constraints:
Must match the identifier of an existing DBCluster.
The DB instance identifier. This parameter is stored as a lowercase string.
Constraints:
Must match the identifier of an existing DBInstance.
The name of the DB shard group to reboot.
", "RecommendedAction$ActionId": "The unique identifier of the recommended action.
", "RecommendedAction$Title": "A short description to summarize the action. The description might contain markdown.
", "RecommendedAction$Description": "A detailed description of the action. The description might contain markdown.
", diff --git a/models/apis/rum/2018-05-10/docs-2.json b/models/apis/rum/2018-05-10/docs-2.json index 76794d81ba7..65124f74e88 100644 --- a/models/apis/rum/2018-05-10/docs-2.json +++ b/models/apis/rum/2018-05-10/docs-2.json @@ -2,7 +2,7 @@ "version": "2.0", "service": "With Amazon CloudWatch RUM, you can perform real-user monitoring to collect client-side data about your web application performance from actual user sessions in real time. The data collected includes page load times, client-side errors, and user behavior. When you view this data, you can see it all aggregated together and also see breakdowns by the browsers and devices that your customers use.
You can use the collected data to quickly identify and debug client-side performance issues. CloudWatch RUM helps you visualize anomalies in your application performance and find relevant debugging data such as error messages, stack traces, and user sessions. You can also use RUM to understand the range of end-user impact including the number of users, geolocations, and browsers used.
", "operations": { - "BatchCreateRumMetricDefinitions": "Specifies the extended metrics and custom metrics that you want a CloudWatch RUM app monitor to send to a destination. Valid destinations include CloudWatch and Evidently.
By default, RUM app monitors send some metrics to CloudWatch. These default metrics are listed in CloudWatch metrics that you can collect with CloudWatch RUM.
In addition to these default metrics, you can choose to send extended metrics or custom metrics or both.
Extended metrics enable you to send metrics with additional dimensions not included in the default metrics. You can also send extended metrics to Evidently as well as CloudWatch. The valid dimension names for the additional dimensions for extended metrics are BrowserName
, CountryCode
, DeviceType
, FileType
, OSName
, and PageId
. For more information, see Extended metrics that you can send to CloudWatch and CloudWatch Evidently.
Custom metrics are metrics that you define. You can send custom metrics to CloudWatch or to CloudWatch Evidently or to both. With custom metrics, you can use any metric name and namespace, and to derive the metrics you can use any custom events, built-in events, custom attributes, or default attributes.
You can't send custom metrics to the AWS/RUM
namespace. You must send custom metrics to a custom namespace that you define. The namespace that you use can't start with AWS/
. CloudWatch RUM prepends RUM/CustomMetrics/
to the custom namespace that you define, so the final namespace for your metrics in CloudWatch is RUM/CustomMetrics/your-custom-namespace
.
The maximum number of metric definitions that you can specify in one BatchCreateRumMetricDefinitions
operation is 200.
The maximum number of metric definitions that one destination can contain is 2000.
Extended metrics sent to CloudWatch and RUM custom metrics are charged as CloudWatch custom metrics. Each combination of additional dimension name and dimension value counts as a custom metric. For more information, see Amazon CloudWatch Pricing.
You must have already created a destination for the metrics before you send them. For more information, see PutRumMetricsDestination.
If some metric definitions specified in a BatchCreateRumMetricDefinitions
operations are not valid, those metric definitions fail and return errors, but all valid metric definitions in the same operation still succeed.
Specifies the extended metrics and custom metrics that you want a CloudWatch RUM app monitor to send to a destination. Valid destinations include CloudWatch and Evidently.
By default, RUM app monitors send some metrics to CloudWatch. These default metrics are listed in CloudWatch metrics that you can collect with CloudWatch RUM.
In addition to these default metrics, you can choose to send extended metrics, custom metrics, or both.
Extended metrics let you send metrics with additional dimensions that aren't included in the default metrics. You can also send extended metrics to both Evidently and CloudWatch. The valid dimension names for the additional dimensions for extended metrics are BrowserName
, CountryCode
, DeviceType
, FileType
, OSName
, and PageId
. For more information, see Extended metrics that you can send to CloudWatch and CloudWatch Evidently.
Custom metrics are metrics that you define. You can send custom metrics to CloudWatch, CloudWatch Evidently, or both. With custom metrics, you can use any metric name and namespace. To derive the metrics, you can use any custom events, built-in events, custom attributes, or default attributes.
You can't send custom metrics to the AWS/RUM
namespace. You must send custom metrics to a custom namespace that you define. The namespace that you use can't start with AWS/
. CloudWatch RUM prepends RUM/CustomMetrics/
to the custom namespace that you define, so the final namespace for your metrics in CloudWatch is RUM/CustomMetrics/your-custom-namespace
.
The maximum number of metric definitions that you can specify in one BatchCreateRumMetricDefinitions
operation is 200.
The maximum number of metric definitions that one destination can contain is 2000.
Extended metrics sent to CloudWatch and RUM custom metrics are charged as CloudWatch custom metrics. Each combination of additional dimension name and dimension value counts as a custom metric. For more information, see Amazon CloudWatch Pricing.
You must have already created a destination for the metrics before you send them. For more information, see PutRumMetricsDestination.
If some metric definitions specified in a BatchCreateRumMetricDefinitions
operations are not valid, those metric definitions fail and return errors, but all valid metric definitions in the same operation still succeed.
Removes the specified metrics from being sent to an extended metrics destination.
If some metric definition IDs specified in a BatchDeleteRumMetricDefinitions
operations are not valid, those metric definitions fail and return errors, but all valid metric definition IDs in the same operation are still deleted.
The maximum number of metric definitions that you can specify in one BatchDeleteRumMetricDefinitions
operation is 200.
Retrieves the list of metrics and dimensions that a RUM app monitor is sending to a single destination.
", "CreateAppMonitor": "Creates a Amazon CloudWatch RUM app monitor, which collects telemetry data from your application and sends that data to RUM. The data includes performance and reliability information such as page load time, client-side errors, and user behavior.
You use this operation only to create a new app monitor. To update an existing app monitor, use UpdateAppMonitor instead.
After you create an app monitor, sign in to the CloudWatch RUM console to get the JavaScript code snippet to add to your web application. For more information, see How do I find a code snippet that I've already generated?
", @@ -36,8 +36,8 @@ "base": "This structure contains much of the configuration data for the app monitor.
", "refs": { "AppMonitor$AppMonitorConfiguration": "A structure that contains much of the configuration data for the app monitor.
", - "CreateAppMonitorRequest$AppMonitorConfiguration": "A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration
, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.
If you omit this argument, the sample rate used for RUM is set to 10% of the user sessions.
", - "UpdateAppMonitorRequest$AppMonitorConfiguration": "A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration
, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.
A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration
, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.
If you omit this argument, the sample rate used for RUM is set to 10% of the user sessions.
", + "UpdateAppMonitorRequest$AppMonitorConfiguration": "A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration
, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.
The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool that is used to authorize the sending of data to RUM.
", + "AppMonitorConfiguration$GuestRoleArn": "The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool that is used to authorize the sending of data to RUM.
It is possible that an app monitor does not have a value for GuestRoleArn
. For example, this can happen when you use the console to create an app monitor and you allow CloudWatch RUM to create a new identity pool for Authorization. In this case, GuestRoleArn
is not present in the GetAppMonitor response because it is not stored by the service.
If this issue affects you, you can take one of the following steps:
Use the Cloud Development Kit (CDK) to create an identity pool and the associated IAM role, and use that for your app monitor.
Make a separate GetIdentityPoolRoles call to Amazon Cognito to retrieve the GuestRoleArn
.
The ARN of the resource that you want to see the tags of.
", "ListTagsForResourceResponse$ResourceArn": "The ARN of the resource that you are viewing.
", "TagResourceRequest$ResourceArn": "The ARN of the CloudWatch RUM resource that you're adding tags to.
", @@ -275,7 +275,7 @@ "base": null, "refs": { "MetricDefinition$EventPattern": "The pattern that defines the metric. RUM checks events that happen in a user's session against the pattern, and events that match the pattern are sent to the metric destination.
If the metrics destination is CloudWatch
and the event also matches a value in DimensionKeys
, then the metric is published with the specified dimensions.
The pattern that defines the metric, specified as a JSON object. RUM checks events that happen in a user's session against the pattern, and events that match the pattern are sent to the metric destination.
When you define extended metrics, the metric definition is not valid if EventPattern
is omitted.
Example event patterns:
'{ \"event_type\": [\"com.amazon.rum.js_error_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ], } }'
'{ \"event_type\": [\"com.amazon.rum.js_error_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ] } }'
'{ \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ], \"countryCode\": [ \"US\" ] }, \"event_details\": { \"duration\": [{ \"numeric\": [ \">=\", 2000, \"<\", 8000 ] }] } }'
If the metrics destination is CloudWatch
and the event also matches a value in DimensionKeys
, then the metric is published with the specified dimensions.
The pattern that defines the metric, specified as a JSON object. RUM checks events that happen in a user's session against the pattern, and events that match the pattern are sent to the metric destination.
When you define extended metrics, the metric definition is not valid if EventPattern
is omitted.
Example event patterns:
'{ \"event_type\": [\"com.amazon.rum.js_error_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ], } }'
'{ \"event_type\": [\"com.amazon.rum.js_error_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ] } }'
'{ \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ], \"countryCode\": [ \"US\" ] }, \"event_details\": { \"duration\": [{ \"numeric\": [ \">=\", 2000, \"<\", 8000 ] }] } }'
If the metrics destination is CloudWatch
and the event also matches a value in DimensionKeys
, then the metric is published with the specified dimensions.
This field appears only when the destination is Evidently
. It specifies the ARN of the IAM role that is used to write to the Evidently experiment that receives the metrics.
This parameter is required if Destination
is Evidently
. If Destination
is CloudWatch
, do not use this parameter.
This parameter specifies the ARN of an IAM role that RUM will assume to write to the Evidently experiment that you are sending metrics to. This role must have permission to write to that experiment.
" + "PutRumMetricsDestinationRequest$IamRoleArn": "This parameter is required if Destination
is Evidently
. If Destination
is CloudWatch
, don't use this parameter.
This parameter specifies the ARN of an IAM role that RUM will assume to write to the Evidently experiment that you are sending metrics to. This role must have permission to write to that experiment.
If you specify this parameter, you must be signed on to a role that has PassRole permissions attached to it, to allow the role to be passed. The CloudWatchAmazonCloudWatchRUMFullAccess policy doesn't include PassRole
permissions.
Use this structure to define one extended metric or custom metric that RUM will send to CloudWatch or CloudWatch Evidently. For more information, see Additional metrics that you can send to CloudWatch and CloudWatch Evidently.
This structure is validated differently for extended metrics and custom metrics. For extended metrics that are sent to the AWS/RUM
namespace, the following validations apply:
The Namespace
parameter must be omitted or set to AWS/RUM
.
Only certain combinations of values for Name
, ValueKey
, and EventPattern
are valid. In addition to what is displayed in the list below, the EventPattern
can also include information used by the DimensionKeys
field.
If Name
is PerformanceNavigationDuration
, then ValueKey
must be event_details.duration
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.performance_navigation_event\"]}
If Name
is PerformanceResourceDuration
, then ValueKey
must be event_details.duration
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.performance_resource_event\"]}
If Name
is NavigationSatisfiedTransaction
, then ValueKey
must be null and the EventPattern
must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">\",2000] }] } }
If Name
is NavigationToleratedTransaction
, then ValueKey
must be null and the EventPattern
must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">=\",2000,\"<\"8000] }] } }
If Name
is NavigationFrustratedTransaction
, then ValueKey
must be null and the EventPattern
must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">=\",8000] }] } }
If Name
is WebVitalsCumulativeLayoutShift
, then ValueKey
must be event_details.value
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.cumulative_layout_shift_event\"]}
If Name
is WebVitalsFirstInputDelay
, then ValueKey
must be event_details.value
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.first_input_delay_event\"]}
If Name
is WebVitalsLargestContentfulPaint
, then ValueKey
must be event_details.value
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.largest_contentful_paint_event\"]}
If Name
is JsErrorCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.js_error_event\"]}
If Name
is HttpErrorCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.http_event\"]}
If Name
is SessionCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.session_start_event\"]}
For custom metrics, the following validation rules apply:
The namespace can't be omitted and can't be AWS/RUM
. You can use the AWS/RUM
namespace only for extended metrics.
All dimensions listed in the DimensionKeys
field must be present in the value of EventPattern
.
The values that you specify for ValueKey
, EventPattern
, and DimensionKeys
must be fields in RUM events, so all first-level keys in these fields must be one of the keys in the list later in this section.
If you set a value for EventPattern
, it must be a JSON object.
For every non-empty event_details
, there must be a non-empty event_type
.
If EventPattern
contains an event_details
field, it must also contain an event_type
. For every built-in event_type
that you use, you must use a value for event_details
that corresponds to that event_type
. For information about event details that correspond to event types, see RUM event details.
In EventPattern
, any JSON array must contain only one value.
Valid key values for first-level keys in the ValueKey
, EventPattern
, and DimensionKeys
fields:
account_id
application_Id
application_version
application_name
batch_id
event_details
event_id
event_interaction
event_timestamp
event_type
event_version
log_stream
metadata
sessionId
user_details
userId
Use this structure to define one extended metric or custom metric that RUM will send to CloudWatch or CloudWatch Evidently. For more information, see Custom metrics and extended metrics that you can send to CloudWatch and CloudWatch Evidently.
This structure is validated differently for extended metrics and custom metrics. For extended metrics that are sent to the AWS/RUM
namespace, the following validations apply:
The Namespace
parameter must be omitted or set to AWS/RUM
.
Only certain combinations of values for Name
, ValueKey
, and EventPattern
are valid. In addition to what is displayed in the following list, the EventPattern
can also include information used by the DimensionKeys
field.
If Name
is PerformanceNavigationDuration
, then ValueKey
must be event_details.duration
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.performance_navigation_event\"]}
If Name
is PerformanceResourceDuration
, then ValueKey
must be event_details.duration
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.performance_resource_event\"]}
If Name
is NavigationSatisfiedTransaction
, then ValueKey
must be null and the EventPattern
must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">\",2000] }] } }
If Name
is NavigationToleratedTransaction
, then ValueKey
must be null and the EventPattern
must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">=\",2000,\"<\",8000] }] } }
If Name
is NavigationFrustratedTransaction
, then ValueKey
must be null and the EventPattern
must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">=\",8000] }] } }
If Name
is WebVitalsCumulativeLayoutShift
, then ValueKey
must be event_details.value
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.cumulative_layout_shift_event\"]}
If Name
is WebVitalsFirstInputDelay
, then ValueKey
must be event_details.value
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.first_input_delay_event\"]}
If Name
is WebVitalsLargestContentfulPaint
, then ValueKey
must be event_details.value
and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.largest_contentful_paint_event\"]}
If Name
is JsErrorCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.js_error_event\"]}
If Name
is HttpErrorCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.http_event\"]}
If Name
is SessionCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.session_start_event\"]}
If Name
is PageViewCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\":[\"com.amazon.rum.page_view_event\"]}
If Name
is Http4xxCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\": [\"com.amazon.rum.http_event\"],\"event_details\":{\"response\":{\"status\":[{\"numeric\":[\">=\",400,\"<\",500]}]}}}
If Name
is Http5xxCount
, then ValueKey
must be null and the EventPattern
must include {\"event_type\": [\"com.amazon.rum.http_event\"],\"event_details\":{\"response\":{\"status\":[{\"numeric\":[\">=\",500,\"<=\",599]}]}}}
For custom metrics, the following validation rules apply:
The namespace can't be omitted and can't be AWS/RUM
. You can use the AWS/RUM
namespace only for extended metrics.
All dimensions listed in the DimensionKeys
field must be present in the value of EventPattern
.
The values that you specify for ValueKey
, EventPattern
, and DimensionKeys
must be fields in RUM events, so all first-level keys in these fields must be one of the keys in the list later in this section.
If you set a value for EventPattern
, it must be a JSON object.
For every non-empty event_details
, there must be a non-empty event_type
.
If EventPattern
contains an event_details
field, it must also contain an event_type
. For every built-in event_type
that you use, you must use a value for event_details
that corresponds to that event_type
. For information about event details that correspond to event types, see RUM event details.
In EventPattern
, any JSON array must contain only one value.
Valid key values for first-level keys in the ValueKey
, EventPattern
, and DimensionKeys
fields:
account_id
application_Id
application_version
application_name
batch_id
event_details
event_id
event_interaction
event_timestamp
event_type
event_version
log_stream
metadata
sessionId
user_details
userId
The metric definition that caused this error.
", "MetricDefinitionsRequest$member": null, @@ -435,7 +435,7 @@ "MetricDestination": { "base": null, "refs": { - "BatchCreateRumMetricDefinitionsRequest$Destination": "The destination to send the metrics to. Valid values are CloudWatch
and Evidently
. If you specify Evidently
, you must also specify the ARN of the CloudWatchEvidently experiment that will receive the metrics and an IAM role that has permission to write to the experiment.
The destination to send the metrics to. Valid values are CloudWatch
and Evidently
. If you specify Evidently
, you must also specify the Amazon Resource Name (ARN) of the CloudWatchEvidently experiment that will receive the metrics and an IAM role that has permission to write to the experiment.
Defines the destination where you want to stop sending the specified metrics. Valid values are CloudWatch
and Evidently
. If you specify Evidently
, you must also specify the ARN of the CloudWatchEvidently experiment that is to be the destination and an IAM role that has permission to write to the experiment.
The type of destination that you want to view metrics for. Valid values are CloudWatch
and Evidently
.
The type of destination to delete. Valid values are CloudWatch
and Evidently
.
The field within the event object that the metric value is sourced from.
", - "MetricDefinitionRequest$ValueKey": "The field within the event object that the metric value is sourced from.
If you omit this field, a hardcoded value of 1 is pushed as the metric value. This is useful if you just want to count the number of events that the filter catches.
If this metric is sent to CloudWatch Evidently, this field will be passed to Evidently raw and Evidently will handle data extraction from the event.
" + "MetricDefinitionRequest$ValueKey": "The field within the event object that the metric value is sourced from.
If you omit this field, a hardcoded value of 1 is pushed as the metric value. This is useful if you want to count the number of events that the filter catches.
If this metric is sent to CloudWatch Evidently, this field will be passed to Evidently raw. Evidently will handle data extraction from the event.
" } } } diff --git a/models/apis/rum/2018-05-10/endpoint-rule-set-1.json b/models/apis/rum/2018-05-10/endpoint-rule-set-1.json index 8fdeb4663ac..a05b4f3a089 100644 --- a/models/apis/rum/2018-05-10/endpoint-rule-set-1.json +++ b/models/apis/rum/2018-05-10/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -58,293 +57,258 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, { - "conditions": [], - "type": "tree", + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": 
"booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rum-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://rum-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, true ] } ], - "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://rum-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://rum-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] + ], + "type": "tree" }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rum.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], - "type": "tree", + ], "rules": [ { "conditions": [], "endpoint": { - "url": "https://rum.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://rum.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + 
"endpoint": { + "url": "https://rum.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" + ], + "type": "tree" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/rum/2018-05-10/endpoint-tests-1.json b/models/apis/rum/2018-05-10/endpoint-tests-1.json index bb59ef12e33..50b7fb1734e 100644 --- a/models/apis/rum/2018-05-10/endpoint-tests-1.json +++ b/models/apis/rum/2018-05-10/endpoint-tests-1.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - 
"UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -269,8 +269,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + 
"expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -282,8 +293,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -295,8 +317,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -308,8 +341,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -321,8 +365,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -334,8 +378,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -347,8 +391,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": 
"https://example.com" } }, @@ -359,8 +403,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -371,10 +415,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/service/appsync/api.go b/service/appsync/api.go index aec19f4d881..5a8064e379c 100644 --- a/service/appsync/api.go +++ b/service/appsync/api.go @@ -7621,11 +7621,13 @@ type CreateApiCacheInput struct { // Controls how cache health metrics will be emitted to CloudWatch. Cache health // metrics include: // - // * NetworkBandwidthOutAllowanceExceeded: The number of times a specified - // GraphQL operation was called. + // * NetworkBandwidthOutAllowanceExceeded: The network packets dropped because + // the throughput exceeded the aggregated bandwidth limit. This is useful + // for diagnosing bottlenecks in a cache configuration. // - // * EngineCPUUtilization: The number of GraphQL errors that occurred during - // a specified GraphQL operation. + // * EngineCPUUtilization: The CPU utilization (percentage) allocated to + // the Redis process. This is useful for diagnosing bottlenecks in a cache + // configuration. // // Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED. HealthMetricsConfig *string `locationName:"healthMetricsConfig" type:"string" enum:"CacheHealthMetricsConfig"` @@ -10823,7 +10825,7 @@ func (s *ElasticsearchDataSourceConfig) SetEndpoint(v string) *ElasticsearchData // resolvers in the request. // // - PER_RESOLVER_METRICS: Records and emits metric data for resolvers that -// have the metricConfig value set to ENABLED. +// have the metricsConfig value set to ENABLED. 
// // dataSourceLevelMetricsBehavior: Controls how data source metrics will be // emitted to CloudWatch. Data source metrics include: @@ -10842,7 +10844,7 @@ func (s *ElasticsearchDataSourceConfig) SetEndpoint(v string) *ElasticsearchData // all data sources in the request. // // - PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources -// that have the metricConfig value set to ENABLED. +// that have the metricsConfig value set to ENABLED. // // operationLevelMetricsConfig: Controls how operation metrics will be emitted // to CloudWatch. Operation metrics include: @@ -10874,7 +10876,7 @@ type EnhancedMetricsConfig struct { // all data sources in the request. // // * PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources - // that have the metricConfig value set to ENABLED. + // that have the metricsConfig value set to ENABLED. // // DataSourceLevelMetricsBehavior is a required field DataSourceLevelMetricsBehavior *string `locationName:"dataSourceLevelMetricsBehavior" type:"string" required:"true" enum:"DataSourceLevelMetricsBehavior"` @@ -10914,7 +10916,7 @@ type EnhancedMetricsConfig struct { // resolvers in the request. // // * PER_RESOLVER_METRICS: Records and emits metric data for resolvers that - // have the metricConfig value set to ENABLED. + // have the metricsConfig value set to ENABLED. // // ResolverLevelMetricsBehavior is a required field ResolverLevelMetricsBehavior *string `locationName:"resolverLevelMetricsBehavior" type:"string" required:"true" enum:"ResolverLevelMetricsBehavior"` @@ -16535,11 +16537,13 @@ type UpdateApiCacheInput struct { // Controls how cache health metrics will be emitted to CloudWatch. Cache health // metrics include: // - // * NetworkBandwidthOutAllowanceExceeded: The number of times a specified - // GraphQL operation was called. + // * NetworkBandwidthOutAllowanceExceeded: The network packets dropped because + // the throughput exceeded the aggregated bandwidth limit. 
This is useful + // for diagnosing bottlenecks in a cache configuration. // - // * EngineCPUUtilization: The number of GraphQL errors that occurred during - // a specified GraphQL operation. + // * EngineCPUUtilization: The CPU utilization (percentage) allocated to + // the Redis process. This is useful for diagnosing bottlenecks in a cache + // configuration. // // Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED. HealthMetricsConfig *string `locationName:"healthMetricsConfig" type:"string" enum:"CacheHealthMetricsConfig"` diff --git a/service/cloudwatchrum/api.go b/service/cloudwatchrum/api.go index 5ffcb7a134a..b9da2b4e70e 100644 --- a/service/cloudwatchrum/api.go +++ b/service/cloudwatchrum/api.go @@ -64,20 +64,20 @@ func (c *CloudWatchRUM) BatchCreateRumMetricDefinitionsRequest(input *BatchCreat // metrics are listed in CloudWatch metrics that you can collect with CloudWatch // RUM (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-metrics.html). // -// In addition to these default metrics, you can choose to send extended metrics -// or custom metrics or both. +// In addition to these default metrics, you can choose to send extended metrics, +// custom metrics, or both. // -// - Extended metrics enable you to send metrics with additional dimensions -// not included in the default metrics. You can also send extended metrics -// to Evidently as well as CloudWatch. The valid dimension names for the -// additional dimensions for extended metrics are BrowserName, CountryCode, -// DeviceType, FileType, OSName, and PageId. For more information, see Extended -// metrics that you can send to CloudWatch and CloudWatch Evidently (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-vended-metrics.html). +// - Extended metrics let you send metrics with additional dimensions that +// aren't included in the default metrics. 
You can also send extended metrics +// to both Evidently and CloudWatch. The valid dimension names for the additional +// dimensions for extended metrics are BrowserName, CountryCode, DeviceType, +// FileType, OSName, and PageId. For more information, see Extended metrics +// that you can send to CloudWatch and CloudWatch Evidently (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-vended-metrics.html). // // - Custom metrics are metrics that you define. You can send custom metrics -// to CloudWatch or to CloudWatch Evidently or to both. With custom metrics, -// you can use any metric name and namespace, and to derive the metrics you -// can use any custom events, built-in events, custom attributes, or default +// to CloudWatch. CloudWatch Evidently, or both. With custom metrics, you +// can use any metric name and namespace. To derive the metrics, you can +// use any custom events, built-in events, custom attributes, or default // attributes. You can't send custom metrics to the AWS/RUM namespace. You // must send custom metrics to a custom namespace that you define. The namespace // that you use can't start with AWS/. CloudWatch RUM prepends RUM/CustomMetrics/ @@ -2129,6 +2129,20 @@ type AppMonitorConfiguration struct { // The ARN of the guest IAM role that is attached to the Amazon Cognito identity // pool that is used to authorize the sending of data to RUM. + // + // It is possible that an app monitor does not have a value for GuestRoleArn. + // For example, this can happen when you use the console to create an app monitor + // and you allow CloudWatch RUM to create a new identity pool for Authorization. + // In this case, GuestRoleArn is not present in the GetAppMonitor (https://docs.aws.amazon.com/cloudwatchrum/latest/APIReference/API_GetAppMonitor.html) + // response because it is not stored by the service. 
+ // + // If this issue affects you, you can take one of the following steps: + // + // * Use the Cloud Development Kit (CDK) to create an identity pool and the + // associated IAM role, and use that for your app monitor. + // + // * Make a separate GetIdentityPoolRoles (https://docs.aws.amazon.com/cognitoidentity/latest/APIReference/API_GetIdentityPoolRoles.html) + // call to Amazon Cognito to retrieve the GuestRoleArn. GuestRoleArn *string `type:"string"` // The ID of the Amazon Cognito identity pool that is used to authorize the @@ -2437,9 +2451,9 @@ type BatchCreateRumMetricDefinitionsInput struct { AppMonitorName *string `location:"uri" locationName:"AppMonitorName" min:"1" type:"string" required:"true"` // The destination to send the metrics to. Valid values are CloudWatch and Evidently. - // If you specify Evidently, you must also specify the ARN of the CloudWatchEvidently - // experiment that will receive the metrics and an IAM role that has permission - // to write to the experiment. + // If you specify Evidently, you must also specify the Amazon Resource Name + // (ARN) of the CloudWatchEvidently experiment that will receive the metrics + // and an IAM role that has permission to write to the experiment. // // Destination is a required field Destination *string `type:"string" required:"true" enum:"MetricDestination"` @@ -2992,7 +3006,7 @@ type CreateAppMonitorInput struct { // structure in your request, and it must include the ID of the Amazon Cognito // identity pool to use for authorization. If you don't include AppMonitorConfiguration, // you must set up your own authorization method. For more information, see - // Authorize your application to send data to Amazon Web Services (https://docs.aws.amazon.com/monitoring/CloudWatch-RUM-get-started-authorization.html). + // Authorize your application to send data to Amazon Web Services (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-get-started-authorization.html). 
// // If you omit this argument, the sample rate used for RUM is set to 10% of // the user sessions. @@ -4128,8 +4142,8 @@ func (s *MetricDefinition) SetValueKey(v string) *MetricDefinition { // Use this structure to define one extended metric or custom metric that RUM // will send to CloudWatch or CloudWatch Evidently. For more information, see -// Additional metrics that you can send to CloudWatch and CloudWatch Evidently -// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-vended-metrics.html). +// Custom metrics and extended metrics that you can send to CloudWatch and CloudWatch +// Evidently (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-custom-and-extended-metrics.html). // // This structure is validated differently for extended metrics and custom metrics. // For extended metrics that are sent to the AWS/RUM namespace, the following @@ -4138,9 +4152,9 @@ func (s *MetricDefinition) SetValueKey(v string) *MetricDefinition { // - The Namespace parameter must be omitted or set to AWS/RUM. // // - Only certain combinations of values for Name, ValueKey, and EventPattern -// are valid. In addition to what is displayed in the list below, the EventPattern -// can also include information used by the DimensionKeys field. If Name -// is PerformanceNavigationDuration, then ValueKeymust be event_details.duration +// are valid. In addition to what is displayed in the following list, the +// EventPattern can also include information used by the DimensionKeys field. 
+// If Name is PerformanceNavigationDuration, then ValueKeymust be event_details.duration // and the EventPattern must include {"event_type":["com.amazon.rum.performance_navigation_event"]} // If Name is PerformanceResourceDuration, then ValueKeymust be event_details.duration // and the EventPattern must include {"event_type":["com.amazon.rum.performance_resource_event"]} @@ -4164,6 +4178,13 @@ func (s *MetricDefinition) SetValueKey(v string) *MetricDefinition { // is HttpErrorCount, then ValueKeymust be null and the EventPattern must // include {"event_type":["com.amazon.rum.http_event"]} If Name is SessionCount, // then ValueKeymust be null and the EventPattern must include {"event_type":["com.amazon.rum.session_start_event"]} +// If Name is PageViewCount, then ValueKeymust be null and the EventPattern +// must include {"event_type":["com.amazon.rum.page_view_event"]} If Name +// is Http4xxCount, then ValueKeymust be null and the EventPattern must include +// {"event_type": ["com.amazon.rum.http_event"],"event_details":{"response":{"status":[{"numeric":[">=",400,"<",500]}]}}} +// } If Name is Http5xxCount, then ValueKeymust be null and the EventPattern +// must include {"event_type": ["com.amazon.rum.http_event"],"event_details":{"response":{"status":[{"numeric":[">=",500,"<=",599]}]}}} +// } // // For custom metrics, the following validation rules apply: // @@ -4268,7 +4289,7 @@ type MetricDefinitionRequest struct { // { "browserName": [ "Chrome", "Safari" ], "countryCode": [ "US" ] }, "event_details": // { "duration": [{ "numeric": [ ">=", 2000, "<", 8000 ] }] } }' // - // If the metrics destination' is CloudWatch and the event also matches a value + // If the metrics destination is CloudWatch and the event also matches a value // in DimensionKeys, then the metric is published with the specified dimensions. 
EventPattern *string `type:"string"` @@ -4315,11 +4336,11 @@ type MetricDefinitionRequest struct { // The field within the event object that the metric value is sourced from. // // If you omit this field, a hardcoded value of 1 is pushed as the metric value. - // This is useful if you just want to count the number of events that the filter + // This is useful if you want to count the number of events that the filter // catches. // // If this metric is sent to CloudWatch Evidently, this field will be passed - // to Evidently raw and Evidently will handle data extraction from the event. + // to Evidently raw. Evidently will handle data extraction from the event. ValueKey *string `min:"1" type:"string"` } @@ -4624,11 +4645,17 @@ type PutRumMetricsDestinationInput struct { DestinationArn *string `type:"string"` // This parameter is required if Destination is Evidently. If Destination is - // CloudWatch, do not use this parameter. + // CloudWatch, don't use this parameter. // // This parameter specifies the ARN of an IAM role that RUM will assume to write // to the Evidently experiment that you are sending metrics to. This role must // have permission to write to that experiment. + // + // If you specify this parameter, you must be signed on to a role that has PassRole + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) + // permissions attached to it, to allow the role to be passed. The CloudWatchAmazonCloudWatchRUMFullAccess + // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/auth-and-access-control-cw.html#managed-policies-cloudwatch-RUM) + // policy doesn't include PassRole permissions. IamRoleArn *string `type:"string"` } @@ -5319,7 +5346,7 @@ type UpdateAppMonitorInput struct { // structure in your request, and it must include the ID of the Amazon Cognito // identity pool to use for authorization. If you don't include AppMonitorConfiguration, // you must set up your own authorization method. 
For more information, see - // Authorize your application to send data to Amazon Web Services (https://docs.aws.amazon.com/monitoring/CloudWatch-RUM-get-started-authorization.html). + // Authorize your application to send data to Amazon Web Services (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-get-started-authorization.html). AppMonitorConfiguration *AppMonitorConfiguration `type:"structure"` // Specifies whether this app monitor allows the web client to define and send diff --git a/service/qldb/api.go b/service/qldb/api.go index d03be554506..3a739cfb8ea 100644 --- a/service/qldb/api.go +++ b/service/qldb/api.go @@ -2666,7 +2666,8 @@ type DescribeLedgerOutput struct { // Information about the encryption of data at rest in the ledger. This includes // the current status, the KMS key, and when the key became inaccessible (in - // the case of an error). + // the case of an error). If this parameter is undefined, the ledger uses an + // Amazon Web Services owned KMS key for encryption. EncryptionDescription *LedgerEncryptionDescription `type:"structure"` // The name of the ledger. @@ -3799,7 +3800,9 @@ type LedgerEncryptionDescription struct { // The Amazon Resource Name (ARN) of the customer managed KMS key that the ledger // uses for encryption at rest. If this parameter is undefined, the ledger uses - // an Amazon Web Services owned KMS key for encryption. + // an Amazon Web Services owned KMS key for encryption. It will display AWS_OWNED_KMS_KEY + // when updating the ledger's encryption configuration to the Amazon Web Services + // owned KMS key. // // KmsKeyArn is a required field KmsKeyArn *string `min:"20" type:"string" required:"true"` diff --git a/service/rds/api.go b/service/rds/api.go index 09d64650a54..079d2f53249 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -26460,7 +26460,7 @@ type CreateDBShardGroupOutput struct { DBClusterIdentifier *string `type:"string"` // The name of the DB shard group. 
- DBShardGroupIdentifier *string `type:"string"` + DBShardGroupIdentifier *string `min:"1" type:"string"` // The Amazon Web Services Region-unique, immutable identifier for the DB shard // group. @@ -32660,7 +32660,7 @@ type DBShardGroup struct { DBClusterIdentifier *string `type:"string"` // The name of the DB shard group. - DBShardGroupIdentifier *string `type:"string"` + DBShardGroupIdentifier *string `min:"1" type:"string"` // The Amazon Web Services Region-unique, immutable identifier for the DB shard // group. @@ -35135,7 +35135,7 @@ type DeleteDBShardGroupInput struct { // Teh name of the DB shard group to delete. // // DBShardGroupIdentifier is a required field - DBShardGroupIdentifier *string `type:"string" required:"true"` + DBShardGroupIdentifier *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -35162,6 +35162,9 @@ func (s *DeleteDBShardGroupInput) Validate() error { if s.DBShardGroupIdentifier == nil { invalidParams.Add(request.NewErrParamRequired("DBShardGroupIdentifier")) } + if s.DBShardGroupIdentifier != nil && len(*s.DBShardGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBShardGroupIdentifier", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -35195,7 +35198,7 @@ type DeleteDBShardGroupOutput struct { DBClusterIdentifier *string `type:"string"` // The name of the DB shard group. - DBShardGroupIdentifier *string `type:"string"` + DBShardGroupIdentifier *string `min:"1" type:"string"` // The Amazon Web Services Region-unique, immutable identifier for the DB shard // group. @@ -39672,7 +39675,7 @@ type DescribeDBShardGroupsInput struct { // Constraints: // // * If supplied, must match an existing DB shard group identifier. - DBShardGroupIdentifier *string `type:"string"` + DBShardGroupIdentifier *string `min:"1" type:"string"` // A filter that specifies one or more DB shard groups to describe. 
Filters []*Filter `locationNameList:"Filter" type:"list"` @@ -39713,6 +39716,9 @@ func (s DescribeDBShardGroupsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeDBShardGroupsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeDBShardGroupsInput"} + if s.DBShardGroupIdentifier != nil && len(*s.DBShardGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBShardGroupIdentifier", 1)) + } if s.MaxRecords != nil && *s.MaxRecords < 20 { invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 20)) } @@ -49618,7 +49624,7 @@ type ModifyDBShardGroupInput struct { // The name of the DB shard group to modify. // // DBShardGroupIdentifier is a required field - DBShardGroupIdentifier *string `type:"string" required:"true"` + DBShardGroupIdentifier *string `min:"1" type:"string" required:"true"` // The maximum capacity of the DB shard group in Aurora capacity units (ACUs). MaxACU *float64 `type:"double"` @@ -49648,6 +49654,9 @@ func (s *ModifyDBShardGroupInput) Validate() error { if s.DBShardGroupIdentifier == nil { invalidParams.Add(request.NewErrParamRequired("DBShardGroupIdentifier")) } + if s.DBShardGroupIdentifier != nil && len(*s.DBShardGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBShardGroupIdentifier", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -49687,7 +49696,7 @@ type ModifyDBShardGroupOutput struct { DBClusterIdentifier *string `type:"string"` // The name of the DB shard group. - DBShardGroupIdentifier *string `type:"string"` + DBShardGroupIdentifier *string `min:"1" type:"string"` // The Amazon Web Services Region-unique, immutable identifier for the DB shard // group. @@ -53328,7 +53337,7 @@ type RebootDBShardGroupInput struct { // The name of the DB shard group to reboot. 
// // DBShardGroupIdentifier is a required field - DBShardGroupIdentifier *string `type:"string" required:"true"` + DBShardGroupIdentifier *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -53355,6 +53364,9 @@ func (s *RebootDBShardGroupInput) Validate() error { if s.DBShardGroupIdentifier == nil { invalidParams.Add(request.NewErrParamRequired("DBShardGroupIdentifier")) } + if s.DBShardGroupIdentifier != nil && len(*s.DBShardGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DBShardGroupIdentifier", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -53388,7 +53400,7 @@ type RebootDBShardGroupOutput struct { DBClusterIdentifier *string `type:"string"` // The name of the DB shard group. - DBShardGroupIdentifier *string `type:"string"` + DBShardGroupIdentifier *string `min:"1" type:"string"` // The Amazon Web Services Region-unique, immutable identifier for the DB shard // group.