From f70c9addacea9c0e3a8cc4d2a2037b7ddc3b5c12 Mon Sep 17 00:00:00 2001 From: Godwin Rose Samuel Date: Wed, 23 Oct 2024 10:32:01 -0700 Subject: [PATCH 1/8] chore: update valid appsync function runtime in tests (#3670) --- tests/translator/input/graphqlapi_function_by_id.yaml | 2 +- .../graphqlapi_multiple_none_datasource_functions.yaml | 8 ++++---- ...aphqlapi_resolver_function_with_lambda_datasource.yaml | 2 +- .../output/aws-cn/graphqlapi_function_by_id.json | 2 +- .../graphqlapi_multiple_none_datasource_functions.json | 8 ++++---- ...aphqlapi_resolver_function_with_lambda_datasource.json | 2 +- .../output/aws-us-gov/graphqlapi_function_by_id.json | 2 +- .../graphqlapi_multiple_none_datasource_functions.json | 8 ++++---- ...aphqlapi_resolver_function_with_lambda_datasource.json | 2 +- tests/translator/output/graphqlapi_function_by_id.json | 2 +- .../graphqlapi_multiple_none_datasource_functions.json | 8 ++++---- ...aphqlapi_resolver_function_with_lambda_datasource.json | 2 +- 12 files changed, 24 insertions(+), 24 deletions(-) diff --git a/tests/translator/input/graphqlapi_function_by_id.yaml b/tests/translator/input/graphqlapi_function_by_id.yaml index 6c45cc637..d089338e6 100644 --- a/tests/translator/input/graphqlapi_function_by_id.yaml +++ b/tests/translator/input/graphqlapi_function_by_id.yaml @@ -22,5 +22,5 @@ Resources: DataSourceName: some-cool-datasource Name: MyFunction Runtime: - Name: some-runtime + Name: APPSYNC_JS RuntimeVersion: 1.2.3 diff --git a/tests/translator/input/graphqlapi_multiple_none_datasource_functions.yaml b/tests/translator/input/graphqlapi_multiple_none_datasource_functions.yaml index f71751ecc..a711ffd00 100644 --- a/tests/translator/input/graphqlapi_multiple_none_datasource_functions.yaml +++ b/tests/translator/input/graphqlapi_multiple_none_datasource_functions.yaml @@ -15,23 +15,23 @@ Resources: CodeUri: my-code-uri DataSource: NONE Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 AnotherFunction: CodeUri: my-code-uri DataSource: None Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 SimilarFunction: CodeUri: my-code-uri DataSource: none Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 GoodFunction: CodeUri: my-code-uri DataSource: nOnE Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 diff --git a/tests/translator/input/graphqlapi_resolver_function_with_lambda_datasource.yaml b/tests/translator/input/graphqlapi_resolver_function_with_lambda_datasource.yaml index eff5425cc..98a77993e 100644 --- a/tests/translator/input/graphqlapi_resolver_function_with_lambda_datasource.yaml +++ b/tests/translator/input/graphqlapi_resolver_function_with_lambda_datasource.yaml @@ -50,7 +50,7 @@ Resources: Functions: MyFunction: Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 InlineCode: this is my epic code DataSource: MyDataSource diff --git a/tests/translator/output/aws-cn/graphqlapi_function_by_id.json b/tests/translator/output/aws-cn/graphqlapi_function_by_id.json index ea9a7fc1c..678cbf2bf 100644 --- a/tests/translator/output/aws-cn/graphqlapi_function_by_id.json +++ b/tests/translator/output/aws-cn/graphqlapi_function_by_id.json @@ -12,7 +12,7 @@ "DataSourceName": "some-cool-datasource", "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-cn/graphqlapi_multiple_none_datasource_functions.json b/tests/translator/output/aws-cn/graphqlapi_multiple_none_datasource_functions.json index 
43fb34207..a245392a3 100644 --- a/tests/translator/output/aws-cn/graphqlapi_multiple_none_datasource_functions.json +++ b/tests/translator/output/aws-cn/graphqlapi_multiple_none_datasource_functions.json @@ -39,7 +39,7 @@ }, "Name": "AnotherFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -88,7 +88,7 @@ }, "Name": "GoodFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -111,7 +111,7 @@ }, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -159,7 +159,7 @@ }, "Name": "SimilarFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-cn/graphqlapi_resolver_function_with_lambda_datasource.json b/tests/translator/output/aws-cn/graphqlapi_resolver_function_with_lambda_datasource.json index dfce18342..c6021bf37 100644 --- a/tests/translator/output/aws-cn/graphqlapi_resolver_function_with_lambda_datasource.json +++ b/tests/translator/output/aws-cn/graphqlapi_resolver_function_with_lambda_datasource.json @@ -173,7 +173,7 @@ "MaxBatchSize": 10, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-us-gov/graphqlapi_function_by_id.json b/tests/translator/output/aws-us-gov/graphqlapi_function_by_id.json index ea9a7fc1c..678cbf2bf 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_function_by_id.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_function_by_id.json @@ -12,7 +12,7 @@ "DataSourceName": "some-cool-datasource", "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-us-gov/graphqlapi_multiple_none_datasource_functions.json b/tests/translator/output/aws-us-gov/graphqlapi_multiple_none_datasource_functions.json index 43fb34207..a245392a3 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_multiple_none_datasource_functions.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_multiple_none_datasource_functions.json @@ -39,7 +39,7 @@ }, "Name": "AnotherFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -88,7 +88,7 @@ }, "Name": "GoodFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -111,7 +111,7 @@ }, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -159,7 +159,7 @@ }, "Name": "SimilarFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-us-gov/graphqlapi_resolver_function_with_lambda_datasource.json b/tests/translator/output/aws-us-gov/graphqlapi_resolver_function_with_lambda_datasource.json index dfce18342..c6021bf37 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_resolver_function_with_lambda_datasource.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_resolver_function_with_lambda_datasource.json @@ -173,7 +173,7 @@ "MaxBatchSize": 10, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/graphqlapi_function_by_id.json b/tests/translator/output/graphqlapi_function_by_id.json index ea9a7fc1c..678cbf2bf 100644 --- 
a/tests/translator/output/graphqlapi_function_by_id.json +++ b/tests/translator/output/graphqlapi_function_by_id.json @@ -12,7 +12,7 @@ "DataSourceName": "some-cool-datasource", "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/graphqlapi_multiple_none_datasource_functions.json b/tests/translator/output/graphqlapi_multiple_none_datasource_functions.json index 43fb34207..a245392a3 100644 --- a/tests/translator/output/graphqlapi_multiple_none_datasource_functions.json +++ b/tests/translator/output/graphqlapi_multiple_none_datasource_functions.json @@ -39,7 +39,7 @@ }, "Name": "AnotherFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -88,7 +88,7 @@ }, "Name": "GoodFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -111,7 +111,7 @@ }, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -159,7 +159,7 @@ }, "Name": "SimilarFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/graphqlapi_resolver_function_with_lambda_datasource.json b/tests/translator/output/graphqlapi_resolver_function_with_lambda_datasource.json index dfce18342..c6021bf37 100644 --- a/tests/translator/output/graphqlapi_resolver_function_with_lambda_datasource.json +++ b/tests/translator/output/graphqlapi_resolver_function_with_lambda_datasource.json @@ -173,7 +173,7 @@ "MaxBatchSize": 10, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, From 77672ce598e2673a3c2e8692e95785b7aab0b32f Mon Sep 17 00:00:00 2001 From: Xia Zhao <78883180+xazhao@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:40:51 -0700 Subject: [PATCH 2/8] chore: fix graphql transform test template (#3672) --- tests/translator/input/graphqlapi_cognito_default_auth.yaml | 2 +- .../output/aws-cn/graphqlapi_cognito_default_auth.json | 2 +- .../output/aws-us-gov/graphqlapi_cognito_default_auth.json | 2 +- tests/translator/output/graphqlapi_cognito_default_auth.json | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/translator/input/graphqlapi_cognito_default_auth.yaml b/tests/translator/input/graphqlapi_cognito_default_auth.yaml index 7754ffa96..9078f87f7 100644 --- a/tests/translator/input/graphqlapi_cognito_default_auth.yaml +++ b/tests/translator/input/graphqlapi_cognito_default_auth.yaml @@ -16,7 +16,7 @@ Resources: AppIdClientRegex: myregex AwsRegion: na-east-1 # This default action will exist post transform since this is our default authentication. 
- DefaultAction: something + DefaultAction: ALLOW UserPoolId: myid Tags: key1: value1 diff --git a/tests/translator/output/aws-cn/graphqlapi_cognito_default_auth.json b/tests/translator/output/aws-cn/graphqlapi_cognito_default_auth.json index e32b49b83..391715a04 100644 --- a/tests/translator/output/aws-cn/graphqlapi_cognito_default_auth.json +++ b/tests/translator/output/aws-cn/graphqlapi_cognito_default_auth.json @@ -30,7 +30,7 @@ "UserPoolConfig": { "AppIdClientRegex": "myregex", "AwsRegion": "na-east-1", - "DefaultAction": "something", + "DefaultAction": "ALLOW", "UserPoolId": "myid" }, "XrayEnabled": true diff --git a/tests/translator/output/aws-us-gov/graphqlapi_cognito_default_auth.json b/tests/translator/output/aws-us-gov/graphqlapi_cognito_default_auth.json index e32b49b83..391715a04 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_cognito_default_auth.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_cognito_default_auth.json @@ -30,7 +30,7 @@ "UserPoolConfig": { "AppIdClientRegex": "myregex", "AwsRegion": "na-east-1", - "DefaultAction": "something", + "DefaultAction": "ALLOW", "UserPoolId": "myid" }, "XrayEnabled": true diff --git a/tests/translator/output/graphqlapi_cognito_default_auth.json b/tests/translator/output/graphqlapi_cognito_default_auth.json index e32b49b83..391715a04 100644 --- a/tests/translator/output/graphqlapi_cognito_default_auth.json +++ b/tests/translator/output/graphqlapi_cognito_default_auth.json @@ -30,7 +30,7 @@ "UserPoolConfig": { "AppIdClientRegex": "myregex", "AwsRegion": "na-east-1", - "DefaultAction": "something", + "DefaultAction": "ALLOW", "UserPoolId": "myid" }, "XrayEnabled": true From 51fec4bfd514c1610772c351209247b60f409d5c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 12:55:15 -0700 Subject: [PATCH 3/8] chore(schema): update (#3671) Co-authored-by: github-actions Co-authored-by: Jacob Fuss <32497805+jfuss@users.noreply.github.com> --- samtranslator/schema/schema.json | 286 +++++++------- schema_source/cloudformation-docs.json | 475 ++++++++++++++--------- schema_source/cloudformation.schema.json | 286 +++++++------- 3 files changed, 580 insertions(+), 467 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 6ccff19b7..556c36c6d 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -7890,7 +7890,7 @@ "additionalProperties": false, "properties": { "DomainName": { - "markdownDescription": "The custom domain name for your API in Amazon API Gateway. Uppercase letters are not supported.", + "markdownDescription": "The custom domain name for your API in Amazon API Gateway. Uppercase letters and the underscore ( `_` ) character are not supported.", "title": "DomainName", "type": "string" }, @@ -10839,7 +10839,7 @@ "type": "string" }, "DisableSSO": { - "markdownDescription": "", + "markdownDescription": "If you set this parameter to `true` , Amazon AppFlow bypasses the single sign-on (SSO) settings in your SAP account when it accesses your SAP OData instance.\n\nWhether you need this option depends on the types of credentials that you applied to your SAP OData connection profile. If your profile uses basic authentication credentials, SAP SSO can prevent Amazon AppFlow from connecting to your account with your username and password. In this case, bypassing SSO makes it possible for Amazon AppFlow to connect successfully. 
However, if your profile uses OAuth credentials, this parameter has no affect.", "title": "DisableSSO", "type": "boolean" }, @@ -10890,7 +10890,7 @@ "type": "string" }, "OAuth2GrantType": { - "markdownDescription": "Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records.\n\nYou can specify one of the following values:\n\n- **AUTHORIZATION_CODE** - Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records.\n- **CLIENT_CREDENTIALS** - Amazon AppFlow passes client credentials (a client ID and client secret) when it requests the access token from Salesforce. You provide these credentials to Amazon AppFlow when you define the connection to your Salesforce account.\n- **JWT_BEARER** - Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records.", + "markdownDescription": "Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records.\n\nYou can specify one of the following values:\n\n- **AUTHORIZATION_CODE** - Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records.\n- **JWT_BEARER** - Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records.\n\n> The CLIENT_CREDENTIALS value is not supported for Salesforce.", "title": "OAuth2GrantType", "type": "string" }, @@ -10928,7 +10928,7 @@ "properties": { "OAuth2Credentials": { "$ref": "#/definitions/AWS::AppFlow::ConnectorProfile.OAuth2Credentials", - "markdownDescription": "", + "markdownDescription": "The OAuth 2.0 credentials required to authenticate the user.", "title": "OAuth2Credentials" }, "Password": { @@ -11987,7 +11987,7 @@ "additionalProperties": false, "properties": { "maxPageSize": { - "markdownDescription": "", + "markdownDescription": "The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application. For transfers of OData records, the maximum page size is 3,000. 
For transfers of data that comes from an ODP provider, the maximum page size is 10,000.", "title": "maxPageSize", "type": "number" } @@ -12001,7 +12001,7 @@ "additionalProperties": false, "properties": { "maxParallelism": { - "markdownDescription": "", + "markdownDescription": "The maximum number of processes that Amazon AppFlow runs at the same time when it retrieves your data from your SAP application.", "title": "maxParallelism", "type": "number" } @@ -12021,12 +12021,12 @@ }, "paginationConfig": { "$ref": "#/definitions/AWS::AppFlow::Flow.SAPODataPaginationConfig", - "markdownDescription": "", + "markdownDescription": "Sets the page size for each concurrent process that transfers OData records from your SAP instance.", "title": "paginationConfig" }, "parallelismConfig": { "$ref": "#/definitions/AWS::AppFlow::Flow.SAPODataParallelismConfig", - "markdownDescription": "", + "markdownDescription": "Sets the number of concurrent processes that transfers OData records from your SAP instance.", "title": "parallelismConfig" } }, @@ -19193,9 +19193,7 @@ "title": "DynamoDBConfig" }, "ElasticsearchConfig": { - "$ref": "#/definitions/AWS::AppSync::DataSource.ElasticsearchConfig", - "markdownDescription": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\n\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service . This property is deprecated. For new data sources, use *OpenSearchServiceConfig* to specify an OpenSearch Service data source.", - "title": "ElasticsearchConfig" + "$ref": "#/definitions/AWS::AppSync::DataSource.ElasticsearchConfig" }, "EventBridgeConfig": { "$ref": "#/definitions/AWS::AppSync::DataSource.EventBridgeConfig", @@ -19371,13 +19369,9 @@ "additionalProperties": false, "properties": { "AwsRegion": { - "markdownDescription": "The AWS Region.", - "title": "AwsRegion", "type": "string" }, "Endpoint": { - "markdownDescription": "The endpoint.", - "title": "Endpoint", "type": "string" } }, @@ -41241,12 +41235,12 @@ "type": "number" }, "ComputeType": { - "markdownDescription": "> Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "markdownDescription": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "title": "ComputeType", "type": "string" }, "EnvironmentType": { - "markdownDescription": "> Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "title": "EnvironmentType", "type": "string" }, @@ -45793,7 +45787,7 @@ "properties": { "AccountRecoverySetting": { "$ref": "#/definitions/AWS::Cognito::UserPool.AccountRecoverySetting", - "markdownDescription": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email.", + "markdownDescription": "The available verified method a user can use to recover their password when they call `ForgotPassword` . You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. 
In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email.", "title": "AccountRecoverySetting" }, "AdminCreateUserConfig": { @@ -45805,7 +45799,7 @@ "items": { "type": "string" }, - "markdownDescription": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n> This user pool property cannot be updated.", + "markdownDescription": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .", "title": "AliasAttributes", "type": "array" }, @@ -45833,12 +45827,12 @@ "title": "EmailConfiguration" }, "EmailVerificationMessage": { - "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", + "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", "title": "EmailVerificationMessage", "type": "string" }, "EmailVerificationSubject": { - "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", + "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", "title": "EmailVerificationSubject", "type": "string" }, @@ -45846,7 +45840,7 @@ "items": { "type": "string" }, - "markdownDescription": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \u201cOFF\u201d and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \u201cOFF\u201d. Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`", + "markdownDescription": "Set enabled MFA options on a specified user pool. To disable all MFAs after it has been enabled, set `MfaConfiguration` to `OFF` and remove EnabledMfas. MFAs can only be all disabled if `MfaConfiguration` is `OFF` . After you enable `SMS_MFA` , you can only disable it by setting `MfaConfiguration` to `OFF` . Can be one of the following values:\n\n- `SMS_MFA` - Enables MFA with SMS for the user pool. To select this option, you must also provide values for `SmsConfiguration` .\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n- `EMAIL_OTP` - Enables MFA with email for the user pool. To select this option, you must provide values for `EmailConfiguration` and within those, set `EmailSendingAccount` to `DEVELOPER` .\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA` | `EMAIL_OTP`", "title": "EnabledMfas", "type": "array" }, @@ -45869,12 +45863,12 @@ "items": { "$ref": "#/definitions/AWS::Cognito::UserPool.SchemaAttribute" }, - "markdownDescription": "The schema attributes for the new user pool. 
These attributes can be standard or custom attributes.\n\n> During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute.", + "markdownDescription": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.", "title": "Schema", "type": "array" }, "SmsAuthenticationMessage": { - "markdownDescription": "A string representing the SMS authentication message.", + "markdownDescription": "The contents of the SMS authentication message.", "title": "SmsAuthenticationMessage", "type": "string" }, @@ -45884,7 +45878,7 @@ "title": "SmsConfiguration" }, "SmsVerificationMessage": { - "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", + "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", "title": "SmsVerificationMessage", "type": "string" }, @@ -45918,13 +45912,13 @@ "items": { "type": "string" }, - "markdownDescription": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated.", + "markdownDescription": "Specifies whether a user can use an email address or phone number as a username when they sign up.", "title": "UsernameAttributes", "type": "array" }, "UsernameConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPool.UsernameConfiguration", - "markdownDescription": "You can choose to set case sensitivity on the username input for the selected sign-in option. For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set.", + "markdownDescription": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) .", "title": "UsernameConfiguration" }, "VerificationMessageTemplate": { @@ -45962,7 +45956,7 @@ "items": { "$ref": "#/definitions/AWS::Cognito::UserPool.RecoveryOption" }, - "markdownDescription": "The list of `RecoveryOptionTypes` .", + "markdownDescription": "The list of options and priorities for user message delivery in forgot-password operations. 
Sets or displays user pool preferences for email or SMS message priority, whether users should fall back to a second delivery method, and whether passwords should only be reset by administrators.", "title": "RecoveryMechanisms", "type": "array" } @@ -45979,7 +45973,7 @@ }, "InviteMessageTemplate": { "$ref": "#/definitions/AWS::Cognito::UserPool.InviteMessageTemplate", - "markdownDescription": "The message template to be used for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .", + "markdownDescription": "The template for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .", "title": "InviteMessageTemplate" }, "UnusedAccountValidityDays": { @@ -45994,12 +45988,12 @@ "additionalProperties": false, "properties": { "LambdaArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send email notifications to users.", + "markdownDescription": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.", "title": "LambdaArn", "type": "string" }, "LambdaVersion": { - "markdownDescription": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information that Amazon Cognito passes to your custom email sender AWS Lambda function. The only supported value is `V1_0` .", + "markdownDescription": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function.", "title": "LambdaVersion", "type": "string" } @@ -46010,12 +46004,12 @@ "additionalProperties": false, "properties": { "LambdaArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send SMS notifications to users.", + "markdownDescription": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.", "title": "LambdaArn", "type": "string" }, "LambdaVersion": { - "markdownDescription": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS sender Lambda function. The only supported value is `V1_0` .", + "markdownDescription": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function.", "title": "LambdaVersion", "type": "string" } @@ -46042,7 +46036,7 @@ "additionalProperties": false, "properties": { "ConfigurationSet": { - "markdownDescription": "The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. 
Configuration sets can be used to apply the following types of rules to emails:\n\n- Event publishing \u2013 Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.\n- IP pool management \u2013 When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.", + "markdownDescription": "The set of configuration rules that can be applied to emails sent using Amazon Simple Email Service. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- **Event publishing** - Amazon Simple Email Service can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as and Amazon CloudWatch\n- **IP pool management** - When leasing dedicated IP addresses with Amazon Simple Email Service, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.", "title": "ConfigurationSet", "type": "string" }, @@ -46052,7 +46046,7 @@ "type": "string" }, "From": { - "markdownDescription": "Identifies either the sender's email address or the sender's name with their email address. For example, `testuser@example.com` or `Test User ` . This address appears before the body of the email.", + "markdownDescription": "Either the sender\u2019s email address or the sender\u2019s name with their email address. For example, `testuser@example.com` or `Test User ` . This address appears before the body of the email.", "title": "From", "type": "string" }, @@ -46100,7 +46094,7 @@ }, "CustomEmailSender": { "$ref": "#/definitions/AWS::Cognito::UserPool.CustomEmailSender", - "markdownDescription": "A custom email sender AWS Lambda trigger.", + "markdownDescription": "The configuration of a custom email sender Lambda trigger. This trigger routes all email notifications from a user pool to a Lambda function that delivers the message using custom logic.", "title": "CustomEmailSender" }, "CustomMessage": { @@ -46110,7 +46104,7 @@ }, "CustomSMSSender": { "$ref": "#/definitions/AWS::Cognito::UserPool.CustomSMSSender", - "markdownDescription": "A custom SMS sender AWS Lambda trigger.", + "markdownDescription": "The configuration of a custom SMS sender Lambda trigger. This trigger routes all SMS notifications from a user pool to a Lambda function that delivers the message using custom logic.", "title": "CustomSMSSender" }, "DefineAuthChallenge": { @@ -46119,7 +46113,7 @@ "type": "string" }, "KMSKeyID": { - "markdownDescription": "The Amazon Resource Name of a AWS Key Management Service ( AWS KMS ) key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to `CustomEmailSender` and `CustomSMSSender` .", + "markdownDescription": "The ARN of an [KMS key](https://docs.aws.amazon.com//kms/latest/developerguide/concepts.html#master_keys) . 
Amazon Cognito uses the key to encrypt codes and temporary passwords sent to custom sender Lambda triggers.", "title": "KMSKeyID", "type": "string" }, @@ -46249,12 +46243,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "Specifies the recovery method for a user.", + "markdownDescription": "The recovery method that this object sets a recovery option for.", "title": "Name", "type": "string" }, "Priority": { - "markdownDescription": "A positive integer specifying priority of a method with 1 being the highest priority.", + "markdownDescription": "Your priority preference for using the specified attribute in account recovery. The highest priority is `1` .", "title": "Priority", "type": "number" } @@ -46270,7 +46264,7 @@ "type": "string" }, "DeveloperOnlyAttribute": { - "markdownDescription": "> We recommend that you use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users will not be able to modify this attribute using their access token.", + "markdownDescription": "> You should use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users won't be able to modify this attribute using their access token. For example, `DeveloperOnlyAttribute` can be modified using AdminUpdateUserAttributes but can't be updated using UpdateUserAttributes.", "title": "DeveloperOnlyAttribute", "type": "boolean" }, @@ -46306,7 +46300,7 @@ "additionalProperties": false, "properties": { "ExternalId": { - "markdownDescription": "The external ID is a value. We recommend you use `ExternalId` to add security to your IAM role, which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , the Cognito User Pool uses it when attempting to assume your IAM role. You can also set your roles trust policy to require the `ExternalID` . If you use the Cognito Management Console to create a role for SMS MFA, Cognito creates a role with the required permissions and a trust policy that uses `ExternalId` .", + "markdownDescription": "The external ID provides additional security for your IAM role. You can use an `ExternalId` with the IAM role that you use with Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , your Amazon Cognito user pool includes it in the request to assume your IAM role. You can configure the role trust policy to require that Amazon Cognito, and any principal, provide the `ExternalID` . 
If you use the Amazon Cognito Management Console to create a role for SMS multi-factor authentication (MFA), Amazon Cognito creates a role with the required permissions and a trust policy that demonstrates use of the `ExternalId` .\n\nFor more information about the `ExternalId` of a role, see [How to use an external ID when granting access to your AWS resources to a third party](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) .", "title": "ExternalId", "type": "string" }, @@ -46450,7 +46444,7 @@ "additionalProperties": false, "properties": { "AccessTokenValidity": { - "markdownDescription": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with their access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours.", + "markdownDescription": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour.", "title": "AccessTokenValidity", "type": "number" }, @@ -46527,7 +46521,7 @@ "type": "boolean" }, "IdTokenValidity": { - "markdownDescription": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours.", + "markdownDescription": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour.", "title": "IdTokenValidity", "type": "number" }, @@ -46540,7 +46534,7 @@ "type": "array" }, "PreventUserExistenceErrors": { - "markdownDescription": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. 
When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.", + "markdownDescription": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value.", "title": "PreventUserExistenceErrors", "type": "string" }, @@ -46553,7 +46547,7 @@ "type": "array" }, "RefreshTokenValidity": { - "markdownDescription": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days.", + "markdownDescription": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days.", "title": "RefreshTokenValidity", "type": "number" }, @@ -46614,7 +46608,7 @@ "additionalProperties": false, "properties": { "ApplicationArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint project for integration with the chosen user pool client. Amazon Cognito publishes events to the Amazon Pinpoint project that the app ARN declares.", + "markdownDescription": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project that you want to connect to your user pool app client. Amazon Cognito publishes events to the Amazon Pinpoint project that `ApplicationArn` declares. 
You can also configure your application to pass an endpoint ID in the `AnalyticsMetadata` parameter of sign-in operations. The endpoint ID is information about the destination for push notifications", "title": "ApplicationArn", "type": "string" }, @@ -46703,12 +46697,12 @@ "title": "CustomDomainConfig" }, "Domain": { - "markdownDescription": "The domain name for the domain that hosts the sign-up and sign-in pages for your application. For example: `auth.example.com` . If you're using a prefix domain, this field denotes the first part of the domain before `.auth.[region].amazoncognito.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", + "markdownDescription": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", "title": "Domain", "type": "string" }, "UserPoolId": { - "markdownDescription": "The user pool ID for the user pool where you want to associate a user pool domain.", + "markdownDescription": "The ID of the user pool that is associated with the custom domain whose certificate you're updating.", "title": "UserPoolId", "type": "string" } @@ -46971,7 +46965,7 @@ "additionalProperties": false, "properties": { "Identifier": { - "markdownDescription": "A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. For example: `https://my-weather-api.example.com` .", + "markdownDescription": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens.", "title": "Identifier", "type": "string" }, @@ -47083,7 +47077,7 @@ "title": "AccountTakeoverRiskConfiguration" }, "ClientId": { - "markdownDescription": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` ).", + "markdownDescription": "The app client where this configuration is applied. When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings.", "title": "ClientId", "type": "string" }, @@ -47352,7 +47346,7 @@ "type": "string" }, "ClientId": { - "markdownDescription": "The client ID for the client app. You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` ).", + "markdownDescription": "The app client ID for your UI customization. 
When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings..", "title": "ClientId", "type": "string" }, @@ -47457,7 +47451,7 @@ "items": { "$ref": "#/definitions/AWS::Cognito::UserPoolUser.AttributeType" }, - "markdownDescription": "An array of name-value pairs that contain user attributes and attribute values.", + "markdownDescription": "An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than `Username` . However, any attributes that you specify as required (when creating a user pool or in the *Attributes* tab of the console) either you should supply (in your call to `AdminCreateUser` ) or the user should supply (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . You can also do this by calling [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .\n\n- *email* : The email address of the user to whom the message that contains the code and username will be sent. Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and username will be sent. Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter.", "title": "UserAttributes", "type": "array" }, @@ -47568,7 +47562,7 @@ "type": "string" }, "Username": { - "markdownDescription": "", + "markdownDescription": "The user's username.", "title": "Username", "type": "string" } @@ -69887,7 +69881,7 @@ "items": { "type": "string" }, - "markdownDescription": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n- To include instance types with inference hardware, specify `inference` .\n\nDefault: Any accelerator type", + "markdownDescription": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n\nDefault: Any accelerator type", "title": "AcceleratorTypes", "type": "array" }, @@ -71730,7 +71724,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::Instance.ElasticInferenceAccelerator" }, - "markdownDescription": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference (EI) is no longer available to new customers. 
For more information, see [Amazon Elastic Inference FAQs](https://docs.aws.amazon.com/machine-learning/elastic-inference/faqs/) .", + "markdownDescription": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference is no longer available.", "title": "ElasticInferenceAccelerators", "type": "array" }, @@ -72836,7 +72830,7 @@ "type": "number" }, "KmsKeyId": { - "markdownDescription": "The ARN of the symmetric AWS Key Management Service ( AWS KMS ) CMK used for encryption.", + "markdownDescription": "Identifier (key ID, key alias, key ARN, or alias ARN) of the customer managed KMS key to use for EBS encryption.", "title": "KmsKeyId", "type": "string" }, @@ -72988,7 +72982,7 @@ "items": { "type": "string" }, - "markdownDescription": "The accelerator types that must be on the instance type.\n\n- For instance types with GPU accelerators, specify `gpu` .\n- For instance types with FPGA accelerators, specify `fpga` .\n- For instance types with inference accelerators, specify `inference` .\n\nDefault: Any accelerator type", + "markdownDescription": "The accelerator types that must be on the instance type.\n\n- For instance types with GPU accelerators, specify `gpu` .\n- For instance types with FPGA accelerators, specify `fpga` .\n\nDefault: Any accelerator type", "title": "AcceleratorTypes", "type": "array" }, @@ -73191,7 +73185,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.LaunchTemplateElasticInferenceAccelerator" }, - "markdownDescription": "An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", + "markdownDescription": "> Amazon Elastic Inference is no longer available. \n\nAn elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", "title": "ElasticInferenceAccelerators", "type": "array" }, @@ -73227,7 +73221,7 @@ }, "InstanceRequirements": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.InstanceRequirements", - "markdownDescription": "The attributes for the instance types. 
When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nYou must specify `VCpuCount` and `MemoryMiB` . All other attributes are optional. Any unspecified optional attribute is set to its default.\n\nWhen you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.\n\nTo limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:\n\n- `AllowedInstanceTypes` - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.\n- `ExcludedInstanceTypes` - The instance types to exclude from the list, even if they match your specified attributes.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .\n> \n> Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the [launch instance wizard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) , or with the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API or [AWS::EC2::Instance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) AWS CloudFormation resource, you can't specify `InstanceRequirements` . \n\nFor more information, see [Attribute-based instance type selection for EC2 Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) , [Attribute-based instance type selection for Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html) , and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nYou must specify `VCpuCount` and `MemoryMiB` . All other attributes are optional. Any unspecified optional attribute is set to its default.\n\nWhen you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.\n\nTo limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:\n\n- `AllowedInstanceTypes` - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.\n- `ExcludedInstanceTypes` - The instance types to exclude from the list, even if they match your specified attributes.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .\n> \n> Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. 
If you plan to use the launch template in the [launch instance wizard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) , or with the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API or [AWS::EC2::Instance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) AWS CloudFormation resource, you can't specify `InstanceRequirements` . \n\nFor more information, see [Specify attributes for instance type selection for EC2 Fleet or Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", "title": "InstanceRequirements" }, "InstanceType": { @@ -73517,7 +73511,7 @@ "type": "array" }, "InterfaceType": { - "markdownDescription": "The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa` . For more information, see [Elastic Fabric Adapter](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) in the *Amazon EC2 User Guide* .\n\nIf you are not creating an EFA, specify `interface` or omit this parameter.\n\nValid values: `interface` | `efa`", + "markdownDescription": "The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa` or `efa-only` . For more information, see [Elastic Fabric Adapter](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) in the *Amazon EC2 User Guide* .\n\nIf you are not creating an EFA, specify `interface` or omit this parameter.\n\nIf you specify `efa-only` , do not assign any IP addresses to the network interface. EFA-only network interfaces do not support IP addresses.\n\nValid values: `interface` | `efa` | `efa-only`", "title": "InterfaceType", "type": "string" }, @@ -76349,7 +76343,7 @@ "type": "array" }, "MaxEntries": { - "markdownDescription": "The maximum number of entries for the prefix list. This property is required when you create a prefix list.", + "markdownDescription": "The maximum number of entries for the prefix list.", "title": "MaxEntries", "type": "number" }, @@ -77508,7 +77502,7 @@ "items": { "type": "string" }, - "markdownDescription": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n- To include instance types with inference hardware, specify `inference` .\n\nDefault: Any accelerator type", + "markdownDescription": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n\nDefault: Any accelerator type", "title": "AcceleratorTypes", "type": "array" }, @@ -84136,7 +84130,7 @@ "type": "boolean" }, "FilesystemType": { - "markdownDescription": "The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.", + "markdownDescription": "The filesystem type for the volume.
For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem types are `NTFS` .", "title": "FilesystemType", "type": "string" }, @@ -84915,7 +84909,7 @@ "items": { "type": "string" }, - "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command", + "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command.", "title": "Command", "type": "array" }, @@ -86438,7 +86432,7 @@ "title": "Logging" }, "Name": { - "markdownDescription": "The unique name to give to your cluster.", + "markdownDescription": "The unique name to give to your cluster. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the AWS Region and AWS account that you're creating the cluster in. Note that underscores can't be used in AWS CloudFormation .", "title": "Name", "type": "string" }, @@ -90026,7 +90020,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "", + "markdownDescription": "Enables the application to automatically start on job submission.", "title": "Enabled", "type": "boolean" } @@ -90037,12 +90031,12 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "", + "markdownDescription": "Enables the application to automatically stop after a certain amount of time being idle. Defaults to true.", "title": "Enabled", "type": "boolean" }, "IdleTimeoutMinutes": { - "markdownDescription": "", + "markdownDescription": "The amount of idle time in minutes after which your application will automatically stop. 
Defaults to 15 minutes.", "title": "IdleTimeoutMinutes", "type": "number" } @@ -90087,7 +90081,7 @@ "additionalProperties": false, "properties": { "Classification": { - "markdownDescription": "", + "markdownDescription": "The classification within a configuration.", "title": "Classification", "type": "string" }, @@ -90095,13 +90089,13 @@ "items": { "$ref": "#/definitions/AWS::EMRServerless::Application.ConfigurationObject" }, - "markdownDescription": "", + "markdownDescription": "A list of additional configurations to apply within a configuration object.", "title": "Configurations", "type": "array" }, "Properties": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "A set of properties specified within a configuration classification.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -90294,17 +90288,17 @@ "additionalProperties": false, "properties": { "Cpu": { - "markdownDescription": "", + "markdownDescription": "The CPU requirements of the worker configuration. Each worker can have 1, 2, 4, 8, or 16 vCPUs.", "title": "Cpu", "type": "string" }, "Disk": { - "markdownDescription": "", + "markdownDescription": "The disk requirements of the worker configuration.", "title": "Disk", "type": "string" }, "Memory": { - "markdownDescription": "", + "markdownDescription": "The memory requirements of the worker configuration.", "title": "Memory", "type": "string" } @@ -90367,12 +90361,12 @@ "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Valkey 7.2 or later, or Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. 
If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. 
If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Valkey and Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Valkey and Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "title": "CacheNodeType", "type": "string" }, @@ -90400,17 +90394,17 @@ "type": "string" }, "Engine": { - "markdownDescription": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | `redis`", + "markdownDescription": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | valkey | `redis`", "title": "Engine", "type": "string" }, "EngineVersion": { - "markdownDescription": "The version number of the cache engine to be used for this cluster. 
To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", + "markdownDescription": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", "title": "EngineVersion", "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90423,7 +90417,7 @@ "type": "array" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90464,12 +90458,12 @@ "items": { "type": "string" }, - "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . 
\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, "SnapshotName": { - "markdownDescription": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "markdownDescription": "The name of a Valkey or Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "title": "SnapshotName", "type": "string" }, @@ -90645,7 +90639,7 @@ "additionalProperties": false, "properties": { "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups.", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90660,7 +90654,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Elasticache Redis OSS engine version.", + "markdownDescription": "The Elasticache Valkey or Redis OSS engine version.", "title": "EngineVersion", "type": "string" }, @@ -90771,7 +90765,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -90919,27 +90913,27 @@ "type": "boolean" }, "AuthToken": { - "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . 
For more information, see [Authenticating Valkey or Redis OSS users with the AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "AuthToken", "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Valkey 7.2 or later, or Redis OSS 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "title": "AutomaticFailoverEnabled", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. 
Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. 
Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", "title": "CacheNodeType", "type": "string" }, "CacheParameterGroupName": { - "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "markdownDescription": "The name of the parameter group to associate with this replication group. 
If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Valkey or Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Valkey or Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Valkey or Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "title": "CacheParameterGroupName", "type": "string" }, @@ -90957,22 +90951,22 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, "DataTieringEnabled": { - "markdownDescription": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html) .", + "markdownDescription": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html) .", "title": "DataTieringEnabled", "type": "boolean" }, "Engine": { - "markdownDescription": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `Redis` .", + "markdownDescription": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `valkey` or `redis` .\n\n> Upgrading an existing engine from redis to valkey is done through in-place migration, and requires a parameter group.", "title": "Engine", "type": "string" }, "EngineVersion": { - "markdownDescription": "The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. 
If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", + "markdownDescription": "The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", "title": "EngineVersion", "type": "string" }, @@ -90982,7 +90976,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -91000,12 +90994,12 @@ "type": "array" }, "MultiAZEnabled": { - "markdownDescription": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) .", + "markdownDescription": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) .", "title": "MultiAZEnabled", "type": "boolean" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -91013,7 +91007,7 @@ "items": { "$ref": "#/definitions/AWS::ElastiCache::ReplicationGroup.NodeGroupConfiguration" }, - "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Valkey or Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "NodeGroupConfiguration", "type": "array" }, @@ -91028,7 +91022,7 @@ "type": "number" }, "NumNodeGroups": { - "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey or Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "title": "NumNodeGroups", "type": "number" }, @@ -91082,7 +91076,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. 
The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, @@ -91102,7 +91096,7 @@ "type": "string" }, "SnapshottingClusterId": { - "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", + "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Valkey or Redis OSS (cluster mode enabled) replication groups.", "title": "SnapshottingClusterId", "type": "string" }, @@ -91120,7 +91114,7 @@ "type": "boolean" }, "TransitEncryptionMode": { - "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Valkey or Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "title": "TransitEncryptionMode", "type": "string" }, @@ -91239,7 +91233,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -91463,7 +91457,7 @@ "title": "CacheUsageLimits" }, "DailySnapshotTime": { - "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", + "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. 
Available for Valkey, Redis OSS, and Serverless Memcached only.", "title": "DailySnapshotTime", "type": "string" }, @@ -91524,7 +91518,7 @@ "type": "array" }, "SnapshotRetentionLimit": { - "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", + "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Valkey, Redis OSS, and Serverless Memcached only.", "title": "SnapshotRetentionLimit", "type": "number" }, @@ -91545,7 +91539,7 @@ "type": "array" }, "UserGroupId": { - "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.", + "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL.", "title": "UserGroupId", "type": "string" } @@ -91906,7 +91900,7 @@ "additionalProperties": false, "properties": { "Engine": { - "markdownDescription": "The current supported value is redis.", + "markdownDescription": "The current supported values are valkey and redis.", "title": "Engine", "type": "string" }, @@ -94129,7 +94123,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. 
The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. 
The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . 
The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", "title": "Key", "type": "string" }, @@ -123176,7 +123170,7 @@ "properties": { "Attributes": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "Metadata that can be used to define a package version\u2019s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.\n\nThe combined size of all the attributes on a package version is limited to 3KB.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -123186,12 +123180,12 @@ "type": "object" }, "Description": { - "markdownDescription": "", + "markdownDescription": "A summary of the package version being created. 
This can be used to outline the package's contents or purpose.", "title": "Description", "type": "string" }, "PackageName": { - "markdownDescription": "", + "markdownDescription": "The name of the associated software package.", "title": "PackageName", "type": "string" }, @@ -123199,12 +123193,12 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "", + "markdownDescription": "Metadata that can be used to manage the package version.", "title": "Tags", "type": "array" }, "VersionName": { - "markdownDescription": "", + "markdownDescription": "The name of the new package version.", "title": "VersionName", "type": "string" } @@ -133463,7 +133457,7 @@ "type": "array" }, "Role": { - "markdownDescription": "The IAM role that allows AWS IoT Wireless to access the CSV file in the S3 bucket.", + "markdownDescription": "The IAM role that allows access to the CSV file in the S3 bucket.", "title": "Role", "type": "string" }, @@ -165551,7 +165545,7 @@ "type": "string" }, "DataTiering": { - "markdownDescription": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html) .", + "markdownDescription": "Enables data tiering. Data tiering is only supported for clusters using the r6gd node type. This parameter must be set when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html) .", "title": "DataTiering", "type": "string" }, @@ -165561,7 +165555,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Redis engine version used by the cluster .", + "markdownDescription": "The Valkey or Redis OSS engine version used by the cluster .", "title": "EngineVersion", "type": "string" }, @@ -186600,7 +186594,7 @@ }, "SourceColumn": { "$ref": "#/definitions/AWS::QuickSight::Analysis.ColumnIdentifier", - "markdownDescription": "", + "markdownDescription": "A column of a data set.", "title": "SourceColumn" }, "SourceField": { @@ -199086,7 +199080,7 @@ }, "SourceColumn": { "$ref": "#/definitions/AWS::QuickSight::Dashboard.ColumnIdentifier", - "markdownDescription": "", + "markdownDescription": "A column of a data set.", "title": "SourceColumn" }, "SourceField": { @@ -209380,7 +209374,7 @@ }, "OverrideDatasetParameterOperation": { "$ref": "#/definitions/AWS::QuickSight::DataSet.OverrideDatasetParameterOperation", - "markdownDescription": "", + "markdownDescription": "A transform operation that overrides the dataset parameter values that are defined in another dataset.", "title": "OverrideDatasetParameterOperation" }, "ProjectOperation": { @@ -213585,7 +213579,7 @@ }, "SourceColumn": { "$ref": "#/definitions/AWS::QuickSight::Template.ColumnIdentifier", - "markdownDescription": "", + "markdownDescription": "A column of a data set.", "title": "SourceColumn" }, "SourceField": { @@ -222945,7 +222939,7 @@ }, "Typography": { "$ref": "#/definitions/AWS::QuickSight::Theme.Typography", - "markdownDescription": "", + "markdownDescription": "Determines the typography options.", "title": "Typography" }, "UIColorPalette": { @@ -227398,7 +227392,7 @@ "type": "object" }, "NodeType": { - "markdownDescription": "The node type to be provisioned for the cluster. 
For information about node types, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nValid Values: `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` | `ra3.16xlarge`", + "markdownDescription": "The node type to be provisioned for the cluster. For information about node types, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nValid Values: `dc2.large` | `dc2.8xlarge` | `ra3.large` | `ra3.xlplus` | `ra3.4xlarge` | `ra3.16xlarge`", "title": "NodeType", "type": "string" }, @@ -227538,7 +227532,7 @@ "type": "string" }, "S3KeyPrefix": { - "markdownDescription": "The prefix applied to the log file names.\n\nConstraints:\n\n- Cannot exceed 512 characters\n- Cannot contain spaces( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. The hexadecimal codes for invalid characters are:\n\n- x00 to x20\n- x22\n- x27\n- x5c\n- x7f or larger", + "markdownDescription": "The prefix applied to the log file names.\n\nValid characters are any letter from any language, any whitespace character, any numeric character, and the following characters: underscore ( `_` ), period ( `.` ), colon ( `:` ), slash ( `/` ), equal ( `=` ), plus ( `+` ), backslash ( `\\` ), hyphen ( `-` ), at symbol ( `@` ).", "title": "S3KeyPrefix", "type": "string" } @@ -230213,7 +230207,7 @@ "title": "Policy" }, "PolicyDescription": { - "markdownDescription": "The description for the policy.", + "markdownDescription": "Description of the resiliency policy.", "title": "PolicyDescription", "type": "string" }, @@ -233090,7 +233084,7 @@ "type": "string" }, "ProfileId": { - "markdownDescription": "ID of the Profile.", + "markdownDescription": "ID of the Profile.\n\nUpdate to this property requires update to the `ResourceId` property as well, because you can only associate one Profile per VPC. For more information, see [Route 53 Profiles](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/profiles.html) .", "title": "ProfileId", "type": "string" }, @@ -236103,7 +236097,7 @@ "items": { "$ref": "#/definitions/AWS::S3::Bucket.Rule" }, - "markdownDescription": "Specifies lifecycle configuration rules for an Amazon S3 bucket.", + "markdownDescription": "A lifecycle rule for individual objects in an Amazon S3 bucket.", "title": "Rules", "type": "array" } @@ -237404,7 +237398,7 @@ }, "BucketLevel": { "$ref": "#/definitions/AWS::S3::StorageLens.BucketLevel", - "markdownDescription": "This property contains the details of the account-level bucket-level configurations for Amazon S3 Storage Lens.", + "markdownDescription": "This property contains the details of the account-level bucket-level configurations for Amazon S3 Storage Lens. To enable bucket-level configurations, make sure to also set the same metrics at the account level.", "title": "BucketLevel" }, "DetailedStatusCodesMetrics": { @@ -245513,7 +245507,7 @@ }, "DefaultSpaceSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.DefaultSpaceSettings", - "markdownDescription": "A collection of settings that apply to spaces created in the domain.", + "markdownDescription": "The default settings for shared spaces that users create in the domain.\n\nSageMaker applies these settings only to shared spaces. 
It doesn't apply them to private spaces.", "title": "DefaultSpaceSettings" }, "DefaultUserSettings": { @@ -245991,20 +245985,20 @@ "properties": { "CodeEditorAppSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.CodeEditorAppSettings", - "markdownDescription": "The Code Editor application settings.", + "markdownDescription": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CodeEditorAppSettings" }, "CustomFileSystemConfigs": { "items": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomFileSystemConfig" }, - "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.", + "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomFileSystemConfigs", "type": "array" }, "CustomPosixUserConfig": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomPosixUserConfig", - "markdownDescription": "Details about the POSIX identity that is used for file system operations.", + "markdownDescription": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomPosixUserConfig" }, "DefaultLandingUri": { @@ -246013,13 +246007,13 @@ "type": "string" }, "ExecutionRole": { - "markdownDescription": "The execution role for the user.", + "markdownDescription": "The execution role for the user.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.", "title": "ExecutionRole", "type": "string" }, "JupyterLabAppSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.JupyterLabAppSettings", - "markdownDescription": "The settings for the JupyterLab application.", + "markdownDescription": "The settings for the JupyterLab application.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "JupyterLabAppSettings" }, "JupyterServerAppSettings": { @@ -246046,7 +246040,7 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. 
Therefore, the number of security groups that you can specify is one less than the maximum number shown.", + "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SecurityGroups", "type": "array" }, @@ -246057,7 +246051,7 @@ }, "SpaceStorageSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.DefaultSpaceStorageSettings", - "markdownDescription": "The storage settings for a space.", + "markdownDescription": "The storage settings for a space.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SpaceStorageSettings" }, "StudioWebPortal": { @@ -253682,20 +253676,20 @@ "properties": { "CodeEditorAppSettings": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CodeEditorAppSettings", - "markdownDescription": "The Code Editor application settings.", + "markdownDescription": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CodeEditorAppSettings" }, "CustomFileSystemConfigs": { "items": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomFileSystemConfig" }, - "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.", + "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomFileSystemConfigs", "type": "array" }, "CustomPosixUserConfig": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomPosixUserConfig", - "markdownDescription": "Details about the POSIX identity that is used for file system operations.", + "markdownDescription": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomPosixUserConfig" }, "DefaultLandingUri": { @@ -253704,13 +253698,13 @@ "type": "string" }, "ExecutionRole": { - "markdownDescription": "The execution role for the user.", + "markdownDescription": "The execution role for the user.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. 
SageMaker doesn't apply this setting to shared spaces.", "title": "ExecutionRole", "type": "string" }, "JupyterLabAppSettings": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.JupyterLabAppSettings", - "markdownDescription": "The settings for the JupyterLab application.", + "markdownDescription": "The settings for the JupyterLab application.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "JupyterLabAppSettings" }, "JupyterServerAppSettings": { @@ -253732,7 +253726,7 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.", + "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SecurityGroups", "type": "array" }, @@ -253743,7 +253737,7 @@ }, "SpaceStorageSettings": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.DefaultSpaceStorageSettings", - "markdownDescription": "The storage settings for a space.", + "markdownDescription": "The storage settings for a space.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SpaceStorageSettings" }, "StudioWebPortal": { @@ -263358,7 +263352,7 @@ }, "IdentityProviderDetails": { "$ref": "#/definitions/AWS::Transfer::Server.IdentityProviderDetails", - "markdownDescription": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Not required when `IdentityProviderType` is set to `SERVICE_MANAGED` .", + "markdownDescription": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. 
Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` .", "title": "IdentityProviderDetails" }, "IdentityProviderType": { @@ -271944,7 +271938,7 @@ }, "SourceConfiguration": { "$ref": "#/definitions/AWS::Wisdom::KnowledgeBase.SourceConfiguration", - "markdownDescription": "The source of the knowledge base content. Only set this argument for EXTERNAL knowledge bases.", + "markdownDescription": "The source of the knowledge base content. Only set this argument for EXTERNAL or Managed knowledge bases.", "title": "SourceConfiguration" }, "Tags": { @@ -271987,7 +271981,7 @@ "additionalProperties": false, "properties": { "AppIntegrationArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` as source fields.\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` as source fields.\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , your AppIntegrations DataIntegration must have an ObjectConfiguration if `objectFields` is not provided, including at least `id` , `title` , `updated_at` , and `draft` as source fields.\n- For [SharePoint](https://docs.aws.amazon.com/https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/sharepoint-net-server-csom-jsom-and-rest-api-index) , your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among `docx` , `pdf` , `html` , `htm` , and `txt` .\n- For [Amazon S3](https://docs.aws.amazon.com/https://aws.amazon.com/s3/) , the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. 
The `SourceURI` of your DataIntegration must use the following format: `s3://your_s3_bucket_name` .\n\n> The bucket policy of the corresponding S3 bucket must allow the AWS principal `app-integrations.amazonaws.com` to perform `s3:ListBucket` , `s3:GetObject` , and `s3:GetBucketLocation` against the bucket.", + "markdownDescription": "The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` as source fields.\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` as source fields.\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , your AppIntegrations DataIntegration must have an ObjectConfiguration if `objectFields` is not provided, including at least `id` , `title` , `updated_at` , and `draft` as source fields.\n- For [SharePoint](https://docs.aws.amazon.com/https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/sharepoint-net-server-csom-jsom-and-rest-api-index) , your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among `docx` , `pdf` , `html` , `htm` , and `txt` .\n- For [Amazon S3](https://docs.aws.amazon.com/s3/) , the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. The `SourceURI` of your DataIntegration must use the following format: `s3://your_s3_bucket_name` .\n\n> The bucket policy of the corresponding S3 bucket must allow the AWS principal `app-integrations.amazonaws.com` to perform `s3:ListBucket` , `s3:GetObject` , and `s3:GetBucketLocation` against the bucket.", "title": "AppIntegrationArn", "type": "string" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 772079fbb..3f0649f19 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -1162,7 +1162,7 @@ "StageName": "The name of an existing stage to associate with the deployment." }, "AWS::ApiGatewayV2::DomainName": { - "DomainName": "The custom domain name for your API in Amazon API Gateway. Uppercase letters are not supported.", + "DomainName": "The custom domain name for your API in Amazon API Gateway. Uppercase letters and the underscore ( `_` ) character are not supported.", "DomainNameConfigurations": "The domain name configurations.", "MutualTlsAuthentication": "The mutual TLS authentication configuration for a custom domain name.", "Tags": "The collection of tags associated with a domain name." 
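As a minimal sketch of the bucket policy requirement noted in the Wisdom `AppIntegrationArn` description above: the statement below grants the `app-integrations.amazonaws.com` principal the three actions the description lists ( `s3:ListBucket` , `s3:GetObject` , `s3:GetBucketLocation` ). The bucket name `your_s3_bucket_name` mirrors the `SourceURI` placeholder, and the `WisdomContentBucketPolicy` logical ID is hypothetical, not taken from the patch.

```yaml
# Minimal sketch under the assumptions stated above; the bucket name and the
# logical ID are placeholders, not values from the patch.
Resources:
  WisdomContentBucketPolicy:
    Type: AWS::S3::BucketPolicy
    Properties:
      Bucket: your_s3_bucket_name
      PolicyDocument:
        Version: "2012-10-17"
        Statement:
          - Sid: AllowAppIntegrationsRead
            Effect: Allow
            Principal:
              Service: app-integrations.amazonaws.com
            Action:
              - s3:ListBucket
              - s3:GetObject
              - s3:GetBucketLocation
            Resource:
              - arn:aws:s3:::your_s3_bucket_name
              - arn:aws:s3:::your_s3_bucket_name/*
```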
@@ -1587,7 +1587,7 @@ "ApplicationHostUrl": "The location of the SAPOData resource.", "ApplicationServicePath": "The application path to catalog service.", "ClientNumber": "The client number for the client creating the connection.", - "DisableSSO": "", + "DisableSSO": "If you set this parameter to `true` , Amazon AppFlow bypasses the single sign-on (SSO) settings in your SAP account when it accesses your SAP OData instance.\n\nWhether you need this option depends on the types of credentials that you applied to your SAP OData connection profile. If your profile uses basic authentication credentials, SAP SSO can prevent Amazon AppFlow from connecting to your account with your username and password. In this case, bypassing SSO makes it possible for Amazon AppFlow to connect successfully. However, if your profile uses OAuth credentials, this parameter has no effect.", "LogonLanguage": "The logon language of SAPOData instance.", "OAuthProperties": "The SAPOData OAuth properties required for OAuth type authentication.", "PortNumber": "The port number of the SAPOData instance.", @@ -1598,7 +1598,7 @@ "ClientCredentialsArn": "The secret manager ARN, which contains the client ID and client secret of the connected app.", "ConnectorOAuthRequest": "Used by select connectors for which the OAuth workflow is supported, such as Salesforce, Google Analytics, Marketo, Zendesk, and Slack.", "JwtToken": "A JSON web token (JWT) that authorizes Amazon AppFlow to access your Salesforce records.", - "OAuth2GrantType": "Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records.\n\nYou can specify one of the following values:\n\n- **AUTHORIZATION_CODE** - Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records.\n- **CLIENT_CREDENTIALS** - Amazon AppFlow passes client credentials (a client ID and client secret) when it requests the access token from Salesforce. You provide these credentials to Amazon AppFlow when you define the connection to your Salesforce account.\n- **JWT_BEARER** - Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records.", + "OAuth2GrantType": "Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records.\n\nYou can specify one of the following values:\n\n- **AUTHORIZATION_CODE** - Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records.\n- **JWT_BEARER** - Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. 
When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records.\n\n> The CLIENT_CREDENTIALS value is not supported for Salesforce.", "RefreshToken": "The credentials used to acquire new access tokens." }, "AWS::AppFlow::ConnectorProfile SalesforceConnectorProfileProperties": { @@ -1607,7 +1607,7 @@ "usePrivateLinkForMetadataAndAuthorization": "If the connection mode for the connector profile is private, this parameter sets whether Amazon AppFlow uses the private network to send metadata and authorization calls to Salesforce. Amazon AppFlow sends private calls through AWS PrivateLink . These calls travel through AWS infrastructure without being exposed to the public internet.\n\nSet either of the following values:\n\n- **true** - Amazon AppFlow sends all calls to Salesforce over the private network.\n\nThese private calls are:\n\n- Calls to get metadata about your Salesforce records. This metadata describes your Salesforce objects and their fields.\n- Calls to get or refresh access tokens that allow Amazon AppFlow to access your Salesforce records.\n- Calls to transfer your Salesforce records as part of a flow run.\n- **false** - The default value. Amazon AppFlow sends some calls to Salesforce privately and other calls over the public internet.\n\nThe public calls are:\n\n- Calls to get metadata about your Salesforce records.\n- Calls to get or refresh access tokens.\n\nThe private calls are:\n\n- Calls to transfer your Salesforce records as part of a flow run." }, "AWS::AppFlow::ConnectorProfile ServiceNowConnectorProfileCredentials": { - "OAuth2Credentials": "", + "OAuth2Credentials": "The OAuth 2.0 credentials required to authenticate the user.", "Password": "The password that corresponds to the user name.", "Username": "The name of the user." }, @@ -1814,15 +1814,15 @@ "WriteOperationType": "The possible write operations in the destination connector. When this value is not provided, this defaults to the `INSERT` operation." }, "AWS::AppFlow::Flow SAPODataPaginationConfig": { - "maxPageSize": "" + "maxPageSize": "The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application. For transfers of OData records, the maximum page size is 3,000. For transfers of data that comes from an ODP provider, the maximum page size is 10,000." }, "AWS::AppFlow::Flow SAPODataParallelismConfig": { - "maxParallelism": "" + "maxParallelism": "The maximum number of processes that Amazon AppFlow runs at the same time when it retrieves your data from your SAP application." }, "AWS::AppFlow::Flow SAPODataSourceProperties": { "ObjectPath": "The object path specified in the SAPOData flow source.", - "paginationConfig": "", - "parallelismConfig": "" + "paginationConfig": "Sets the page size for each concurrent process that transfers OData records from your SAP instance.", + "parallelismConfig": "Sets the number of concurrent processes that transfer OData records from your SAP instance." }, "AWS::AppFlow::Flow SalesforceDestinationProperties": { "DataTransferApi": "Specifies which Salesforce API is used by Amazon AppFlow when your flow transfers data to Salesforce.\n\n- **AUTOMATIC** - The default. Amazon AppFlow selects which API to use based on the number of records that your flow transfers to Salesforce. If your flow transfers fewer than 1,000 records, Amazon AppFlow uses Salesforce REST API. 
If your flow transfers 1,000 records or more, Amazon AppFlow uses Salesforce Bulk API 2.0.\n\nEach of these Salesforce APIs structures data differently. If Amazon AppFlow selects the API automatically, be aware that, for recurring flows, the data output might vary from one flow run to the next. For example, if a flow runs daily, it might use REST API on one day to transfer 900 records, and it might use Bulk API 2.0 on the next day to transfer 1,100 records. For each of these flow runs, the respective Salesforce API formats the data differently. Some of the differences include how dates are formatted and null values are represented. Also, Bulk API 2.0 doesn't transfer Salesforce compound fields.\n\nBy choosing this option, you optimize flow performance for both small and large data transfers, but the tradeoff is inconsistent formatting in the output.\n- **BULKV2** - Amazon AppFlow uses only Salesforce Bulk API 2.0. This API runs asynchronous data transfers, and it's optimal for large sets of data. By choosing this option, you ensure that your flow writes consistent output, but you optimize performance only for large data transfers.\n\nNote that Bulk API 2.0 does not transfer Salesforce compound fields.\n- **REST_SYNC** - Amazon AppFlow uses only Salesforce REST API. By choosing this option, you ensure that your flow writes consistent output, but you decrease performance for large data transfers that are better suited for Bulk API 2.0. In some cases, if your flow attempts to transfer a vary large set of data, it might fail with a timed out error.", @@ -3048,7 +3048,6 @@ "ApiId": "Unique AWS AppSync GraphQL API identifier where this data source will be created.", "Description": "The description of the data source.", "DynamoDBConfig": "AWS Region and TableName for an Amazon DynamoDB table in your account.", - "ElasticsearchConfig": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\n\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service . This property is deprecated. For new data sources, use *OpenSearchServiceConfig* to specify an OpenSearch Service data source.", "EventBridgeConfig": "An EventBridge configuration that contains a valid ARN of an event bus.", "HttpConfig": "Endpoints for an HTTP data source.", "LambdaConfig": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account.", @@ -3079,10 +3078,6 @@ "UseCallerCredentials": "Set to `TRUE` to use AWS Identity and Access Management with this data source.", "Versioned": "Set to TRUE to use Conflict Detection and Resolution with this data source." }, - "AWS::AppSync::DataSource ElasticsearchConfig": { - "AwsRegion": "The AWS Region.", - "Endpoint": "The endpoint." - }, "AWS::AppSync::DataSource EventBridgeConfig": { "EventBusArn": "The event bus pipeline's ARN. For more information about event buses, see [EventBridge event buses](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-bus.html) ." }, @@ -3825,6 +3820,7 @@ "Tags": "One or more tags. You can tag your Auto Scaling group and propagate the tags to the Amazon EC2 instances it launches. Tags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS volumes, specify the tags in a launch template but use caution. 
If the launch template specifies an instance tag with a key that is also specified for the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that instance tag with the value specified by the Auto Scaling group. For more information, see [Tag Auto Scaling groups and instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-tagging.html) in the *Amazon EC2 Auto Scaling User Guide* .", "TargetGroupARNs": "The Amazon Resource Names (ARN) of the Elastic Load Balancing target groups to associate with the Auto Scaling group. Instances are registered as targets with the target groups. The target groups receive incoming traffic and route requests to one or more registered targets. For more information, see [Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html) in the *Amazon EC2 Auto Scaling User Guide* .", "TerminationPolicies": "A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see [Configure termination policies for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid values: `Default` | `AllocationStrategy` | `ClosestToNextInstanceHour` | `NewestInstance` | `OldestInstance` | `OldestLaunchConfiguration` | `OldestLaunchTemplate` | `arn:aws:lambda:region:account-id:function:my-function:my-alias`", + "TrafficSources": "The traffic sources associated with this Auto Scaling group.", "VPCZoneIdentifier": "A list of subnet IDs for a virtual private cloud (VPC) where instances in the Auto Scaling group can be created.\n\nIf this resource specifies public subnets and is also in a VPC that is defined in the same stack template, you must use the [DependsOn attribute](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html) to declare a dependency on the [VPC-gateway attachment](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-vpc-gateway-attachment.html) .\n\n> When you update `VPCZoneIdentifier` , this retains the same Auto Scaling group and replaces old instances with new ones, according to the specified subnets. You can optionally specify how CloudFormation handles these updates by using an [UpdatePolicy attribute](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html) . \n\nRequired to launch instances into a nondefault VPC. If you specify `VPCZoneIdentifier` with `AvailabilityZones` , the subnets that you specify for this property must reside in those Availability Zones." }, "AWS::AutoScaling::AutoScalingGroup AcceleratorCountRequest": { @@ -3938,6 +3934,10 @@ "Max": "The storage maximum in GB.", "Min": "The storage minimum in GB." }, + "AWS::AutoScaling::AutoScalingGroup TrafficSourceIdentifier": { + "Identifier": "Identifies the traffic source.\n\nFor Application Load Balancers, Gateway Load Balancers, Network Load Balancers, and VPC Lattice, this will be the Amazon Resource Name (ARN) for a target group in this account and Region. 
For Classic Load Balancers, this will be the name of the Classic Load Balancer in this account and Region.\n\nFor example:\n\n- Application Load Balancer ARN: `arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/1234567890123456`\n- Classic Load Balancer name: `my-classic-load-balancer`\n- VPC Lattice ARN: `arn:aws:vpc-lattice:us-west-2:123456789012:targetgroup/tg-1234567890123456`\n\nTo get the ARN of a target group for an Application Load Balancer, Gateway Load Balancer, or Network Load Balancer, or the name of a Classic Load Balancer, use the Elastic Load Balancing [DescribeTargetGroups](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTargetGroups.html) and [DescribeLoadBalancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) API operations.\n\nTo get the ARN of a target group for VPC Lattice, use the VPC Lattice [GetTargetGroup](https://docs.aws.amazon.com/vpc-lattice/latest/APIReference/API_GetTargetGroup.html) API operation.", "Type": "Provides additional context for the value of `Identifier` .\n\nThe following lists the valid values:\n\n- `elb` if `Identifier` is the name of a Classic Load Balancer.\n- `elbv2` if `Identifier` is the ARN of an Application Load Balancer, Gateway Load Balancer, or Network Load Balancer target group.\n- `vpc-lattice` if `Identifier` is the ARN of a VPC Lattice target group.\n\nRequired if the identifier is the name of a Classic Load Balancer." }, "AWS::AutoScaling::AutoScalingGroup VCpuCountRequest": { "Max": "The maximum number of vCPUs.", "Min": "The minimum number of vCPUs." }, @@ -4453,6 +4453,20 @@ "Key": "The tag key.", "Value": "The tag value." }, + "AWS::Backup::LogicallyAirGappedBackupVault": { + "AccessPolicy": "The backup vault access policy document in JSON format.", + "BackupVaultName": "The name of a logical container where backups are stored. Logically air-gapped backup vaults are identified by names that are unique to the account used to create them and the Region where they are created.", + "BackupVaultTags": "The tags to assign to the vault.", + "MaxRetentionDays": "The maximum retention period that the vault retains its recovery points.", + "MinRetentionDays": "This setting specifies the minimum retention period that the vault retains its recovery points.\n\nThe minimum value accepted is 7 days.", + "Notifications": "Returns event notifications for the specified backup vault.", + "VaultState": "The current state of the vault.", + "VaultType": "The type of vault described." + }, + "AWS::Backup::LogicallyAirGappedBackupVault NotificationObjectType": { + "BackupVaultEvents": "An array of events that indicate the status of jobs to back up resources to the backup vault.", + "SNSTopicArn": "The Amazon Resource Name (ARN) that specifies the topic for a backup vault\u2019s events; for example, `arn:aws:sns:us-west-2:111122223333:MyVaultTopic` ." + }, "AWS::Backup::ReportPlan": { "ReportDeliveryChannel": "Contains information about where and how to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports.", "ReportPlanDescription": "An optional description of the report plan with a maximum 1,024 characters.", @@ -4481,7 +4495,7 @@ "RestoreTestingPlanName": "The RestoreTestingPlanName is a unique string that is the name of the restore testing plan. 
This cannot be changed after creation, and it must consist of only alphanumeric characters and underscores.", "ScheduleExpression": "A CRON expression in specified timezone when a restore testing plan is executed.", "ScheduleExpressionTimezone": "Optional. This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.", - "ScheduleStatus": "", + "ScheduleStatus": "This parameter is not currently supported.", "StartWindowHours": "Defaults to 24 hours.\n\nA value in hours after a restore test is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, this parameter has a maximum value of 168 hours (one week).", "Tags": "Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters,numbers, spaces, and the following characters: `+ - = . _ : /.`" }, @@ -5228,7 +5242,7 @@ "MaxTokens": "The maximum number of tokens to return in the response.", "StopSequences": "A list of strings that define sequences after which the model will stop generating.", "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", - "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopK": "", "TopP": "The percentage of most-likely candidates that the model considers for the next token." }, "AWS::Bedrock::Flow PromptTemplateConfiguration": { @@ -5374,7 +5388,7 @@ "MaxTokens": "The maximum number of tokens to return in the response.", "StopSequences": "A list of strings that define sequences after which the model will stop generating.", "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", - "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopK": "", "TopP": "The percentage of most-likely candidates that the model considers for the next token." }, "AWS::Bedrock::FlowVersion PromptTemplateConfiguration": { @@ -5565,7 +5579,7 @@ "MaxTokens": "The maximum number of tokens to return in the response.", "StopSequences": "A list of strings that define sequences after which the model will stop generating.", "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", - "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopK": "", "TopP": "The percentage of most-likely candidates that the model considers for the next token." }, "AWS::Bedrock::Prompt PromptTemplateConfiguration": { @@ -5603,7 +5617,7 @@ "MaxTokens": "The maximum number of tokens to return in the response.", "StopSequences": "A list of strings that define sequences after which the model will stop generating.", "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", - "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopK": "", "TopP": "The percentage of most-likely candidates that the model considers for the next token." 
}, "AWS::Bedrock::PromptVersion PromptTemplateConfiguration": { @@ -7260,11 +7274,11 @@ }, "AWS::CodeBuild::Fleet": { "BaseCapacity": "The initial number of machines allocated to the compute \ufb02eet, which de\ufb01nes the number of builds that can run in parallel.", - "ComputeType": "> Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", - "EnvironmentType": "> Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "ComputeType": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "EnvironmentType": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "FleetServiceRole": "The service role associated with the compute fleet. For more information, see [Allow a user to add a permission policy for a fleet service role](https://docs.aws.amazon.com/codebuild/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#customer-managed-policies-example-permission-policy-fleet-service-role.html) in the *AWS CodeBuild User Guide* .", - "FleetVpcConfig": "> Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the VPC configuration that AWS CodeBuild accesses.", - "ImageId": "> Updating this field is not allowed for `MAC_ARM` . \n\nThe Amazon Machine Image (AMI) of the compute fleet.", + "FleetVpcConfig": "Information about the VPC configuration that AWS CodeBuild accesses.", + "ImageId": "The Amazon Machine Image (AMI) of the compute fleet.", "Name": "The name of the compute fleet.", "OverflowBehavior": "The compute fleet overflow behavior.\n\n- For overflow behavior `QUEUE` , your overflow builds need to wait on the existing fleet instance to become available.\n- For overflow behavior `ON_DEMAND` , your overflow builds run on CodeBuild on-demand.\n\n> If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see [Example policy statement to allow CodeBuild access to AWS services required to create a VPC network interface](https://docs.aws.amazon.com/codebuild/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#customer-managed-policies-example-create-vpc-network-interface) .", "Tags": "A list of tag key and value pairs associated with this compute fleet.\n\nThese tags are available for use by AWS services that support AWS CodeBuild compute fleet tags." @@ -7742,11 +7756,13 @@ }, "AWS::CodePipeline::Pipeline ActionDeclaration": { "ActionTypeId": "Specifies the action type and the provider of the action.", + "Commands": "The shell commands to run with your compute action in CodePipeline. All commands are supported except multi-line formats. While CodeBuild logs and permissions are used, you do not need to create any resources in CodeBuild.\n\n> Using compute time for this action will incur separate charges in AWS CodeBuild .", "Configuration": "The action's configuration. These are key-value pairs that specify input values for an action. For more information, see [Action Structure Requirements in CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#action-requirements) . For the list of configuration properties for the AWS CloudFormation action type in CodePipeline, see [Configuration Properties Reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-action-reference.html) in the *AWS CloudFormation User Guide* . 
For template snippets with examples, see [Using Parameter Override Functions with CodePipeline Pipelines](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-parameter-override-functions.html) in the *AWS CloudFormation User Guide* .\n\nThe values can be represented in either JSON or YAML format. For example, the JSON configuration item format is as follows:\n\n*JSON:*\n\n`\"Configuration\" : { Key : Value },`", "InputArtifacts": "The name or ID of the artifact consumed by the action, such as a test or build artifact. While the field is not a required parameter, most actions have an action configuration that requires a specified quantity of input artifacts. To refer to the action configuration specification by action provider, see the [Action structure reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference.html) in the *AWS CodePipeline User Guide* .\n\n> For a CodeBuild action with multiple input artifacts, one of your input sources must be designated the PrimarySource. For more information, see the [CodeBuild action reference page](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference-CodeBuild.html) in the *AWS CodePipeline User Guide* .", "Name": "The action declaration's name.", "Namespace": "The variable namespace associated with the action. All variables produced as output by this action fall under this namespace.", "OutputArtifacts": "The name or ID of the result of the action declaration, such as a test or build artifact. While the field is not a required parameter, most actions have an action configuration that requires a specified quantity of output artifacts. To refer to the action configuration specification by action provider, see the [Action structure reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference.html) in the *AWS CodePipeline User Guide* .", + "OutputVariables": "The list of variables that are to be exported from the compute action. This is specifically CodeBuild environment variables as used for that action.", "Region": "The action declaration's AWS Region, such as us-east-1.", "RoleArn": "The ARN of the IAM service role that performs the declared action. This is assumed through the roleArn for the pipeline.", "RunOrder": "The order in which actions are run.", @@ -7817,6 +7833,7 @@ "Name": "The name of the artifact to be worked on (for example, \"My App\").\n\nArtifacts are the files that are worked on by actions in the pipeline. See the action configuration for each action for details about artifact parameters. For example, the S3 source action input artifact is a file name (or file path), and the files are generally provided as a ZIP file. Example artifact name: SampleApp_Windows.zip\n\nThe input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions." }, "AWS::CodePipeline::Pipeline OutputArtifact": { + "Files": "The files that you want to associate with the output artifact that will be exported from the compute action.", "Name": "The name of the output of an artifact, such as \"My App\".\n\nThe output artifact name must exactly match the input artifact declared for a downstream action. 
However, the downstream action's input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.\n\nOutput artifact names must be unique within a pipeline." }, "AWS::CodePipeline::Pipeline PipelineTriggerDeclaration": { @@ -8023,60 +8040,60 @@ "BucketArn": "The ARN of an Amazon S3 bucket that's the destination for advanced security features log export." }, "AWS::Cognito::UserPool": { - "AccountRecoverySetting": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email.", + "AccountRecoverySetting": "The available verified method a user can use to recover their password when they call `ForgotPassword` . You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email.", "AdminCreateUserConfig": "The settings for administrator creation of users in a user pool. Contains settings for allowing user sign-up, customizing invitation messages to new users, and the amount of time before temporary passwords expire.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .", - "AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n> This user pool property cannot be updated.", + "AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .", "AutoVerifiedAttributes": "The attributes to be auto-verified. Possible values: *email* , *phone_number* .", "DeletionProtection": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "DeviceConfiguration": "The device-remembering configuration for a user pool. 
A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.", "EmailAuthenticationMessage": "", "EmailAuthenticationSubject": "", "EmailConfiguration": "The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool.", - "EmailVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", - "EmailVerificationSubject": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", - "EnabledMfas": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \u201cOFF\u201d and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \u201cOFF\u201d. Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`", + "EmailVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", + "EmailVerificationSubject": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", + "EnabledMfas": "Set enabled MFA options on a specified user pool. To disable all MFAs after it has been enabled, set `MfaConfiguration` to `OFF` and remove EnabledMfas. MFAs can only be all disabled if `MfaConfiguration` is `OFF` . After you enable `SMS_MFA` , you can only disable it by setting `MfaConfiguration` to `OFF` . Can be one of the following values:\n\n- `SMS_MFA` - Enables MFA with SMS for the user pool. To select this option, you must also provide values for `SmsConfiguration` .\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n- `EMAIL_OTP` - Enables MFA with email for the user pool. To select this option, you must provide values for `EmailConfiguration` and within those, set `EmailSendingAccount` to `DEVELOPER` .\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA` | `EMAIL_OTP`", "LambdaConfig": "A collection of user pool Lambda triggers. Amazon Cognito invokes triggers at several possible stages of authentication operations. Triggers can modify the outcome of the operations that invoked them.", "MfaConfiguration": "The multi-factor authentication (MFA) configuration. Valid values include:\n\n- `OFF` MFA won't be used for any users.\n- `ON` MFA is required for all users to sign in.\n- `OPTIONAL` MFA will be required only for individual users who have an MFA factor activated.", "Policies": "A list of user pool policies. 
Contains the policy that sets password-complexity requirements.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .", - "Schema": "The schema attributes for the new user pool. These attributes can be standard or custom attributes.\n\n> During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute.", - "SmsAuthenticationMessage": "A string representing the SMS authentication message.", + "Schema": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.", + "SmsAuthenticationMessage": "The contents of the SMS authentication message.", "SmsConfiguration": "The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your AWS account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the AWS Region that you want, the Amazon Cognito user pool uses an AWS Identity and Access Management (IAM) role in your AWS account .", - "SmsVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", + "SmsVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", "UserAttributeUpdateSettings": "The settings for updates to user attributes. These settings include the property `AttributesRequireVerificationBeforeUpdate` ,\na user-pool setting that tells Amazon Cognito how to handle changes to the value of your users' email address and phone number attributes. For\nmore information, see [Verifying updates to email addresses and phone numbers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html#user-pool-settings-verifications-verify-attribute-updates) .", "UserPoolAddOns": "User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to `AUDIT` . To configure automatic security responses to risky traffic to your user pool, set to `ENFORCED` .\n\nFor more information, see [Adding advanced security to a user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) .", "UserPoolName": "A string used to name the user pool.", "UserPoolTags": "The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria.", - "UsernameAttributes": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated.", - "UsernameConfiguration": "You can choose to set case sensitivity on the username input for the selected sign-in option. 
For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set.", + "UsernameAttributes": "Specifies whether a user can use an email address or phone number as a username when they sign up.", + "UsernameConfiguration": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) .", "VerificationMessageTemplate": "The template for the verification message that your user pool delivers to users who set an email address or phone number attribute.\n\nSet the email message type that corresponds to your `DefaultEmailOption` selection. For `CONFIRM_WITH_LINK` , specify an `EmailMessageByLink` and leave `EmailMessage` blank. For `CONFIRM_WITH_CODE` , specify an `EmailMessage` and leave `EmailMessageByLink` blank. When you supply both parameters with either choice, Amazon Cognito returns an error." }, "AWS::Cognito::UserPool AccountRecoverySetting": { - "RecoveryMechanisms": "The list of `RecoveryOptionTypes` ." + "RecoveryMechanisms": "The list of options and priorities for user message delivery in forgot-password operations. Sets or displays user pool preferences for email or SMS message priority, whether users should fall back to a second delivery method, and whether passwords should only be reset by administrators." }, "AWS::Cognito::UserPool AdminCreateUserConfig": { "AllowAdminCreateUserOnly": "The setting for allowing self-service sign-up. When `true` , only administrators can create new user profiles. When `false` , users can register themselves and create a new user profile with the [SignUp](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SignUp.html) operation.", - "InviteMessageTemplate": "The message template to be used for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .", + "InviteMessageTemplate": "The template for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .", "UnusedAccountValidityDays": "This parameter is no longer in use. Configure the duration of temporary passwords with the `TemporaryPasswordValidityDays` parameter of [PasswordPolicyType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_PasswordPolicyType.html) . 
For older user pools that have a `UnusedAccountValidityDays` configuration, that value is effective until you set a value for `TemporaryPasswordValidityDays` .\n\nThe password expiration limit in days for administrator-created users. When this time expires, the user can't sign in with their temporary password. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `RESEND` for the `MessageAction` parameter.\n\nThe default value for this parameter is 7." }, "AWS::Cognito::UserPool AdvancedSecurityAdditionalFlows": { "CustomAuthMode": "" }, "AWS::Cognito::UserPool CustomEmailSender": { - "LambdaArn": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send email notifications to users.", - "LambdaVersion": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information that Amazon Cognito passes to your custom email sender AWS Lambda function. The only supported value is `V1_0` ." + "LambdaArn": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.", + "LambdaVersion": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function." }, "AWS::Cognito::UserPool CustomSMSSender": { - "LambdaArn": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send SMS notifications to users.", - "LambdaVersion": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS sender Lambda function. The only supported value is `V1_0` ." + "LambdaArn": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.", + "LambdaVersion": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function." }, "AWS::Cognito::UserPool DeviceConfiguration": { "ChallengeRequiredOnNewDevice": "When true, a remembered device can sign in with device authentication instead of SMS and time-based one-time password (TOTP) factors for multi-factor authentication (MFA).\n\n> Whether or not `ChallengeRequiredOnNewDevice` is true, users who sign in with devices that have not been confirmed or remembered must still provide a second factor in a user pool that requires MFA.", "DeviceOnlyRememberedOnUserPrompt": "When true, Amazon Cognito doesn't automatically remember a user's device when your app sends a [ConfirmDevice](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_ConfirmDevice.html) API request. In your app, create a prompt for your user to choose whether they want to remember their device. Return the user's choice in an [UpdateDeviceStatus](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateDeviceStatus.html) API request.\n\nWhen `DeviceOnlyRememberedOnUserPrompt` is `false` , Amazon Cognito immediately remembers devices that you register in a `ConfirmDevice` API request." }, "AWS::Cognito::UserPool EmailConfiguration": { - "ConfigurationSet": "The set of configuration rules that can be applied to emails sent using Amazon SES. 
A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- Event publishing \u2013 Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.\n- IP pool management \u2013 When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.", + "ConfigurationSet": "The set of configuration rules that can be applied to emails sent using Amazon Simple Email Service. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- **Event publishing** - Amazon Simple Email Service can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as Amazon SNS and Amazon CloudWatch.\n- **IP pool management** - When leasing dedicated IP addresses with Amazon Simple Email Service, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.", "EmailSendingAccount": "Specifies whether Amazon Cognito uses its built-in functionality to send your users email messages, or uses your Amazon Simple Email Service email configuration. Specify one of the following values:\n\n- **COGNITO_DEFAULT** - When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is less than the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.\n\nTo look up the email delivery limit for the default option, see [Limits](https://docs.aws.amazon.com/cognito/latest/developerguide/limits.html) in the *Amazon Cognito Developer Guide* .\n\nThe default FROM address is `no-reply@verificationemail.com` . To customize the FROM address, provide the Amazon Resource Name (ARN) of an Amazon SES verified email address for the `SourceArn` parameter.\n- **DEVELOPER** - When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account .\n\nIf you use this option, provide the ARN of an Amazon SES verified email address for the `SourceArn` parameter.\n\nBefore Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a *service-linked role* , which is a type of role in your AWS account .
This role contains the permissions that allow you to access Amazon SES and send email messages from your email address. For more information about the service-linked role that Amazon Cognito creates, see [Using Service-Linked Roles for Amazon Cognito](https://docs.aws.amazon.com/cognito/latest/developerguide/using-service-linked-roles.html) in the *Amazon Cognito Developer Guide* .", - "From": "Identifies either the sender's email address or the sender's name with their email address. For example, `testuser@example.com` or `Test User ` . This address appears before the body of the email.", + "From": "Either the sender\u2019s email address or the sender\u2019s name with their email address. For example, `testuser@example.com` or `Test User ` . This address appears before the body of the email.", "ReplyToEmailAddress": "The destination to which the receiver of the email should reply.", "SourceArn": "The ARN of a verified email address or an address from a verified domain in Amazon SES. You can set a `SourceArn` email from a verified domain only with an API request. You can set a verified email address, but not an address in a verified domain, in the Amazon Cognito console. Amazon Cognito uses the email address that you provide in one of the following ways, depending on the value that you specify for the `EmailSendingAccount` parameter:\n\n- If you specify `COGNITO_DEFAULT` , Amazon Cognito uses this address as the custom FROM address when it emails your users using its built-in email account.\n- If you specify `DEVELOPER` , Amazon Cognito emails your users with this address by calling Amazon SES on your behalf.\n\nThe Region value of the `SourceArn` parameter must indicate a supported AWS Region of your user pool. Typically, the Region in the `SourceArn` and the user pool Region are the same. For more information, see [Amazon SES email configuration regions](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-email.html#user-pool-email-developer-region-mapping) in the [Amazon Cognito Developer Guide](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html) ." }, @@ -8087,11 +8104,11 @@ }, "AWS::Cognito::UserPool LambdaConfig": { "CreateAuthChallenge": "The configuration of a create auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", - "CustomEmailSender": "A custom email sender AWS Lambda trigger.", + "CustomEmailSender": "The configuration of a custom email sender Lambda trigger. This trigger routes all email notifications from a user pool to a Lambda function that delivers the message using custom logic.", "CustomMessage": "A custom message Lambda trigger. This trigger is an opportunity to customize all SMS and email messages from your user pool. When a custom message trigger is active, your user pool routes all messages to a Lambda function that returns a runtime-customized message subject and body for your user pool to deliver to a user.", - "CustomSMSSender": "A custom SMS sender AWS Lambda trigger.", + "CustomSMSSender": "The configuration of a custom SMS sender Lambda trigger. 
This trigger routes all SMS notifications from a user pool to a Lambda function that delivers the message using custom logic.", "DefineAuthChallenge": "The configuration of a define auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", - "KMSKeyID": "The Amazon Resource Name of a AWS Key Management Service ( AWS KMS ) key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to `CustomEmailSender` and `CustomSMSSender` .", + "KMSKeyID": "The ARN of a [KMS key](https://docs.aws.amazon.com//kms/latest/developerguide/concepts.html#master_keys) . Amazon Cognito uses the key to encrypt codes and temporary passwords sent to custom sender Lambda triggers.", "PostAuthentication": "The configuration of a [post authentication Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-post-authentication.html) in a user pool. This trigger can take custom actions after a user signs in.", "PostConfirmation": "The configuration of a [post confirmation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-post-confirmation.html) in a user pool. This trigger can take custom actions after a user confirms their user account and their email address or phone number.", "PreAuthentication": "The configuration of a [pre authentication trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-authentication.html) in a user pool. This trigger can evaluate and modify user sign-in events.", @@ -8122,12 +8139,12 @@ "LambdaVersion": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features." }, "AWS::Cognito::UserPool RecoveryOption": { - "Name": "Specifies the recovery method for a user.", - "Priority": "A positive integer specifying priority of a method with 1 being the highest priority." + "Name": "The recovery method that this object sets a recovery option for.", + "Priority": "Your priority preference for using the specified attribute in account recovery. The highest priority is `1` ." }, "AWS::Cognito::UserPool SchemaAttribute": { "AttributeDataType": "The data format of the values for your attribute. When you choose an `AttributeDataType` , Amazon Cognito validates the input against the data type. A custom attribute value in your user's ID token is always a string, for example `\"custom:isMember\" : \"true\"` or `\"custom:YearsAsMember\" : \"12\"` .", - "DeveloperOnlyAttribute": "> We recommend that you use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator.
Users will not be able to modify this attribute using their access token.", + "DeveloperOnlyAttribute": "> You should use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users won't be able to modify this attribute using their access token. For example, `DeveloperOnlyAttribute` can be modified using AdminUpdateUserAttributes but can't be updated using UpdateUserAttributes.", "Mutable": "Specifies whether the value of the attribute can be changed.\n\nAny user pool attribute whose value you map from an IdP attribute must be mutable, with a parameter value of `true` . Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see [Specifying Identity Provider Attribute Mappings for Your User Pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html) .", "Name": "The name of your user pool attribute. When you create or update a user pool, adding a schema attribute creates a custom or developer-only attribute. When you add an attribute with a `Name` value of `MyAttribute` , Amazon Cognito creates the custom attribute `custom:MyAttribute` . When `DeveloperOnlyAttribute` is `true` , Amazon Cognito creates your attribute as `dev:MyAttribute` . In an operation that describes a user pool, Amazon Cognito returns this value as `value` for standard attributes, `custom:value` for custom attributes, and `dev:value` for developer-only attributes..", "NumberAttributeConstraints": "Specifies the constraints for an attribute of the number type.", @@ -8135,7 +8152,7 @@ "StringAttributeConstraints": "Specifies the constraints for an attribute of the string type." }, "AWS::Cognito::UserPool SmsConfiguration": { - "ExternalId": "The external ID is a value. We recommend you use `ExternalId` to add security to your IAM role, which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , the Cognito User Pool uses it when attempting to assume your IAM role. You can also set your roles trust policy to require the `ExternalID` . If you use the Cognito Management Console to create a role for SMS MFA, Cognito creates a role with the required permissions and a trust policy that uses `ExternalId` .", + "ExternalId": "The external ID provides additional security for your IAM role. You can use an `ExternalId` with the IAM role that you use with Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , your Amazon Cognito user pool includes it in the request to assume your IAM role. You can configure the role trust policy to require that Amazon Cognito, and any principal, provide the `ExternalID` . 
If you use the Amazon Cognito Management Console to create a role for SMS multi-factor authentication (MFA), Amazon Cognito creates a role with the required permissions and a trust policy that demonstrates use of the `ExternalId` .\n\nFor more information about the `ExternalId` of a role, see [How to use an external ID when granting access to your AWS resources to a third party](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) .", "SnsCallerArn": "The Amazon Resource Name (ARN) of the Amazon SNS caller. This is the ARN of the IAM role in your AWS account that Amazon Cognito will use to send SMS messages. SMS messages are subject to a [spending limit](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html) .", "SnsRegion": "The AWS Region to use with Amazon SNS integration. You can choose the same Region as your user pool, or a supported *Legacy Amazon SNS alternate Region* .\n\nAmazon Cognito resources in the Asia Pacific (Seoul) AWS Region must use your Amazon SNS configuration in the Asia Pacific (Tokyo) Region. For more information, see [SMS message settings for Amazon Cognito user pools](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-sms-settings.html) ." }, @@ -8162,7 +8179,7 @@ "SmsMessage": "The template for SMS messages that Amazon Cognito sends to your users." }, "AWS::Cognito::UserPoolClient": { - "AccessTokenValidity": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with their access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours.", + "AccessTokenValidity": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour.", "AllowedOAuthFlows": "The OAuth grant types that you want your app client to generate. To create an app client that generates client credentials grants, you must add `client_credentials` as the only allowed OAuth flow.\n\n- **code** - Use a code grant flow, which provides an authorization code as the response. 
This code can be exchanged for access tokens with the `/oauth2/token` endpoint.\n- **implicit** - Issue the access token (and, optionally, ID token, based on scopes) directly to your user.\n- **client_credentials** - Issue the access token from the `/oauth2/token` endpoint directly to a non-person user using a combination of the client ID and client secret.", "AllowedOAuthFlowsUserPoolClient": "Set to `true` to use OAuth 2.0 features in your user pool app client.\n\n`AllowedOAuthFlowsUserPoolClient` must be `true` before you can configure the following features in your app client.\n\n- `CallBackURLs` : Callback URLs.\n- `LogoutURLs` : Sign-out redirect URLs.\n- `AllowedOAuthScopes` : OAuth 2.0 scopes.\n- `AllowedOAuthFlows` : Support for authorization code, implicit, and client credentials OAuth 2.0 grants.\n\nTo use OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set `AllowedOAuthFlowsUserPoolClient` to `true` in a `CreateUserPoolClient` or `UpdateUserPoolClient` API request. If you don't set a value for `AllowedOAuthFlowsUserPoolClient` in a request with the AWS CLI or SDKs, it defaults to `false` .", "AllowedOAuthScopes": "The allowed OAuth scopes. Possible values provided by OAuth are `phone` , `email` , `openid` , and `profile` . Possible values provided by AWS are `aws.cognito.signin.user.admin` . Custom scopes created in Resource Servers are also supported.", @@ -8175,18 +8192,18 @@ "EnableTokenRevocation": "Activates or deactivates token revocation. For more information about revoking tokens, see [RevokeToken](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_RevokeToken.html) .\n\nIf you don't include this parameter, token revocation is automatically activated for the new user pool client.", "ExplicitAuthFlows": "The authentication flows that you want your user pool client to support. For each app client in your user pool, you can sign in your users with any combination of one or more flows, including with a user name and Secure Remote Password (SRP), a user name and password, or a custom authentication process that you define with Lambda functions.\n\n> If you don't specify a value for `ExplicitAuthFlows` , your user client supports `ALLOW_REFRESH_TOKEN_AUTH` , `ALLOW_USER_SRP_AUTH` , and `ALLOW_CUSTOM_AUTH` . \n\nValid values include:\n\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, your app passes a user name and password to Amazon Cognito in the request, instead of using the Secure Remote Password (SRP) protocol to securely transmit the password.\n- `ALLOW_CUSTOM_AUTH` : Enable Lambda trigger based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Amazon Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP-based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.\n\nIn some environments, you will see the values `ADMIN_NO_SRP_AUTH` , `CUSTOM_AUTH_FLOW_ONLY` , or `USER_PASSWORD_AUTH` . 
You can't assign these legacy `ExplicitAuthFlows` values to user pool clients at the same time as values that begin with `ALLOW_` ,\nlike `ALLOW_USER_SRP_AUTH` .", "GenerateSecret": "Boolean to specify whether you want to generate a secret for the user pool client being created.", - "IdTokenValidity": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours.", + "IdTokenValidity": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour.", "LogoutURLs": "A list of allowed logout URLs for the IdPs.", - "PreventUserExistenceErrors": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.", + "PreventUserExistenceErrors": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value.", "ReadAttributes": "The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. 
Your app makes a [GetUser](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetUser.html) API request to retrieve and display your user's profile data.\n\nWhen you don't specify the `ReadAttributes` for your app client, your app can read the values of `email_verified` , `phone_number_verified` , and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, `ReadAttributes` doesn't return any information. Amazon Cognito only populates `ReadAttributes` in the API response if you have specified your own custom set of read attributes.", - "RefreshTokenValidity": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days.", + "RefreshTokenValidity": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days.", "SupportedIdentityProviders": "A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `Google` , `SignInWithApple` , and `LoginWithAmazon` . You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example `MySAMLIdP` or `MyOIDCIdP` .", "TokenValidityUnits": "The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours.", "UserPoolId": "The user pool ID for the user pool where you want to create a user pool client.", "WriteAttributes": "The list of user attributes that you want your app client to have write access to. After your user authenticates in your app, their access token authorizes them to set or modify their own attribute value for any attribute in this list. An example of this kind of activity is when you present your user with a form to update their profile information and they change their last name. Your app then makes an [UpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserAttributes.html) API request and sets `family_name` to the new value.\n\nWhen you don't specify the `WriteAttributes` for your app client, your app can write the values of the Standard attributes of your user pool. 
When your user pool has write access to these default attributes, `WriteAttributes` doesn't return any information. Amazon Cognito only populates `WriteAttributes` in the API response if you have specified your own custom set of write attributes.\n\nIf your app client allows users to sign in through an IdP, this array must include all attributes that you have mapped to IdP attributes. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If your app client does not have write access to a mapped attribute, Amazon Cognito throws an error when it tries to update the attribute. For more information, see [Specifying IdP Attribute Mappings for Your user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html) ." }, "AWS::Cognito::UserPoolClient AnalyticsConfiguration": { - "ApplicationArn": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint project for integration with the chosen user pool client. Amazon Cognito publishes events to the Amazon Pinpoint project that the app ARN declares.", + "ApplicationArn": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project that you want to connect to your user pool app client. Amazon Cognito publishes events to the Amazon Pinpoint project that `ApplicationArn` declares. You can also configure your application to pass an endpoint ID in the `AnalyticsMetadata` parameter of sign-in operations. The endpoint ID is information about the destination for push notifications", "ApplicationId": "Your Amazon Pinpoint project ID.", "ExternalId": "The [external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) of the role that Amazon Cognito assumes to send analytics data to Amazon Pinpoint.", "RoleArn": "The ARN of an AWS Identity and Access Management role that has the permissions required for Amazon Cognito to publish events to Amazon Pinpoint analytics.", @@ -8199,8 +8216,8 @@ }, "AWS::Cognito::UserPoolDomain": { "CustomDomainConfig": "The configuration for a custom domain that hosts the sign-up and sign-in pages for your application. Use this object to specify an SSL certificate that is managed by ACM.", - "Domain": "The domain name for the domain that hosts the sign-up and sign-in pages for your application. For example: `auth.example.com` . If you're using a prefix domain, this field denotes the first part of the domain before `.auth.[region].amazoncognito.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", - "UserPoolId": "The user pool ID for the user pool where you want to associate a user pool domain." + "Domain": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", + "UserPoolId": "The ID of the user pool that is associated with the custom domain whose certificate you're updating." }, "AWS::Cognito::UserPoolDomain CustomDomainConfigType": { "CertificateArn": "The Amazon Resource Name (ARN) of an AWS Certificate Manager SSL certificate. You use this certificate for the subdomain of your custom domain." @@ -8221,7 +8238,7 @@ "UserPoolId": "The user pool ID." 
}, "AWS::Cognito::UserPoolResourceServer": { - "Identifier": "A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. For example: `https://my-weather-api.example.com` .", + "Identifier": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens.", "Name": "A friendly name for the resource server.", "Scopes": "A list of scopes. Each scope is a map with keys `ScopeName` and `ScopeDescription` .", "UserPoolId": "The user pool ID for the user pool." @@ -8232,7 +8249,7 @@ }, "AWS::Cognito::UserPoolRiskConfigurationAttachment": { "AccountTakeoverRiskConfiguration": "The settings for automated responses and notification templates for adaptive authentication with advanced security features.", - "ClientId": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` ).", + "ClientId": "The app client where this configuration is applied. When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings.", "CompromisedCredentialsRiskConfiguration": "Settings for compromised-credentials actions and authentication types with advanced security features in full-function `ENFORCED` mode.", "RiskExceptionConfiguration": "Exceptions to the risk evaluation configuration, including always-allow and always-block IP address ranges.", "UserPoolId": "The ID of the user pool that has the risk configuration applied." @@ -8276,7 +8293,7 @@ }, "AWS::Cognito::UserPoolUICustomizationAttachment": { "CSS": "The CSS values in the UI customization.", - "ClientId": "The client ID for the client app. You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` ).", + "ClientId": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings..", "UserPoolId": "The user pool ID for the user pool." }, "AWS::Cognito::UserPoolUser": { @@ -8284,7 +8301,7 @@ "DesiredDeliveryMediums": "Specify `\"EMAIL\"` if email will be used to send the welcome message. Specify `\"SMS\"` if the phone number will be used. The default value is `\"SMS\"` . You can specify more than one value.", "ForceAliasCreation": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .", "MessageAction": "Set to `RESEND` to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. 
Set to `SUPPRESS` to suppress sending the message. You can specify only one value.", - "UserAttributes": "An array of name-value pairs that contain user attributes and attribute values.", + "UserAttributes": "An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than `Username` . However, any attributes that you specify as required (when creating a user pool or in the *Attributes* tab of the console) either you should supply (in your call to `AdminCreateUser` ) or the user should supply (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . You can also do this by calling [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .\n\n- *email* : The email address of the user to whom the message that contains the code and username will be sent. Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and username will be sent. Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter.", "UserPoolId": "The user pool ID for the user pool where the user will be created.", "Username": "The value that you want to set as the username sign-in attribute. The following conditions apply to the username parameter.\n\n- The username can't be a duplicate of another username in the same user pool.\n- You can't change the value of a username after you create it.\n- You can only provide a value if usernames are a valid sign-in attribute for your user pool. If your user pool only supports phone numbers or email addresses as sign-in attributes, Amazon Cognito automatically generates a username value. For more information, see [Customizing sign-in attributes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-aliases) .", "ValidationData": "Temporary user attributes that contribute to the outcomes of your pre sign-up Lambda trigger. This set of key-value pairs are for custom validation of information that you collect from your users but don't need to retain.\n\nYour Lambda function can analyze this additional data and act on it. Your function might perform external API operations like logging user attributes and validation data to Amazon CloudWatch Logs. Validation data might also affect the response that your function returns to Amazon Cognito, like automatically confirming the user if they sign up from within your network.\n\nFor more information about the pre sign-up Lambda trigger, see [Pre sign-up Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-sign-up.html) ." 
@@ -8296,7 +8313,7 @@ "AWS::Cognito::UserPoolUserToGroupAttachment": { "GroupName": "The name of the group that you want to add your user to.", "UserPoolId": "The user pool ID for the user pool.", - "Username": "" + "Username": "The user's username." }, "AWS::Comprehend::DocumentClassifier": { "DataAccessRoleArn": "The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data.", @@ -10101,6 +10118,7 @@ "EngineVersion": "The engine version number of the replication instance.\n\nIf an engine version number is not specified when a replication instance is created, the default is the latest engine version available.", "KmsKeyId": "An AWS KMS key identifier that is used to encrypt the data on the replication instance.\n\nIf you don't specify a value for the `KmsKeyId` parameter, AWS DMS uses your default encryption key.\n\nAWS KMS creates the default encryption key for your AWS account . Your AWS account has a different default encryption key for each AWS Region .", "MultiAZ": "Specifies whether the replication instance is a Multi-AZ deployment. You can't set the `AvailabilityZone` parameter if the Multi-AZ parameter is set to `true` .", + "NetworkType": "The type of IP address protocol used by a replication instance, such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet supported.", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in UTC.\n\n*Format* : `ddd:hh24:mi-ddd:hh24:mi`\n\n*Default* : A 30-minute window selected at random from an 8-hour block of time per AWS Region , occurring on a random day of the week.\n\n*Valid days* ( `ddd` ): `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun`\n\n*Constraints* : Minimum 30-minute window.", "PubliclyAccessible": "Specifies the accessibility options for the replication instance. A value of `true` represents an instance with a public IP address. A value of `false` represents an instance with a private IP address. The default value is `true` .", "ReplicationInstanceClass": "The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to `\"dms.c4.large\"` . For more information on the settings and capacities for the available replication instance classes, see [Selecting the right AWS DMS replication instance for your migration](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth) in the *AWS Database Migration Service User Guide* .", @@ -11752,7 +11770,8 @@ "OutPostArn": "The Amazon Resource Name (ARN) of the Outpost on which to create the Capacity Reservation.", "PlacementGroupArn": "The Amazon Resource Name (ARN) of the cluster placement group in which to create the Capacity Reservation. For more information, see [Capacity Reservations for cluster placement groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/cr-cpg.html) in the *Amazon EC2 User Guide* .", "TagSpecifications": "The tags to apply to the Capacity Reservation during launch.", - "Tenancy": "Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:\n\n- `default` - The Capacity Reservation is created on hardware that is shared with other AWS accounts .\n- `dedicated` - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single AWS account ." 
+ "Tenancy": "Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:\n\n- `default` - The Capacity Reservation is created on hardware that is shared with other AWS accounts .\n- `dedicated` - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single AWS account .", + "UnusedReservationBillingOwnerId": "The ID of the AWS account to which billing of the unused capacity of the Capacity Reservation is assigned." }, "AWS::EC2::CapacityReservation Tag": { "Key": "The tag key.", @@ -11953,7 +11972,7 @@ "AcceleratorManufacturers": "Indicates whether instance types must have accelerators by specific manufacturers.\n\n- For instance types with AWS devices, specify `amazon-web-services` .\n- For instance types with AMD devices, specify `amd` .\n- For instance types with Habana devices, specify `habana` .\n- For instance types with NVIDIA devices, specify `nvidia` .\n- For instance types with Xilinx devices, specify `xilinx` .\n\nDefault: Any manufacturer", "AcceleratorNames": "The accelerators that must be on the instance type.\n\n- For instance types with NVIDIA A10G GPUs, specify `a10g` .\n- For instance types with NVIDIA A100 GPUs, specify `a100` .\n- For instance types with NVIDIA H100 GPUs, specify `h100` .\n- For instance types with AWS Inferentia chips, specify `inferentia` .\n- For instance types with NVIDIA GRID K520 GPUs, specify `k520` .\n- For instance types with NVIDIA K80 GPUs, specify `k80` .\n- For instance types with NVIDIA M60 GPUs, specify `m60` .\n- For instance types with AMD Radeon Pro V520 GPUs, specify `radeon-pro-v520` .\n- For instance types with NVIDIA T4 GPUs, specify `t4` .\n- For instance types with NVIDIA T4G GPUs, specify `t4g` .\n- For instance types with Xilinx VU9P FPGAs, specify `vu9p` .\n- For instance types with NVIDIA V100 GPUs, specify `v100` .\n\nDefault: Any accelerator", "AcceleratorTotalMemoryMiB": "The minimum and maximum amount of total accelerator memory, in MiB.\n\nDefault: No minimum or maximum limits", - "AcceleratorTypes": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n- To include instance types with inference hardware, specify `inference` .\n\nDefault: Any accelerator type", + "AcceleratorTypes": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n\nDefault: Any accelerator type", "AllowedInstanceTypes": "The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.\n\nYou can use strings with one or more wild cards, represented by an asterisk ( `*` ), to allow an instance type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` ,Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.\n\n> If you specify `AllowedInstanceTypes` , you can't specify `ExcludedInstanceTypes` . 
\n\nDefault: All instance types", "BareMetal": "Indicates whether bare metal instance types must be included, excluded, or required.\n\n- To include bare metal instance types, specify `included` .\n- To require only bare metal instance types, specify `required` .\n- To exclude bare metal instance types, specify `excluded` .\n\nDefault: `excluded`", "BaselineEbsBandwidthMbps": "The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. For more information, see [Amazon EBS\u2013optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", @@ -12202,7 +12221,7 @@ "DisableApiTermination": "If you set this parameter to `true` , you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use [ModifyInstanceAttribute](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html) . Alternatively, if you set `InstanceInitiatedShutdownBehavior` to `terminate` , you can terminate the instance by running the shutdown command from the instance.\n\nDefault: `false`", "EbsOptimized": "Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.\n\nDefault: `false`", "ElasticGpuSpecifications": "An elastic GPU to associate with the instance.\n\n> Amazon Elastic Graphics reached end of life on January 8, 2024.", - "ElasticInferenceAccelerators": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference (EI) is no longer available to new customers. For more information, see [Amazon Elastic Inference FAQs](https://docs.aws.amazon.com/machine-learning/elastic-inference/faqs/) .", + "ElasticInferenceAccelerators": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference is no longer available.", "EnclaveOptions": "Indicates whether the instance is enabled for AWS Nitro Enclaves.", "HibernationOptions": "Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the [hibernation prerequisites](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html) . For more information, see [Hibernate your Amazon EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) in the *Amazon EC2 User Guide* .\n\nYou can't enable hibernation and AWS Nitro Enclaves on the same instance.", "HostId": "If you specify host for the `Affinity` property, the ID of a dedicated host that the instance is associated with. If you don't specify an ID, Amazon EC2 launches the instance onto any available, compatible dedicated host in your account. This type of launch is called an untargeted launch. Note that for untargeted launches, you must have a compatible, dedicated host available to successfully launch instances.", @@ -12402,7 +12421,7 @@ "DeleteOnTermination": "Indicates whether the EBS volume is deleted on instance termination.", "Encrypted": "Indicates whether the EBS volume is encrypted. Encrypted volumes can only be attached to instances that support Amazon EBS encryption. 
If you are creating a volume from a snapshot, you can't specify an encryption value.", "Iops": "The number of I/O operations per second (IOPS). For `gp3` , `io1` , and `io2` volumes, this represents the number of IOPS that are provisioned for the volume. For `gp2` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n\nThe following are the supported values for each volume type:\n\n- `gp3` : 3,000 - 16,000 IOPS\n- `io1` : 100 - 64,000 IOPS\n- `io2` : 100 - 256,000 IOPS\n\nFor `io2` volumes, you can achieve up to 256,000 IOPS on [instances built on the Nitro System](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) . On other instances, you can achieve performance up to 32,000 IOPS.\n\nThis parameter is supported for `io1` , `io2` , and `gp3` volumes only.", - "KmsKeyId": "The ARN of the symmetric AWS Key Management Service ( AWS KMS ) CMK used for encryption.", + "KmsKeyId": "Identifier (key ID, key alias, key ARN, or alias ARN) of the customer managed KMS key to use for EBS encryption.", "SnapshotId": "The ID of the snapshot.", "Throughput": "The throughput to provision for a `gp3` volume, with a maximum of 1,000 MiB/s.\n\nValid Range: Minimum value of 125. Maximum value of 1000.", "VolumeSize": "The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. The following are the supported volumes sizes for each volume type:\n\n- `gp2` and `gp3` : 1 - 16,384 GiB\n- `io1` : 4 - 16,384 GiB\n- `io2` : 4 - 65,536 GiB\n- `st1` and `sc1` : 125 - 16,384 GiB\n- `standard` : 1 - 1024 GiB", @@ -12437,7 +12456,7 @@ "AcceleratorManufacturers": "Indicates whether instance types must have accelerators by specific manufacturers.\n\n- For instance types with AWS devices, specify `amazon-web-services` .\n- For instance types with AMD devices, specify `amd` .\n- For instance types with Habana devices, specify `habana` .\n- For instance types with NVIDIA devices, specify `nvidia` .\n- For instance types with Xilinx devices, specify `xilinx` .\n\nDefault: Any manufacturer", "AcceleratorNames": "The accelerators that must be on the instance type.\n\n- For instance types with NVIDIA A10G GPUs, specify `a10g` .\n- For instance types with NVIDIA A100 GPUs, specify `a100` .\n- For instance types with NVIDIA H100 GPUs, specify `h100` .\n- For instance types with AWS Inferentia chips, specify `inferentia` .\n- For instance types with NVIDIA GRID K520 GPUs, specify `k520` .\n- For instance types with NVIDIA K80 GPUs, specify `k80` .\n- For instance types with NVIDIA M60 GPUs, specify `m60` .\n- For instance types with AMD Radeon Pro V520 GPUs, specify `radeon-pro-v520` .\n- For instance types with NVIDIA T4 GPUs, specify `t4` .\n- For instance types with NVIDIA T4G GPUs, specify `t4g` .\n- For instance types with Xilinx VU9P FPGAs, specify `vu9p` .\n- For instance types with NVIDIA V100 GPUs, specify `v100` .\n\nDefault: Any accelerator", "AcceleratorTotalMemoryMiB": "The minimum and maximum amount of total accelerator memory, in MiB.\n\nDefault: No minimum or maximum limits", - "AcceleratorTypes": "The accelerator types that must be on the instance type.\n\n- For instance types with GPU accelerators, specify `gpu` .\n- For instance types with FPGA accelerators, specify `fpga` .\n- For instance types with inference accelerators, specify `inference` .\n\nDefault: Any accelerator type", + "AcceleratorTypes": "The accelerator types that must be on the instance type.\n\n- 
For instance types with GPU accelerators, specify `gpu` .\n- For instance types with FPGA accelerators, specify `fpga` .\n\nDefault: Any accelerator type", "AllowedInstanceTypes": "The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.\n\nYou can use strings with one or more wild cards, represented by an asterisk ( `*` ), to allow an instance type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` ,Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.\n\n> If you specify `AllowedInstanceTypes` , you can't specify `ExcludedInstanceTypes` . \n\nDefault: All instance types", "BareMetal": "Indicates whether bare metal instance types must be included, excluded, or required.\n\n- To include bare metal instance types, specify `included` .\n- To require only bare metal instance types, specify `required` .\n- To exclude bare metal instance types, specify `excluded` .\n\nDefault: `excluded`", "BaselineEbsBandwidthMbps": "The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. For more information, see [Amazon EBS\u2013optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", @@ -12476,14 +12495,14 @@ "DisableApiTermination": "If you set this parameter to `true` , you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use [ModifyInstanceAttribute](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html) . Alternatively, if you set `InstanceInitiatedShutdownBehavior` to `terminate` , you can terminate the instance by running the shutdown command from the instance.", "EbsOptimized": "Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.", "ElasticGpuSpecifications": "Deprecated.\n\n> Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.", - "ElasticInferenceAccelerators": "An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. 
However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", + "ElasticInferenceAccelerators": "> Amazon Elastic Inference is no longer available. \n\nAn elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", "EnclaveOptions": "Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see [What is AWS Nitro Enclaves?](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) in the *AWS Nitro Enclaves User Guide* .\n\nYou can't enable AWS Nitro Enclaves and hibernation on the same instance.", "HibernationOptions": "Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the [hibernation prerequisites](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html) . For more information, see [Hibernate your Amazon EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) in the *Amazon EC2 User Guide* .", "IamInstanceProfile": "The name or Amazon Resource Name (ARN) of an IAM instance profile.", "ImageId": "The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-0ac394d6a3example`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", "InstanceInitiatedShutdownBehavior": "Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).\n\nDefault: `stop`", "InstanceMarketOptions": "The market (purchasing) option for the instances.", - "InstanceRequirements": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nYou must specify `VCpuCount` and `MemoryMiB` . All other attributes are optional. Any unspecified optional attribute is set to its default.\n\nWhen you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.\n\nTo limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:\n\n- `AllowedInstanceTypes` - The instance types to include in the list. 
All other instance types are ignored, even if they match your specified attributes.\n- `ExcludedInstanceTypes` - The instance types to exclude from the list, even if they match your specified attributes.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .\n> \n> Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the [launch instance wizard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) , or with the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API or [AWS::EC2::Instance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) AWS CloudFormation resource, you can't specify `InstanceRequirements` . \n\nFor more information, see [Attribute-based instance type selection for EC2 Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) , [Attribute-based instance type selection for Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html) , and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", + "InstanceRequirements": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nYou must specify `VCpuCount` and `MemoryMiB` . All other attributes are optional. Any unspecified optional attribute is set to its default.\n\nWhen you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.\n\nTo limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:\n\n- `AllowedInstanceTypes` - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.\n- `ExcludedInstanceTypes` - The instance types to exclude from the list, even if they match your specified attributes.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .\n> \n> Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the [launch instance wizard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) , or with the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API or [AWS::EC2::Instance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) AWS CloudFormation resource, you can't specify `InstanceRequirements` . \n\nFor more information, see [Specify attributes for instance type selection for EC2 Fleet or Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", "InstanceType": "The instance type. 
For more information, see [Amazon EC2 instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .\n\nIf you specify `InstanceType` , you can't specify `InstanceRequirements` .", "KernelId": "The ID of the kernel.\n\nWe recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see [User Provided Kernels](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the *Amazon EC2 User Guide* .", "KeyName": "The name of the key pair. You can create a key pair using [CreateKeyPair](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) or [ImportKeyPair](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html) .\n\n> If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.", @@ -12545,7 +12564,7 @@ "DeviceIndex": "The device index for the network interface attachment. Each network interface requires a device index. If you create a launch template that includes secondary network interfaces but not a primary network interface, then you must add a primary network interface as a launch parameter when you launch an instance from the template.", "EnaSrdSpecification": "The ENA Express configuration for the network interface.", "Groups": "The IDs of one or more security groups.", - "InterfaceType": "The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa` . For more information, see [Elastic Fabric Adapter](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) in the *Amazon EC2 User Guide* .\n\nIf you are not creating an EFA, specify `interface` or omit this parameter.\n\nValid values: `interface` | `efa`", + "InterfaceType": "The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa` or `efa-only` . For more information, see [Elastic Fabric Adapter](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) in the *Amazon EC2 User Guide* .\n\nIf you are not creating an EFA, specify `interface` or omit this parameter.\n\nIf you specify `efa-only` , do not assign any IP addresses to the network interface. EFA-only network interfaces do not support IP addresses.\n\nValid values: `interface` | `efa` | `efa-only`", "Ipv4PrefixCount": "The number of IPv4 prefixes to be automatically assigned to the network interface. You cannot use this option if you use the `Ipv4Prefix` option.", "Ipv4Prefixes": "One or more IPv4 prefixes to be assigned to the network interface. You cannot use this option if you use the `Ipv4PrefixCount` option.", "Ipv6AddressCount": "The number of IPv6 addresses to assign to a network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. You can't use this option if specifying specific IPv6 addresses.", @@ -12983,7 +13002,7 @@ "AWS::EC2::PrefixList": { "AddressFamily": "The IP address type.\n\nValid Values: `IPv4` | `IPv6`", "Entries": "The entries for the prefix list.", - "MaxEntries": "The maximum number of entries for the prefix list. This property is required when you create a prefix list.", + "MaxEntries": "The maximum number of entries for the prefix list.", "PrefixListName": "A name for the prefix list.\n\nConstraints: Up to 255 characters in length. The name cannot start with `com.amazonaws` .", "Tags": "The tags for the prefix list."
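Reviewer note: the `AWS::EC2::PrefixList` update above drops the sentence claiming `MaxEntries` is required. A minimal sketch of what that implies for a template follows; the logical ID, name, and CIDR ranges are made up for illustration and are not part of the patch.

    Resources:
      OfficeCidrs:                        # hypothetical logical ID
        Type: AWS::EC2::PrefixList
        Properties:
          PrefixListName: office-cidrs
          AddressFamily: IPv4
          # MaxEntries omitted; the revised description above no longer
          # states that it must be set when the prefix list is created.
          Entries:
            - Cidr: 10.0.0.0/24
              Description: office network A
            - Cidr: 10.0.1.0/24
              Description: office network B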
}, @@ -13149,7 +13168,7 @@ "AcceleratorManufacturers": "Indicates whether instance types must have accelerators by specific manufacturers.\n\n- For instance types with AWS devices, specify `amazon-web-services` .\n- For instance types with AMD devices, specify `amd` .\n- For instance types with Habana devices, specify `habana` .\n- For instance types with NVIDIA devices, specify `nvidia` .\n- For instance types with Xilinx devices, specify `xilinx` .\n\nDefault: Any manufacturer", "AcceleratorNames": "The accelerators that must be on the instance type.\n\n- For instance types with NVIDIA A10G GPUs, specify `a10g` .\n- For instance types with NVIDIA A100 GPUs, specify `a100` .\n- For instance types with NVIDIA H100 GPUs, specify `h100` .\n- For instance types with AWS Inferentia chips, specify `inferentia` .\n- For instance types with NVIDIA GRID K520 GPUs, specify `k520` .\n- For instance types with NVIDIA K80 GPUs, specify `k80` .\n- For instance types with NVIDIA M60 GPUs, specify `m60` .\n- For instance types with AMD Radeon Pro V520 GPUs, specify `radeon-pro-v520` .\n- For instance types with NVIDIA T4 GPUs, specify `t4` .\n- For instance types with NVIDIA T4G GPUs, specify `t4g` .\n- For instance types with Xilinx VU9P FPGAs, specify `vu9p` .\n- For instance types with NVIDIA V100 GPUs, specify `v100` .\n\nDefault: Any accelerator", "AcceleratorTotalMemoryMiB": "The minimum and maximum amount of total accelerator memory, in MiB.\n\nDefault: No minimum or maximum limits", - "AcceleratorTypes": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n- To include instance types with inference hardware, specify `inference` .\n\nDefault: Any accelerator type", + "AcceleratorTypes": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n\nDefault: Any accelerator type", "AllowedInstanceTypes": "The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.\n\nYou can use strings with one or more wild cards, represented by an asterisk ( `*` ), to allow an instance type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` ,Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.\n\n> If you specify `AllowedInstanceTypes` , you can't specify `ExcludedInstanceTypes` . \n\nDefault: All instance types", "BareMetal": "Indicates whether bare metal instance types must be included, excluded, or required.\n\n- To include bare metal instance types, specify `included` .\n- To require only bare metal instance types, specify `required` .\n- To exclude bare metal instance types, specify `excluded` .\n\nDefault: `excluded`", "BaselineEbsBandwidthMbps": "The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. 
For more information, see [Amazon EBS\u2013optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", @@ -13608,13 +13627,60 @@ "VpnGatewayId": "The ID of the virtual private gateway at the AWS side of the VPN connection.\n\nYou must specify either `TransitGatewayId` or `VpnGatewayId` , but not both.", "VpnTunnelOptionsSpecifications": "The tunnel options for the VPN connection." }, + "AWS::EC2::VPNConnection CloudwatchLogOptionsSpecification": { + "LogEnabled": "Enable or disable VPN tunnel logging feature. Default value is `False` .\n\nValid values: `True` | `False`", + "LogGroupArn": "The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to.", + "LogOutputFormat": "Set log format. Default format is `json` .\n\nValid values: `json` | `text`" + }, + "AWS::EC2::VPNConnection IKEVersionsRequestListValue": { + "Value": "The IKE version." + }, + "AWS::EC2::VPNConnection Phase1DHGroupNumbersRequestListValue": { + "Value": "The Diffie-Hellmann group number." + }, + "AWS::EC2::VPNConnection Phase1EncryptionAlgorithmsRequestListValue": { + "Value": "The value for the encryption algorithm." + }, + "AWS::EC2::VPNConnection Phase1IntegrityAlgorithmsRequestListValue": { + "Value": "The value for the integrity algorithm." + }, + "AWS::EC2::VPNConnection Phase2DHGroupNumbersRequestListValue": { + "Value": "The Diffie-Hellmann group number." + }, + "AWS::EC2::VPNConnection Phase2EncryptionAlgorithmsRequestListValue": { + "Value": "The encryption algorithm." + }, + "AWS::EC2::VPNConnection Phase2IntegrityAlgorithmsRequestListValue": { + "Value": "The integrity algorithm." + }, "AWS::EC2::VPNConnection Tag": { "Key": "The tag key.", "Value": "The tag value." }, + "AWS::EC2::VPNConnection VpnTunnelLogOptionsSpecification": { + "CloudwatchLogOptions": "Options for sending VPN tunnel logs to CloudWatch." + }, "AWS::EC2::VPNConnection VpnTunnelOptionsSpecification": { + "DPDTimeoutAction": "The action to take after DPD timeout occurs. Specify `restart` to restart the IKE initiation. 
Specify `clear` to end the IKE session.\n\nValid Values: `clear` | `none` | `restart`\n\nDefault: `clear`", + "DPDTimeoutSeconds": "The number of seconds after which a DPD timeout occurs.\n\nConstraints: A value greater than or equal to 30.\n\nDefault: `30`", + "EnableTunnelLifecycleControl": "Turn on or off tunnel endpoint lifecycle control feature.", + "IKEVersions": "The IKE versions that are permitted for the VPN tunnel.\n\nValid values: `ikev1` | `ikev2`", + "LogOptions": "Options for logging VPN tunnel activity.", + "Phase1DHGroupNumbers": "One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `2` | `14` | `15` | `16` | `17` | `18` | `19` | `20` | `21` | `22` | `23` | `24`", + "Phase1EncryptionAlgorithms": "One or more encryption algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `AES128` | `AES256` | `AES128-GCM-16` | `AES256-GCM-16`", + "Phase1IntegrityAlgorithms": "One or more integrity algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `SHA1` | `SHA2-256` | `SHA2-384` | `SHA2-512`", + "Phase1LifetimeSeconds": "The lifetime for phase 1 of the IKE negotiation, in seconds.\n\nConstraints: A value between 900 and 28,800.\n\nDefault: `28800`", + "Phase2DHGroupNumbers": "One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `2` | `5` | `14` | `15` | `16` | `17` | `18` | `19` | `20` | `21` | `22` | `23` | `24`", + "Phase2EncryptionAlgorithms": "One or more encryption algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `AES128` | `AES256` | `AES128-GCM-16` | `AES256-GCM-16`", + "Phase2IntegrityAlgorithms": "One or more integrity algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `SHA1` | `SHA2-256` | `SHA2-384` | `SHA2-512`", + "Phase2LifetimeSeconds": "The lifetime for phase 2 of the IKE negotiation, in seconds.\n\nConstraints: A value between 900 and 3,600. The value must be less than the value for `Phase1LifetimeSeconds` .\n\nDefault: `3600`", "PreSharedKey": "The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.\n\nConstraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).", - "TunnelInsideCidr": "The range of inside IP addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same virtual private gateway.\n\nConstraints: A size /30 CIDR block from the `169.254.0.0/16` range. The following CIDR blocks are reserved and cannot be used:\n\n- `169.254.0.0/30`\n- `169.254.1.0/30`\n- `169.254.2.0/30`\n- `169.254.3.0/30`\n- `169.254.4.0/30`\n- `169.254.5.0/30`\n- `169.254.169.252/30`" + "RekeyFuzzPercentage": "The percentage of the rekey window (determined by `RekeyMarginTimeSeconds` ) during which the rekey time is randomly selected.\n\nConstraints: A value between 0 and 100.\n\nDefault: `100`", + "RekeyMarginTimeSeconds": "The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey. 
The exact time of the rekey is randomly selected based on the value for `RekeyFuzzPercentage` .\n\nConstraints: A value between 60 and half of `Phase2LifetimeSeconds` .\n\nDefault: `270`", + "ReplayWindowSize": "The number of packets in an IKE replay window.\n\nConstraints: A value between 64 and 2048.\n\nDefault: `1024`", + "StartupAction": "The action to take when the establishing the tunnel for the VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify `start` for AWS to initiate the IKE negotiation.\n\nValid Values: `add` | `start`\n\nDefault: `add`", + "TunnelInsideCidr": "The range of inside IP addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same virtual private gateway.\n\nConstraints: A size /30 CIDR block from the `169.254.0.0/16` range. The following CIDR blocks are reserved and cannot be used:\n\n- `169.254.0.0/30`\n- `169.254.1.0/30`\n- `169.254.2.0/30`\n- `169.254.3.0/30`\n- `169.254.4.0/30`\n- `169.254.5.0/30`\n- `169.254.169.252/30`", + "TunnelInsideIpv6Cidr": "The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same transit gateway.\n\nConstraints: A size /126 CIDR block from the local `fd00::/8` range." }, "AWS::EC2::VPNConnectionRoute": { "DestinationCidrBlock": "The CIDR block associated with the local subnet of the customer network.", @@ -14061,7 +14127,7 @@ }, "AWS::ECS::Service ServiceManagedEBSVolumeConfiguration": { "Encrypted": "Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the `Encrypted` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", - "FilesystemType": "The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.", + "FilesystemType": "The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem types are `NTFS` .", "Iops": "The number of I/O operations per second (IOPS). For `gp3` , `io1` , and `io2` volumes, this represents the number of IOPS that are provisioned for the volume. For `gp2` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n\nThe following are the supported values for each volume type.\n\n- `gp3` : 3,000 - 16,000 IOPS\n- `io1` : 100 - 64,000 IOPS\n- `io2` : 100 - 256,000 IOPS\n\nThis parameter is required for `io1` and `io2` volume types. The default for `gp3` volumes is `3,000 IOPS` . 
This parameter is not supported for `st1` , `sc1` , or `standard` volume types.\n\nThis parameter maps 1:1 with the `Iops` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .", "KmsKeyId": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no AWS Key Management Service key is specified, the default AWS managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the `KmsKeyId` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference* .\n\n> AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.", "RoleArn": "The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role that is used to manage your AWS infrastructure. We recommend using the Amazon ECS-managed `AmazonECSInfrastructureRolePolicyForVolumes` IAM policy with this role. For more information, see [Amazon ECS infrastructure IAM role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/infrastructure_IAM_role.html) in the *Amazon ECS Developer Guide* .", @@ -14199,7 +14265,7 @@ "Type": "The log router to use. The valid values are `fluentd` or `fluentbit` ." }, "AWS::ECS::TaskDefinition HealthCheck": { - "Command": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command", + "Command": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command.", "Interval": "The time period in seconds between each health check execution. You may specify between 5 and 300 seconds. The default value is 30 seconds.", "Retries": "The number of times to retry a failed health check before the container is considered unhealthy. You may specify between 1 and 10 retries. 
The default value is 3.", "StartPeriod": "The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the `startPeriod` is off.\n\n> If a health check succeeds within the `startPeriod` , then the container is considered healthy and any subsequent failures count toward the maximum number of retries.", @@ -14311,6 +14377,7 @@ "SourceContainer": "The name of another container within the same task definition to mount volumes from." }, "AWS::ECS::TaskSet": { + "CapacityProviderStrategy": "The capacity provider strategy that are associated with the task set.", "Cluster": "The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the task set in.", "ExternalId": "An optional non-unique tag that identifies this task set in external systems. If the task set is associated with a service discovery registry, the tasks in this task set will have the `ECS_TASK_SET_EXTERNAL_ID` AWS Cloud Map attribute set to the provided value.", "LaunchType": "The launch type that new tasks in the task set uses. For more information, see [Amazon ECS launch types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf a `launchType` is specified, the `capacityProviderStrategy` parameter must be omitted.", @@ -14328,6 +14395,11 @@ "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC." }, + "AWS::ECS::TaskSet CapacityProviderStrategyItem": { + "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", + "CapacityProvider": "The short name of the capacity provider.", + "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. 
Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* ." + }, "AWS::ECS::TaskSet LoadBalancer": { "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", "ContainerPort": "The port on the container to associate with the load balancer. This port must correspond to a `containerPort` in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they're launched on must allow ingress traffic on the `hostPort` of the port mapping.", @@ -14466,14 +14538,14 @@ "EncryptionConfig": "The encryption configuration for the cluster.", "KubernetesNetworkConfig": "The Kubernetes network configuration for the cluster.", "Logging": "The logging configuration for your cluster.", - "Name": "The unique name to give to your cluster.", + "Name": "The unique name to give to your cluster. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the AWS Region and AWS account that you're creating the cluster in. Note that underscores can't be used in AWS CloudFormation .", "OutpostConfig": "An object representing the configuration of your local Amazon EKS cluster on an AWS Outpost. This object isn't available for clusters on the AWS cloud.", "ResourcesVpcConfig": "The VPC configuration that's used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the *Amazon EKS User Guide* . You must specify at least two subnets. You can specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. For more information, see [Amazon EKS Service IAM Role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) in the **Amazon EKS User Guide** .", "Tags": "The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Cluster tags don't propagate to any other resources associated with the cluster.\n\n> You must have the `eks:TagResource` and `eks:UntagResource` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. 
If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", "UpgradePolicy": "This value indicates if extended support is enabled or disabled for the cluster.\n\n[Learn more about EKS Extended Support in the EKS User Guide.](https://docs.aws.amazon.com/eks/latest/userguide/extended-support-control.html)", "Version": "The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used.\n\n> The default version might not be the latest version available.", - "ZonalShiftConfig": "" + "ZonalShiftConfig": "The configuration for zonal shift for the cluster." }, "AWS::EKS::Cluster AccessConfig": { "AuthenticationMode": "The desired authentication mode for the cluster. If you create a cluster by using the EKS API, AWS SDKs, or AWS CloudFormation , the default is `CONFIG_MAP` . If you create the cluster by using the AWS Management Console , the default value is `API_AND_CONFIG_MAP` .", @@ -14523,7 +14595,7 @@ "SupportType": "" }, "AWS::EKS::Cluster ZonalShiftConfig": { - "Enabled": "" + "Enabled": "If zonal shift is enabled, AWS configures zonal autoshift for the cluster." }, "AWS::EKS::FargateProfile": { "ClusterName": "The name of your cluster.", @@ -15093,11 +15165,11 @@ "WorkerTypeSpecifications": "The specification applied to each worker type." }, "AWS::EMRServerless::Application AutoStartConfiguration": { - "Enabled": "" + "Enabled": "Enables the application to automatically start on job submission." }, "AWS::EMRServerless::Application AutoStopConfiguration": { - "Enabled": "", - "IdleTimeoutMinutes": "" + "Enabled": "Enables the application to automatically stop after a certain amount of time being idle. Defaults to true.", + "IdleTimeoutMinutes": "The amount of idle time in minutes after which your application will automatically stop. Defaults to 15 minutes." }, "AWS::EMRServerless::Application CloudWatchLoggingConfiguration": { "Enabled": "Enables CloudWatch logging.", @@ -15107,9 +15179,9 @@ "LogTypeMap": "" }, "AWS::EMRServerless::Application ConfigurationObject": { - "Classification": "", - "Configurations": "", - "Properties": "" + "Classification": "The classification within a configuration.", + "Configurations": "A list of additional configurations to apply within a configuration object.", + "Properties": "A set of properties specified within a configuration classification." }, "AWS::EMRServerless::Application ImageConfigurationInput": { "ImageUri": "The URI of an image in the Amazon ECR registry. This field is required when you create a new application. If you leave this field blank in an update, Amazon EMR will remove the image configuration." @@ -15153,39 +15225,39 @@ "LogUri": "The Amazon S3 destination URI for log publishing." }, "AWS::EMRServerless::Application Tag": { - "Key": "", - "Value": "" + "Key": "The key to use in the tag.", + "Value": "The value of the tag." }, "AWS::EMRServerless::Application WorkerConfiguration": { - "Cpu": "", - "Disk": "", - "DiskType": "", - "Memory": "" + "Cpu": "The CPU requirements of the worker configuration. Each worker can have 1, 2, 4, 8, or 16 vCPUs.", + "Disk": "The disk requirements of the worker configuration.", + "DiskType": "The disk type for every worker instance of the work type. Shuffle optimized disks have higher performance characteristics and are better for shuffle heavy workloads. 
Default is `STANDARD` .", + "Memory": "The memory requirements of the worker configuration." }, "AWS::EMRServerless::Application WorkerTypeSpecificationInput": { "ImageConfiguration": "The image configuration for a worker type." }, "AWS::ElastiCache::CacheCluster": { "AZMode": "Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.\n\nThis parameter is only supported for Memcached clusters.\n\nIf the `AZMode` and `PreferredAvailabilityZones` are not specified, ElastiCache assumes `single-az` mode.", - "AutoMinorVersionUpgrade": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", - "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", + "AutoMinorVersionUpgrade": "If you are running Valkey 7.2 or later, or Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. 
If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Valkey and Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Valkey and Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "CacheParameterGroupName": "The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. 
You cannot use any parameter group which has `cluster-enabled='yes'` when creating a cluster.", "CacheSecurityGroupNames": "A list of security group names to associate with this cluster.\n\nUse this parameter only when you are creating a cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).", "CacheSubnetGroupName": "The name of the subnet group to be used for the cluster.\n\nUse this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see `[AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .`", "ClusterName": "A name for the cache cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the cache cluster. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nThe name must contain 1 to 50 alphanumeric characters or hyphens. The name must start with a letter and cannot end with a hyphen or contain two consecutive hyphens.", - "Engine": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | `redis`", - "EngineVersion": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", - "IpDiscovery": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "Engine": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | valkey | `redis`", + "EngineVersion": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", + "IpDiscovery": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "LogDeliveryConfigurations": "Specifies the destination, format and type of the logs.", - "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . 
IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "NotificationTopicArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.\n\n> The Amazon SNS topic owner must be the same as the cluster owner.", "NumCacheNodes": "The number of cache nodes that the cache cluster should have.\n\n> However, if the `PreferredAvailabilityZone` and `PreferredAvailabilityZones` properties were not previously specified and you don't specify any new values, an update requires [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "Port": "The port number on which each of the cache nodes accepts connections.", "PreferredAvailabilityZone": "The EC2 Availability Zone in which the cluster is created.\n\nAll nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use `PreferredAvailabilityZones` .\n\nDefault: System chosen Availability Zone.", "PreferredAvailabilityZones": "A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.\n\nThis option is only supported on Memcached.\n\n> If you are creating your cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheNodes` . \n\nIf you want all the nodes in the same Availability Zone, use `PreferredAvailabilityZone` instead, or repeat the Availability Zone multiple times in the list.\n\nDefault: System chosen Availability Zones.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", - "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", - "SnapshotName": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). 
The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "SnapshotName": "The name of a Valkey or Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot taken today is retained for 5 days before being deleted.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nDefault: 0 (i.e., automatic backups are disabled for this cache cluster).", "SnapshotWindow": "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).\n\nExample: `05:00-09:00`\n\nIf you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "Tags": "A list of tags to be added to this resource.", @@ -15213,10 +15285,11 @@ "Value": "The tag's value. May be null." }, "AWS::ElastiCache::GlobalReplicationGroup": { - "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", + "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups.", "CacheNodeType": "The cache node type of the Global datastore", "CacheParameterGroupName": "The name of the cache parameter group to use with the Global datastore. It must be compatible with the major engine version used by the Global datastore.", - "EngineVersion": "The Elasticache Redis OSS engine version.", + "Engine": "The ElastiCache engine. For Valkey or Redis OSS only.", + "EngineVersion": "The Elasticache Valkey or Redis OSS engine version.", "GlobalNodeGroupCount": "The number of node groups that comprise the Global Datastore.", "GlobalReplicationGroupDescription": "The optional description of the Global datastore", "GlobalReplicationGroupIdSuffix": "The suffix name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.", @@ -15234,7 +15307,7 @@ "ReshardingConfigurations": "A list of PreferredAvailabilityZones objects that specifies the configuration of a node group in the resharded cluster." }, "AWS::ElastiCache::GlobalReplicationGroup ReshardingConfiguration": { - "NodeGroupId": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "NodeGroupId": "Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "PreferredAvailabilityZones": "A list of preferred availability zones for the nodes in this cluster." 
}, "AWS::ElastiCache::ParameterGroup": { @@ -15249,27 +15322,27 @@ }, "AWS::ElastiCache::ReplicationGroup": { "AtRestEncryptionEnabled": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", - "AuthToken": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", - "AutoMinorVersionUpgrade": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", - "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", - "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. 
Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", - "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "AuthToken": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . 
For more information, see [Authenticating Valkey or Redis OSS users with the AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "AutoMinorVersionUpgrade": "If you are running Valkey 7.2 or later, or Redis OSS 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", + "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", + "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Valkey or Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Valkey or Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Valkey or Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "CacheSecurityGroupNames": "A list of cache security group names to associate with this replication group.", "CacheSubnetGroupName": "The name of the cache subnet group to be used for the replication group.\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see [AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .", - "ClusterMode": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", - "DataTieringEnabled": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html) .", - "Engine": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `Redis` .", - "EngineVersion": "The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. 
If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", + "ClusterMode": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .", + "DataTieringEnabled": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html) .", + "Engine": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `valkey` or `redis` .\n\n> Upgrading an existing engine from redis to valkey is done through in-place migration, and requires a parameter group.", + "EngineVersion": "The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", "GlobalReplicationGroupId": "The name of the Global datastore", - "IpDiscovery": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "IpDiscovery": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "KmsKeyId": "The ID of the KMS key used to encrypt the disk on the cluster.", "LogDeliveryConfigurations": "Specifies the destination, format and type of the logs.", - "MultiAZEnabled": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) .", - "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . 
IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", - "NodeGroupConfiguration": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "MultiAZEnabled": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) .", + "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "NodeGroupConfiguration": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Valkey or Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "NotificationTopicArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.\n\n> The Amazon SNS topic owner must be the same as the cluster owner.", "NumCacheClusters": "The number of clusters this replication group initially has.\n\nThis parameter is not used if there is more than one node group (shard). You should use `ReplicasPerNodeGroup` instead.\n\nIf `AutomaticFailoverEnabled` is `true` , the value of this parameter must be at least 2. If `AutomaticFailoverEnabled` is `false` you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.\n\nThe maximum permitted value for `NumCacheClusters` is 6 (1 primary plus 5 replicas).", - "NumNodeGroups": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "NumNodeGroups": "An optional parameter that specifies the number of node groups (shards) for this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey or Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "Port": "The port number on which each member of the replication group accepts connections.", "PreferredCacheClusterAZs": "A list of EC2 Availability Zones in which the replication group's clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list.\n\nThis parameter is not used if there is more than one node group (shard). You should use `NodeGroupConfiguration` instead.\n\n> If you are creating your replication group in an Amazon VPC (recommended), you can only locate clusters in Availability Zones associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheClusters` . \n\nDefault: system chosen Availability Zones.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", @@ -15278,14 +15351,14 @@ "ReplicationGroupDescription": "A user-created description for the replication group.", "ReplicationGroupId": "The replication group identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- A name must contain from 1 to 40 alphanumeric characters or hyphens.\n- The first character must be a letter.\n- A name cannot end with a hyphen or contain two consecutive hyphens.", "SecurityGroupIds": "One or more Amazon VPC security groups associated with this replication group.\n\nUse this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).", - "SnapshotArns": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. 
The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "SnapshotArns": "A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "SnapshotName": "The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to `restoring` while the new replication group is being created.", "SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot that was taken today is retained for 5 days before being deleted.\n\nDefault: 0 (i.e., automatic backups are disabled for this cluster).", "SnapshotWindow": "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).\n\nExample: `05:00-09:00`\n\nIf you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.", - "SnapshottingClusterId": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", + "SnapshottingClusterId": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Valkey or Redis OSS (cluster mode enabled) replication groups.", "Tags": "A list of tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key= `myKey` , Value= `myKeyValue` . You can include multiple tags as shown following: Key= `myKey` , Value= `myKeyValue` Key= `mySecondKey` , Value= `mySecondKeyValue` . Tags on replication groups will be replicated to all nodes.", "TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. 
To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", - "TransitEncryptionMode": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "TransitEncryptionMode": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Valkey or Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "UserGroupIds": "The ID of user group to associate with the replication group." }, "AWS::ElastiCache::ReplicationGroup CloudWatchLogsDestinationDetails": { @@ -15305,7 +15378,7 @@ "LogType": "Valid value is either `slow-log` , which refers to [slow-log](https://docs.aws.amazon.com/https://redis.io/commands/slowlog) or `engine-log` ." }, "AWS::ElastiCache::ReplicationGroup NodeGroupConfiguration": { - "NodeGroupId": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "NodeGroupId": "Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "PrimaryAvailabilityZone": "The Availability Zone where the primary node of this node group (shard) is launched.", "ReplicaAvailabilityZones": "A list of Availability Zones to be used for the read replicas. The number of Availability Zones in this list must match the value of `ReplicaCount` or `ReplicasPerNodeGroup` if not specified.", "ReplicaCount": "The number of read replica nodes in this node group (shard).", @@ -15330,7 +15403,7 @@ }, "AWS::ElastiCache::ServerlessCache": { "CacheUsageLimits": "The cache usage limit for the serverless cache.", - "DailySnapshotTime": "The daily time that a cache snapshot will be created. 
Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", + "DailySnapshotTime": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Valkey, Redis OSS, and Serverless Memcached only.", "Description": "A description of the serverless cache.", "Endpoint": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "Engine": "The engine the serverless cache is compatible with.", @@ -15341,10 +15414,10 @@ "SecurityGroupIds": "The IDs of the EC2 security groups associated with the serverless cache.", "ServerlessCacheName": "The unique identifier of the serverless cache.", "SnapshotArnsToRestore": "The ARN of the snapshot from which to restore data into the new cache.", - "SnapshotRetentionLimit": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", + "SnapshotRetentionLimit": "The current setting for the number of serverless cache snapshots the system will retain. Available for Valkey, Redis OSS, and Serverless Memcached only.", "SubnetIds": "If no subnet IDs are given and your VPC is in us-west-1, then ElastiCache will select 2 default subnets across AZs in your VPC. For all other Regions, if no subnet IDs are given then ElastiCache will select 3 default subnets across AZs in your default VPC.", "Tags": "A list of tags to be added to this resource.", - "UserGroupId": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL." + "UserGroupId": "The identifier of the user group associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL." }, "AWS::ElastiCache::ServerlessCache CacheUsageLimits": { "DataStorage": "The maximum data storage limit in the cache, expressed in Gigabytes.", @@ -15396,7 +15469,7 @@ "Value": "The tag's value. May be null." }, "AWS::ElastiCache::UserGroup": { - "Engine": "The current supported value is redis.", + "Engine": "The current supported values are valkey and redis.", "Tags": "The list of tags.", "UserGroupId": "The ID of the user group.", "UserIds": "The list of user IDs that belong to the user group. A user named `default` must be included." @@ -15750,7 +15823,7 @@ "Type": "The type of load balancer. The default is `application` ." }, "AWS::ElasticLoadBalancingV2::LoadBalancer LoadBalancerAttribute": { - "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. 
The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . 
The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", + "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. 
The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. 
The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", "Value": "The value of the attribute." }, "AWS::ElasticLoadBalancingV2::LoadBalancer SubnetMapping": { @@ -17172,6 +17245,8 @@ "Name": "A descriptive identifier for the container group definition. The name value is unique in an AWS Region.", "OperatingSystem": "The platform required for all containers in the container group definition.\n\n> Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the [Amazon Linux 2 FAQs](https://docs.aws.amazon.com/https://aws.amazon.com/amazon-linux-2/faqs/) . For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See [Migrate to Amazon GameLift server SDK version 5.](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk5-migration.html)", "SchedulingStrategy": "The method for deploying the container group across fleet instances. A replica container group might have multiple copies on each fleet instance. A daemon container group maintains only one copy per fleet instance.", + "SourceVersionNumber": "", + "SupportContainerDefinitions": "", "Tags": "", "TotalCpuLimit": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.\n\nFor more details on memory allocation, see the [Container fleet design guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/containers-design-fleet) .", "TotalMemoryLimit": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group.\n\nFor more details on memory allocation, see the [Container fleet design guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/containers-design-fleet) ." 
@@ -19442,6 +19517,7 @@ "KeyPair": "The Amazon EC2 key pair of the infrastructure configuration.", "Logging": "The logging configuration defines where Image Builder uploads your logs.", "Name": "The name of the infrastructure configuration.", + "Placement": "The instance placement settings that define where the instances that are launched from your image will run.", "ResourceTags": "The tags attached to the resource created by Image Builder.", "SecurityGroupIds": "The security group IDs of the infrastructure configuration.", "SnsTopicArn": "The Amazon Resource Name (ARN) of the SNS topic for the infrastructure configuration.", @@ -19456,6 +19532,12 @@ "AWS::ImageBuilder::InfrastructureConfiguration Logging": { "S3Logs": "The Amazon S3 logging configuration." }, + "AWS::ImageBuilder::InfrastructureConfiguration Placement": { + "AvailabilityZone": "The Availability Zone where your build and test instances will launch.", + "HostId": "The ID of the Dedicated Host on which build and test instances run. This only applies if `tenancy` is `host` . If you specify the host ID, you must not specify the resource group ARN. If you specify both, Image Builder returns an error.", + "HostResourceGroupArn": "The Amazon Resource Name (ARN) of the host resource group in which to launch build and test instances. This only applies if `tenancy` is `host` . If you specify the resource group ARN, you must not specify the host ID. If you specify both, Image Builder returns an error.", + "Tenancy": "The tenancy of the instance. An instance with a tenancy of `dedicated` runs on single-tenant hardware. An instance with a tenancy of `host` runs on a Dedicated Host.\n\nIf tenancy is set to `host` , then you can optionally specify one target for placement \u2013 either host ID or host resource group ARN. If automatic placement is enabled for your host, and you don't specify any placement target, Amazon EC2 will try to find an available host for your build and test instances." + }, "AWS::ImageBuilder::InfrastructureConfiguration S3Logs": { "S3BucketName": "The S3 bucket in which to store the logs.", "S3KeyPrefix": "The Amazon S3 path to the bucket where the logs are stored." @@ -19820,10 +19902,10 @@ "Value": "The tag's value." }, "AWS::IoT::DomainConfiguration": { - "ApplicationProtocol": "An enumerated string that speci\ufb01es the application-layer protocol.\n\n> This property isn't available in China.", - "AuthenticationType": "An enumerated string that speci\ufb01es the authentication type.\n\n> This property isn't available in China.", + "ApplicationProtocol": "An enumerated string that speci\ufb01es the application-layer protocol.", + "AuthenticationType": "An enumerated string that speci\ufb01es the authentication type.", "AuthorizerConfig": "An object that specifies the authorization service for a domain.", - "ClientCertificateConfig": "An object that speci\ufb01es the client certificate con\ufb01guration for a domain.\n\n> This property isn't available in China.", + "ClientCertificateConfig": "An object that speci\ufb01es the client certificate con\ufb01guration for a domain.", "DomainConfigurationName": "The name of the domain configuration. This value must be unique to a region.", "DomainConfigurationStatus": "The status to which the domain configuration should be updated.\n\nValid values: `ENABLED` | `DISABLED`", "DomainName": "The name of the domain.", @@ -19839,7 +19921,7 @@ "DefaultAuthorizerName": "The name of the authorization service for a domain configuration." 
}, "AWS::IoT::DomainConfiguration ClientCertificateConfig": { - "ClientCertificateCallbackArn": "The ARN of the Lambda function that IoT invokes after mutual TLS authentication during the connection.\n\n> This property isn't available in China." + "ClientCertificateCallbackArn": "The ARN of the Lambda function that IoT invokes after mutual TLS authentication during the connection." }, "AWS::IoT::DomainConfiguration ServerCertificateConfig": { "EnableOCSPCheck": "A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not. For more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide." @@ -20108,11 +20190,11 @@ "Value": "The tag's value." }, "AWS::IoT::SoftwarePackageVersion": { - "Attributes": "", - "Description": "", - "PackageName": "", - "Tags": "", - "VersionName": "" + "Attributes": "Metadata that can be used to define a package version\u2019s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.\n\nThe combined size of all the attributes on a package version is limited to 3KB.", + "Description": "A summary of the package version being created. This can be used to outline the package's contents or purpose.", + "PackageName": "The name of the associated software package.", + "Tags": "Metadata that can be used to manage the package version.", + "VersionName": "The name of the new package version." }, "AWS::IoT::SoftwarePackageVersion Tag": { "Key": "The tag's key.", @@ -21862,7 +21944,7 @@ "AWS::IoTWireless::WirelessDeviceImportTask Sidewalk": { "DeviceCreationFile": "The CSV file contained in an S3 bucket that's used for adding devices to an import task.", "DeviceCreationFileList": "List of Sidewalk devices that are added to the import task.", - "Role": "The IAM role that allows AWS IoT Wireless to access the CSV file in the S3 bucket.", + "Role": "The IAM role that allows to access the CSV file in the S3 bucket.", "SidewalkManufacturingSn": "The Sidewalk manufacturing serial number (SMSN) of the Sidewalk device." }, "AWS::IoTWireless::WirelessDeviceImportTask Tag": { @@ -22447,6 +22529,10 @@ "Key": "The key for the tag. Keys are not case sensitive and must be unique.", "Value": "The value associated with the tag. The value can be an empty string but it can't be null." }, + "AWS::Kinesis::ResourcePolicy": { + "ResourceArn": "This is the name for the resource policy.", + "ResourcePolicy": "This is the description for the resource policy." + }, "AWS::Kinesis::Stream": { "Name": "The name of the Kinesis stream. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the stream name. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nIf you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "RetentionPeriodHours": "The number of hours for the data records that are stored in shards to remain accessible. The default value is 24. 
For more information about the stream retention period, see [Changing the Data Retention Period](https://docs.aws.amazon.com/streams/latest/dev/kinesis-extended-retention.html) in the Amazon Kinesis Developer Guide.", @@ -27163,7 +27249,7 @@ "ChannelGroupName": "The name of the channel group associated with the channel configuration.", "ChannelName": "The name of the channel.", "Description": "The description of the channel.", - "InputType": "", + "InputType": "The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.\n\nThe allowed values are:\n\n- `HLS` - The HLS streaming specification (which defines M3U8 manifests and TS segments).\n- `CMAF` - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).", "Tags": "The tags associated with the channel." }, "AWS::MediaPackageV2::Channel IngestEndpoint": { @@ -27194,7 +27280,7 @@ "ContainerType": "The container type associated with the origin endpoint configuration.", "DashManifests": "A DASH manifest configuration.", "Description": "The description associated with the origin endpoint.", - "ForceEndpointErrorConfiguration": "", + "ForceEndpointErrorConfiguration": "The failover settings for the endpoint.", "HlsManifests": "The HLS manfiests associated with the origin endpoint configuration.", "LowLatencyHlsManifests": "The low-latency HLS (LL-HLS) manifests associated with the origin endpoint.", "OriginEndpointName": "The name of the origin endpoint associated with the origin endpoint configuration.", @@ -27240,7 +27326,7 @@ "TimeDelaySeconds": "Optionally specify the time delay for all of your manifest egress requests. Enter a value that is smaller than your endpoint's startover window. When you include time delay, note that you cannot use time delay query parameters for this manifest's endpoint URL." }, "AWS::MediaPackageV2::OriginEndpoint ForceEndpointErrorConfiguration": { - "EndpointErrorConditions": "" + "EndpointErrorConditions": "The failover conditions for the endpoint. The options are:\n\n- `STALE_MANIFEST` - The manifest stalled and there are no new segments or parts.\n- `INCOMPLETE_MANIFEST` - There is a gap in the manifest.\n- `MISSING_DRM_KEY` - Key rotation is enabled but we're unable to fetch the key for the current key period.\n- `SLATE_INPUT` - The segments which contain slate content are considered to be missing content." }, "AWS::MediaPackageV2::OriginEndpoint HlsManifestConfiguration": { "ChildManifestName": "The name of the child manifest associated with the HLS manifest configuration.", @@ -27495,9 +27581,10 @@ "AutoMinorVersionUpgrade": "When set to true, the cluster will automatically receive minor engine version upgrades after launch.", "ClusterEndpoint": "The cluster 's configuration endpoint.", "ClusterName": "The name of the cluster .", - "DataTiering": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html) .", + "DataTiering": "Enables data tiering. Data tiering is only supported for clusters using the r6gd node type. This parameter must be set when using r6gd nodes. 
For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html) .", "Description": "A description of the cluster .", - "EngineVersion": "The Redis engine version used by the cluster .", + "Engine": "The Valkey or Redis OSS engine used by the cluster.", + "EngineVersion": "The Valkey or Redis OSS engine version used by the cluster .", "FinalSnapshotName": "The user-supplied name of a final cluster snapshot. This is the unique name that identifies the snapshot. MemoryDB creates the snapshot, and then deletes the cluster immediately afterward.", "KmsKeyId": "The ID of the KMS key used to encrypt the cluster .", "MaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\n*Pattern* : `ddd:hh24:mi-ddd:hh24:mi`", @@ -27724,6 +27811,9 @@ "StatelessRuleGroupReferences": "References to the stateless rule groups that are used in the policy. These define the matching criteria in stateless rules.", "TLSInspectionConfigurationArn": "The Amazon Resource Name (ARN) of the TLS inspection configuration." }, + "AWS::NetworkFirewall::FirewallPolicy FlowTimeouts": { + "TcpIdleTimeoutSeconds": "" + }, "AWS::NetworkFirewall::FirewallPolicy IPSet": { "Definition": "The list of IP addresses and address ranges, in CIDR notation." }, @@ -27734,6 +27824,7 @@ "Dimensions": "" }, "AWS::NetworkFirewall::FirewallPolicy StatefulEngineOptions": { + "FlowTimeouts": "", "RuleOrder": "Indicates how to manage the order of stateful rule evaluation for the policy. `DEFAULT_ACTION_ORDER` is the default behavior. Stateful rules are provided to the rule engine as Suricata compatible strings, and Suricata evaluates them based on certain settings. For more information, see [Evaluation order for stateful rules](https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html) in the *AWS Network Firewall Developer Guide* .", "StreamExceptionPolicy": "Configures how Network Firewall processes traffic when a network connection breaks midstream. Network connections can break due to disruptions in external networks or within the firewall itself.\n\n- `DROP` - Network Firewall fails closed and drops all subsequent traffic going to the firewall. This is the default behavior.\n- `CONTINUE` - Network Firewall continues to apply rules to the subsequent traffic without context from traffic before the break. This impacts the behavior of rules that depend on this context. For example, if you have a stateful rule to `drop http` traffic, Network Firewall won't match the traffic for this rule because the service won't have the context from session initialization defining the application layer protocol as HTTP. However, this behavior is rule dependent\u2014a TCP-layer rule using a `flow:stateless` rule would still match, as would the `aws:drop_strict` default action.\n- `REJECT` - Network Firewall fails closed and drops all subsequent traffic going to the firewall. Network Firewall also sends a TCP reject packet back to your client so that the client can immediately establish a new session. Network Firewall will have context about the new session and will apply rules to the subsequent traffic." 
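The new `FlowTimeouts` setting and the `StreamExceptionPolicy` values described above sit under `StatefulEngineOptions` in the firewall policy document. A minimal hedged sketch (the policy name, the 350-second idle timeout, and the default actions are assumed placeholder values, since the `TcpIdleTimeoutSeconds` entry above is still blank):

    MinimalFirewallPolicy:
      Type: AWS::NetworkFirewall::FirewallPolicy
      Properties:
        FirewallPolicyName: flow-timeout-demo        # placeholder name
        FirewallPolicy:
          StatelessDefaultActions:
            - aws:forward_to_sfe                     # hand full packets to the stateful engine
          StatelessFragmentDefaultActions:
            - aws:forward_to_sfe
          StatefulEngineOptions:
            RuleOrder: STRICT_ORDER
            StreamExceptionPolicy: REJECT            # fail closed and reset the client connection, as described above
            FlowTimeouts:
              TcpIdleTimeoutSeconds: 350             # assumed value; not taken from this patch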
}, @@ -30223,7 +30314,7 @@ }, "AWS::QBusiness::DataSource": { "ApplicationId": "The identifier of the Amazon Q Business application the data source will be attached to.", - "Configuration": "Use this property to specify a JSON or YAML schema with configuration information specific to your data source connector to connect your data source repository to Amazon Q Business . You must use the JSON or YAML schema provided by Amazon Q .\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, select *Using AWS CloudFormation* to find the schemas for your data source connector, including parameter descriptions and examples.", + "Configuration": "Use this property to specify a JSON or YAML schema with configuration properties specific to your data source connector to connect your data source repository to Amazon Q Business . You must use the JSON or YAML schema provided by Amazon Q .\n\nThe following links have the configuration properties and schemas for AWS CloudFormation for the following connectors:\n\n- [Amazon Simple Storage Service](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-cfn.html)\n- [Amazon Q Web Crawler](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-cfn.html)\n\nSimilarly, you can find configuration templates and properties for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, choose the topic containing *Using AWS CloudFormation* to find the schemas for your data source connector, including configuration parameter descriptions and examples.", "Description": "A description for the data source connector.", "DisplayName": "The name of the Amazon Q Business data source.", "DocumentEnrichmentConfiguration": "Provides the configuration information for altering document metadata and content during the document ingestion process.\n\nFor more information, see [Custom document enrichment](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/custom-document-enrichment.html) .", @@ -30366,6 +30457,7 @@ "AWS::QBusiness::WebExperience": { "ApplicationId": "The identifier of the Amazon Q Business web experience.", "IdentityProviderConfiguration": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience.", + "Origins": "", "RoleArn": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n> You must provide this value if you're using IAM Identity Center to manage end user access to your application. 
If you're using legacy identity management to manage user access, you don't need to provide this value.", "SamplePromptsControlMode": "Determines whether sample prompts are enabled in the web experience for an end user.", "Subtitle": "A subtitle to personalize your Amazon Q Business web experience.", @@ -30421,6 +30513,7 @@ "AwsAccountId": "The ID of the AWS account where you are creating an analysis.", "Definition": "", "Errors": "Errors associated with the analysis.", + "FolderArns": "", "Name": "A descriptive name for the analysis that you're creating. This name displays for the analysis in the Amazon QuickSight console.", "Parameters": "The parameter names and override values that you want to use. An analysis can have any parameter type, and some parameters might accept multiple values.", "Permissions": "A structure that describes the principals and the resource-level permissions on an analysis. You can use the `Permissions` structure to grant permissions by providing a list of AWS Identity and Access Management (IAM) action information for each principal listed by Amazon Resource Name (ARN).\n\nTo specify no permissions, omit `Permissions` .", @@ -31106,7 +31199,7 @@ "AWS::QuickSight::Analysis DestinationParameterValueConfiguration": { "CustomValuesConfiguration": "The configuration of custom values for destination parameter in `DestinationParameterValueConfiguration` .", "SelectAllValueOptions": "The configuration that selects all options.", - "SourceColumn": "", + "SourceColumn": "A column of a data set.", "SourceField": "The source field ID of the destination parameter.", "SourceParameterName": "The source parameter name of the destination parameter." }, @@ -33035,6 +33128,7 @@ "DashboardId": "The ID for the dashboard, also added to the IAM policy.", "DashboardPublishOptions": "Options for publishing the dashboard when you create it:\n\n- `AvailabilityStatus` for `AdHocFilteringOption` - This status can be either `ENABLED` or `DISABLED` . When this is set to `DISABLED` , Amazon QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is `ENABLED` by default.\n- `AvailabilityStatus` for `ExportToCSVOption` - This status can be either `ENABLED` or `DISABLED` . The visual option to export data to .CSV format isn't enabled when this is set to `DISABLED` . This option is `ENABLED` by default.\n- `VisibilityState` for `SheetControlsOption` - This visibility state can be either `COLLAPSED` or `EXPANDED` . This option is `COLLAPSED` by default.", "Definition": "", + "FolderArns": "", "LinkEntities": "A list of analysis Amazon Resource Names (ARNs) to be linked to the dashboard.", "LinkSharingConfiguration": "A structure that contains the link sharing configurations that you want to apply overrides to.", "Name": "The display name of the dashboard.", @@ -33760,7 +33854,7 @@ "AWS::QuickSight::Dashboard DestinationParameterValueConfiguration": { "CustomValuesConfiguration": "The configuration of custom values for destination parameter in `DestinationParameterValueConfiguration` .", "SelectAllValueOptions": "The configuration that selects all options.", - "SourceColumn": "", + "SourceColumn": "A column of a data set.", "SourceField": "The source field ID of the destination parameter.", "SourceParameterName": "The source parameter name of the destination parameter." 
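`FolderArns` is added above for analyses (and dashboards) with an empty description; it is assumed here to accept a list of QuickSight folder ARNs that the new resource is created in. A minimal sketch using a template-based analysis (the account ID, ARNs, and names are placeholders):

    SalesAnalysis:
      Type: AWS::QuickSight::Analysis
      Properties:
        AwsAccountId: "111122223333"                 # placeholder account
        AnalysisId: sales-analysis
        Name: Sales Analysis
        FolderArns:                                  # assumed: list of folder ARNs to create the analysis in
          - arn:aws:quicksight:us-east-1:111122223333:folder/shared-reports
        SourceEntity:
          SourceTemplate:
            Arn: arn:aws:quicksight:us-east-1:111122223333:template/sales-template
            DataSetReferences:
              - DataSetPlaceholder: sales
                DataSetArn: arn:aws:quicksight:us-east-1:111122223333:dataset/sales-data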
}, @@ -35937,7 +36031,7 @@ "CastColumnTypeOperation": "A transform operation that casts a column to a different type.", "CreateColumnsOperation": "An operation that creates calculated columns. Columns created in one such operation form a lexical closure.", "FilterOperation": "An operation that filters rows based on some condition.", - "OverrideDatasetParameterOperation": "", + "OverrideDatasetParameterOperation": "A transform operation that overrides the dataset parameter values that are defined in another dataset.", "ProjectOperation": "An operation that projects columns. Operations that come after a projection can only refer to projected columns.", "RenameColumnOperation": "An operation that renames a column.", "TagColumnOperation": "An operation that tags a column with additional information.", @@ -35961,6 +36055,7 @@ "DataSourceId": "An ID for the data source. This ID is unique per AWS Region for each AWS account.", "DataSourceParameters": "The parameters that Amazon QuickSight uses to connect to your underlying source.", "ErrorInfo": "Error information from the last update or the creation of the data source.", + "FolderArns": "", "Name": "A display name for the data source.", "Permissions": "A list of resource permissions on the data source.", "SslProperties": "Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your underlying source.", @@ -36129,12 +36224,12 @@ "VpcConnectionArn": "The Amazon Resource Name (ARN) for the VPC connection." }, "AWS::QuickSight::Folder": { - "AwsAccountId": "", + "AwsAccountId": "The ID for the AWS account where you want to create the folder.", "FolderId": "The ID of the folder.", "FolderType": "The type of folder it is.", "Name": "A display name for the folder.", - "ParentFolderArn": "A new parent folder arn. This change can only be applied if the import creates a brand new folder. Existing folders cannot be moved.", - "Permissions": "", + "ParentFolderArn": "The Amazon Resource Name (ARN) for the folder.", + "Permissions": "A structure that describes the principals and the resource-level permissions of a folder.\n\nTo specify no permissions, omit `Permissions` .", "SharingModel": "The sharing scope of the folder.", "Tags": "A list of tags for the folders that you want to apply overrides to." }, @@ -36838,7 +36933,7 @@ "AWS::QuickSight::Template DestinationParameterValueConfiguration": { "CustomValuesConfiguration": "The configuration of custom values for destination parameter in `DestinationParameterValueConfiguration` .", "SelectAllValueOptions": "The configuration that selects all options.", - "SourceColumn": "", + "SourceColumn": "A column of a data set.", "SourceField": "The source field ID of the destination parameter.", "SourceParameterName": "The source parameter name of the destination parameter." }, @@ -38828,7 +38923,7 @@ "AWS::QuickSight::Theme ThemeConfiguration": { "DataColorPalette": "Color properties that apply to chart data colors.", "Sheet": "Display options related to sheets.", - "Typography": "", + "Typography": "Determines the typography options.", "UIColorPalette": "Color properties that apply to the UI and to charts, excluding the colors that apply to data." }, "AWS::QuickSight::Theme ThemeError": { @@ -39557,7 +39652,7 @@ "MasterUsername": "The user name associated with the admin user account for the cluster that is being created.\n\nConstraints:\n\n- Must be 1 - 128 alphanumeric characters or hyphens. 
The user name can't be `PUBLIC` .\n- Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.\n- The first character must be a letter.\n- Must not contain a colon (:) or a slash (/).\n- Cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "MultiAZ": "A boolean indicating whether Amazon Redshift should deploy the cluster in two Availability Zones. The default is false.", "NamespaceResourcePolicy": "The policy that is attached to a resource.", - "NodeType": "The node type to be provisioned for the cluster. For information about node types, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nValid Values: `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` | `ra3.16xlarge`", + "NodeType": "The node type to be provisioned for the cluster. For information about node types, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nValid Values: `dc2.large` | `dc2.8xlarge` | `ra3.large` | `ra3.xlplus` | `ra3.4xlarge` | `ra3.16xlarge`", "NumberOfNodes": "The number of compute nodes in the cluster. This parameter is required when the *ClusterType* parameter is specified as `multi-node` .\n\nFor information about determining how many nodes you need, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nIf you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster.\n\nDefault: `1`\n\nConstraints: Value must be at least 1 and no more than 100.", "OwnerAccount": "The AWS account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.", "Port": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values:\n\n- For clusters with ra3 nodes - Select a port within the ranges `5431-5455` or `8191-8215` . (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)\n- For clusters with dc2 nodes - Select a port within the range `1150-65535` .", @@ -39582,7 +39677,7 @@ "BucketName": "The name of an existing S3 bucket where the log files are to be stored.\n\nConstraints:\n\n- Must be in the same region as the cluster\n- The cluster must have read bucket and put object permissions", "LogDestinationType": "The log destination type. An enum with possible values of `s3` and `cloudwatch` .", "LogExports": "The collection of exported log types. Possible values are `connectionlog` , `useractivitylog` , and `userlog` .", - "S3KeyPrefix": "The prefix applied to the log file names.\n\nConstraints:\n\n- Cannot exceed 512 characters\n- Cannot contain spaces( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. 
The hexadecimal codes for invalid characters are:\n\n- x00 to x20\n- x22\n- x27\n- x5c\n- x7f or larger" + "S3KeyPrefix": "The prefix applied to the log file names.\n\nValid characters are any letter from any language, any whitespace character, any numeric character, and the following characters: underscore ( `_` ), period ( `.` ), colon ( `:` ), slash ( `/` ), equal ( `=` ), plus ( `+` ), backslash ( `\\` ), hyphen ( `-` ), at symbol ( `@` )." }, "AWS::Redshift::Cluster Tag": { "Key": "The key, or name, for the resource tag.", @@ -39668,6 +39763,18 @@ "Key": "The key, or name, for the resource tag.", "Value": "The value for the resource tag." }, + "AWS::Redshift::Integration": { + "AdditionalEncryptionContext": "The encryption context for the integration. For more information, see [Encryption context](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the *AWS Key Management Service Developer Guide* .", + "IntegrationName": "The name of the integration.", + "KMSKeyId": "The AWS Key Management Service ( AWS KMS ) key identifier for the key used to encrypt the integration.", + "SourceArn": "The Amazon Resource Name (ARN) of the database used as the source for replication.", + "Tags": "The list of tags associated with the integration.", + "TargetArn": "The Amazon Resource Name (ARN) of the Amazon Redshift data warehouse to use as the target for replication." + }, + "AWS::Redshift::Integration Tag": { + "Key": "The key, or name, for the resource tag.", + "Value": "The value for the resource tag." + }, "AWS::Redshift::ScheduledAction": { "Enable": "If true, the schedule is enabled. If false, the scheduled action does not trigger. For more information about `state` of the scheduled action, see `ScheduledAction` .", "EndTime": "The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger.", @@ -39964,7 +40071,7 @@ "AWS::ResilienceHub::ResiliencyPolicy": { "DataLocationConstraint": "Specifies a high-level geographical location constraint for where your resilience policy data can be stored.", "Policy": "The resiliency policy.", - "PolicyDescription": "The description for the policy.", + "PolicyDescription": "Description of the resiliency policy.", "PolicyName": "The name of the policy", "Tags": "Tags assigned to the resource. A tag is a label that you assign to an AWS resource. Each tag consists of a key/value pair.", "Tier": "The tier for this resiliency policy, ranging from the highest severity ( `MissionCritical` ) to lowest ( `NonCritical` )." @@ -40325,7 +40432,7 @@ "AWS::Route53Profiles::ProfileAssociation": { "Arn": "The Amazon Resource Name (ARN) of the profile association to a VPC.", "Name": "Name of the Profile association.", - "ProfileId": "ID of the Profile.", + "ProfileId": "ID of the Profile.\n\nUpdate to this property requires update to the `ResourceId` property as well, because you can only associate one Profile per VPC. For more information, see [Route 53 Profiles](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/profiles.html) .", "ResourceId": "The ID of the VPC.", "Tags": "" }, @@ -40541,7 +40648,6 @@ "ResourceId": "The ID of the Amazon VPC that is associated with the query logging configuration." }, "AWS::Route53Resolver::ResolverRule": { - "DelegationRecord": "", "DomainName": "DNS queries for this domain name are forwarded to the IP addresses that are specified in `TargetIps` . 
If a query matches multiple Resolver rules (example.com and www.example.com), the query is routed using the Resolver rule that contains the most specific domain name (www.example.com).", "Name": "The name for the Resolver rule, which you specified when you created the Resolver rule.", "ResolverEndpointId": "The ID of the endpoint that the rule is associated with.", @@ -40557,7 +40663,8 @@ "Ip": "One IPv4 address that you want to forward DNS queries to.", "Ipv6": "One IPv6 address that you want to forward DNS queries to.", "Port": "The port at `Ip` that you want to forward DNS queries to.", - "Protocol": "The protocols for the target address. The protocol you choose needs to be supported by the outbound endpoint of the Resolver rule." + "Protocol": "The protocols for the target address. The protocol you choose needs to be supported by the outbound endpoint of the Resolver rule.", + "ServerNameIndication": "The Server Name Indication of the DoH server that you want to forward queries to. This is only used if the Protocol of the `TargetAddress` is `DoH` ." }, "AWS::Route53Resolver::ResolverRuleAssociation": { "Name": "The name of an association between a Resolver rule and a VPC.", @@ -40719,8 +40826,8 @@ "Function": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 invokes when the specified event type occurs." }, "AWS::S3::Bucket LifecycleConfiguration": { - "Rules": "Specifies lifecycle configuration rules for an Amazon S3 bucket.", - "TransitionDefaultMinimumObjectSize": "Indicates which default minimum object size behavior is applied to the lifecycle configuration.\n\n- `all_storage_classes_128K` - Objects smaller than 128 KB will not transition to any storage class by default.\n- `varies_by_storage_class` - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.\n\nTo customize the minimum object size for any transition you can add a filter that specifies a custom `ObjectSizeGreaterThan` or `ObjectSizeLessThan` in the body of your transition rule. Custom filters always take precedence over the default transition behavior." + "Rules": "A lifecycle rule for individual objects in an Amazon S3 bucket.", + "TransitionDefaultMinimumObjectSize": "" }, "AWS::S3::Bucket LoggingConfiguration": { "DestinationBucketName": "The name of the bucket where Amazon S3 should store server access log files. You can store log files in any bucket that you own. By default, logs are stored in the bucket where the `LoggingConfiguration` property is defined.", @@ -40951,7 +41058,7 @@ "ActivityMetrics": "This property contains the details of account-level activity metrics for S3 Storage Lens.", "AdvancedCostOptimizationMetrics": "This property contains the details of account-level advanced cost optimization metrics for S3 Storage Lens.", "AdvancedDataProtectionMetrics": "This property contains the details of account-level advanced data protection metrics for S3 Storage Lens.", - "BucketLevel": "This property contains the details of the account-level bucket-level configurations for Amazon S3 Storage Lens.", + "BucketLevel": "This property contains the details of the account-level bucket-level configurations for Amazon S3 Storage Lens. 
To enable bucket-level configurations, make sure to also set the same metrics at the account level.", "DetailedStatusCodesMetrics": "This property contains the details of account-level detailed status code metrics for S3 Storage Lens.", "StorageLensGroupLevel": "This property determines the scope of Storage Lens group data that is displayed in the Storage Lens dashboard." }, @@ -42120,7 +42227,7 @@ "AWS::SSMQuickSetup::ConfigurationManager ConfigurationDefinition": { "LocalDeploymentAdministrationRoleArn": "The ARN of the IAM role used to administrate local configuration deployments.", "LocalDeploymentExecutionRoleName": "The name of the IAM role used to deploy local configurations.", - "Parameters": "The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. The following tables outline the parameters for each configuration type.\n\n- **OpsCenter (Type: AWS QuickSetupType-SSMOpsCenter)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Scheduler (Type: AWS QuickSetupType-Scheduler)** - - `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.\n- `ICalendarString`\n\n- Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Default Host Management Configuration (Type: AWS QuickSetupType-DHMC)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. 
The default value is \" `true` \".\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Explorer (Type: AWS QuickSetupType-ResourceExplorer)** - - `SelectedAggregatorRegion`\n\n- Description: (Required) The AWS Region where you want to create the aggregator index.\n- `ReplaceExistingAggregator`\n\n- Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the `SelectedAggregatorRegion` .\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Change Manager (Type: AWS QuickSetupType-SSMChangeMgr)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `JobFunction`\n\n- Description: (Required) The name for the Change Manager job function.\n- `PermissionType`\n\n- Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are `CustomPermissions` and `AdminPermissions` . The default value for the parameter is `CustomerPermissions` .\n- `CustomPermissions`\n\n- Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify `CustomPermissions` for the `PermissionType` parameter.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **DevOps\u00a0Guru (Type: AWS QuickSetupType-DevOpsGuru)** - - `AnalyseAllResources`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru analyzes all AWS CloudFormation stacks in the account. The default value is \" `false` \".\n- `EnableSnsNotifications`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru sends notifications when an insight is created. The default value is \" `true` \".\n- `EnableSsmOpsItems`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru creates an OpsCenter OpsItem when an insight is created. The default value is \" `true` \".\n- `EnableDriftRemediation`\n\n- Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \" `false` \".\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. 
A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Conformance Packs (Type: AWS QuickSetupType-CFGCPacks)** - - `DelegatedAccountId`\n\n- Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `none` \".\n- `CPackNames`\n\n- Description: (Required) A comma separated list of AWS Config conformance packs.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **AWS Config Recording (Type: AWS QuickSetupType-CFGRecording)** - - `RecordAllResources`\n\n- Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is \" `true` \".\n- `ResourceTypesToRecord`\n\n- Description: (Optional) A comma separated list of resource types you want to record.\n- `RecordGlobalResourceTypes`\n\n- Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \" `false` \".\n- `GlobalResourceTypesRegion`\n\n- Description: (Optional) Determines the AWS Region where global resources are recorded.\n- `UseCustomBucket`\n\n- Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \" `false` \".\n- `DeliveryBucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver configuration snapshots and configuration history files to.\n- `DeliveryBucketPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `NotificationOptions`\n\n- Description: (Optional) Determines the notification configuration for the recorder. The valid values are `NoStreaming` , `UseExistingTopic` , and `CreateTopic` . The default value is `NoStreaming` .\n- `CustomDeliveryTopicAccountId`\n\n- Description: (Optional) The ID of the AWS account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `CustomDeliveryTopicName`\n\n- Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. 
You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(7 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Host Management (Type: AWS QuickSetupType-SSMHostMgmt)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `UpdateEc2LaunchAgent`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `false` \".\n- `CollectInventory`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `true` \".\n- `ScanInstances`\n\n- Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is \" `true` \".\n- `InstallCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \" `false` \".\n- `UpdateCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Optional) The name of the resource group associated with the instances you want to target. 
You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Distributor (Type: AWS QuickSetupType-Distributor)** - - `PackagesToInstall`\n\n- Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are `AWSEFSTools` , `AWSCWAgent` , and `AWSEC2LaunchAgent` .\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `rate(30 days)` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Patch Policy (Type: AWS QuickSetupType-PatchPolicy)** - - `PatchPolicyName`\n\n- Description: (Required) A name for the patch policy. 
The value you provide is applied to target Amazon EC2 instances as a tag.\n- `SelectedPatchBaselines`\n\n- Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.\n- `PatchBaselineUseDefault`\n\n- Description: (Optional) A boolean value that determines whether the selected patch baselines are all AWS provided.\n- `ConfigurationOptionsPatchOperation`\n\n- Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are `Scan` and `ScanAndInstall` . The default value for the parameter is `Scan` .\n- `ConfigurationOptionsScanValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.\n- `ConfigurationOptionsInstallValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.\n- `ConfigurationOptionsScanNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `ConfigurationOptionsInstallNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `RebootOption`\n\n- Description: (Optional) A boolean value that determines whether instances are rebooted after patches are installed. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `OutputLogEnableS3`\n\n- Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.\n- `OutputS3Location`\n\n- Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.\n\n- `OutputS3BucketRegion`\n\n- Description: (Optional) The AWS Region where the Amazon S3 bucket you want AWS Config to deliver command output to is located.\n- `OutputS3BucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver command output to.\n- `OutputS3KeyPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. 
You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.", + "Parameters": "The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. The following tables outline the parameters for each configuration type.\n\n- **OpsCenter (Type: AWS QuickSetupType-SSMOpsCenter)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Scheduler (Type: AWS QuickSetupType-Scheduler)** - - `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.\n- `ICalendarString`\n\n- Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Default Host Management Configuration (Type: AWS QuickSetupType-DHMC)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. 
The default value is \" `true` \".\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Explorer (Type: AWS QuickSetupType-ResourceExplorer)** - - `SelectedAggregatorRegion`\n\n- Description: (Required) The AWS Region where you want to create the aggregator index.\n- `ReplaceExistingAggregator`\n\n- Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the `SelectedAggregatorRegion` .\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Change Manager (Type: AWS QuickSetupType-SSMChangeMgr)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `JobFunction`\n\n- Description: (Required) The name for the Change Manager job function.\n- `PermissionType`\n\n- Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are `CustomPermissions` and `AdminPermissions` . The default value for the parameter is `CustomerPermissions` .\n- `CustomPermissions`\n\n- Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify `CustomPermissions` for the `PermissionType` parameter.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **DevOps\u00a0Guru (Type: AWS QuickSetupType-DevOpsGuru)** - - `AnalyseAllResources`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru analyzes all AWS CloudFormation stacks in the account. The default value is \" `false` \".\n- `EnableSnsNotifications`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru sends notifications when an insight is created. The default value is \" `true` \".\n- `EnableSsmOpsItems`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru creates an OpsCenter OpsItem when an insight is created. The default value is \" `true` \".\n- `EnableDriftRemediation`\n\n- Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \" `false` \".\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. 
A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Conformance Packs (Type: AWS QuickSetupType-CFGCPacks)** - - `DelegatedAccountId`\n\n- Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `none` \".\n- `CPackNames`\n\n- Description: (Required) A comma separated list of AWS Config conformance packs.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **AWS Config Recording (Type: AWS QuickSetupType-CFGRecording)** - - `RecordAllResources`\n\n- Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is \" `true` \".\n- `ResourceTypesToRecord`\n\n- Description: (Optional) A comma separated list of resource types you want to record.\n- `RecordGlobalResourceTypes`\n\n- Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \" `false` \".\n- `GlobalResourceTypesRegion`\n\n- Description: (Optional) Determines the AWS Region where global resources are recorded.\n- `UseCustomBucket`\n\n- Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \" `false` \".\n- `DeliveryBucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver configuration snapshots and configuration history files to.\n- `DeliveryBucketPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `NotificationOptions`\n\n- Description: (Optional) Determines the notification configuration for the recorder. The valid values are `NoStreaming` , `UseExistingTopic` , and `CreateTopic` . The default value is `NoStreaming` .\n- `CustomDeliveryTopicAccountId`\n\n- Description: (Optional) The ID of the AWS account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `CustomDeliveryTopicName`\n\n- Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. 
You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(7 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Host Management (Type: AWS QuickSetupType-SSMHostMgmt)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `UpdateEc2LaunchAgent`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `false` \".\n- `CollectInventory`\n\n- Description: (Optional) A boolean value that determines whether inventory is collected from the target instances every 30 minutes. The default value is \" `true` \".\n- `ScanInstances`\n\n- Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is \" `true` \".\n- `InstallCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \" `false` \".\n- `UpdateCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Optional) The name of the resource group associated with the instances you want to target.
You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Distributor (Type: AWS QuickSetupType-Distributor)** - - `PackagesToInstall`\n\n- Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are `AWSEFSTools` , `AWSCWAgent` , and `AWSEC2LaunchAgent` .\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `rate(30 days)` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Patch Policy (Type: AWS QuickSetupType-PatchPolicy)** - - `PatchPolicyName`\n\n- Description: (Required) A name for the patch policy. 
The value you provide is applied to target Amazon EC2 instances as a tag.\n- `SelectedPatchBaselines`\n\n- Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.\n- `PatchBaselineUseDefault`\n\n- Description: (Optional) A boolean value that determines whether the selected patch baselines are all AWS provided.\n- `ConfigurationOptionsPatchOperation`\n\n- Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are `Scan` and `ScanAndInstall` . The default value for the parameter is `Scan` .\n- `ConfigurationOptionsScanValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.\n- `ConfigurationOptionsInstallValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.\n- `ConfigurationOptionsScanNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `ConfigurationOptionsInstallNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should install available patches at the next cron interval. The default value is \" `false` \".\n- `RebootOption`\n\n- Description: (Optional) Determines whether instances are rebooted after patches are installed. Valid values are `RebootIfNeeded` and `NoReboot` .\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `OutputLogEnableS3`\n\n- Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.\n- `OutputS3Location`\n\n- Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.\n\n- `OutputS3BucketRegion`\n\n- Description: (Optional) The AWS Region where the Amazon S3 bucket you want AWS Config to deliver command output to is located.\n- `OutputS3BucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver command output to.\n- `OutputS3KeyPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.
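For instance-targeting types such as Host Management, Distributor, and Patch Policy, the `TargetType`, `TargetTagKey`, and `TargetTagValue` parameters above work together. Below is a hedged sketch of a tag-targeted Patch Policy definition, again assuming the same `AWS::SSMQuickSetup::ConfigurationManager` shape and an `AWSQuickSetupType-PatchPolicy` type string that this diff does not spell out.

```yaml
# Sketch only: resource name, Type value, and the baseline JSON payload are assumptions.
Resources:
  PatchPolicySetup:
    Type: AWS::SSMQuickSetup::ConfigurationManager   # assumed resource type
    Properties:
      Name: patch-policy
      ConfigurationDefinitions:
        - Type: AWSQuickSetupType-PatchPolicy         # assumed value for the documented Type field
          Parameters:
            PatchPolicyName: nightly-scan
            SelectedPatchBaselines: '[]'              # JSON array of baseline objects, per the description
            ConfigurationOptionsPatchOperation: Scan
            ConfigurationOptionsScanValue: cron(0 2 * * ? *)
            RebootOption: NoReboot
            TargetType: Tags
            TargetTagKey: PatchGroup                  # placeholder tag key
            TargetTagValue: web-servers               # placeholder tag value
            TargetRegions: us-east-1
```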
You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.", "Type": "The type of the Quick Setup configuration.", "TypeVersion": "The version of the Quick Setup type used.", "id": "The ID of the configuration definition." @@ -42455,12 +42562,13 @@ "AppNetworkAccessType": "Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly` .\n\n- `PublicInternetOnly` - Non-EFS traffic is through a VPC managed by Amazon SageMaker , which allows direct internet access\n- `VpcOnly` - All Studio traffic is through the specified VPC and subnets\n\n*Valid Values* : `PublicInternetOnly | VpcOnly`", "AppSecurityGroupManagement": "The entity that creates and manages the required security groups for inter-app communication in `VpcOnly` mode. Required when `CreateDomain.AppNetworkAccessType` is `VpcOnly` and `DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn` is provided. If setting up the domain for use with RStudio, this value must be set to `Service` .\n\n*Allowed Values* : `Service` | `Customer`", "AuthMode": "The mode of authentication that members use to access the Domain.\n\n*Valid Values* : `SSO | IAM`", - "DefaultSpaceSettings": "A collection of settings that apply to spaces created in the domain.", + "DefaultSpaceSettings": "The default settings for shared spaces that users create in the domain.\n\nSageMaker applies these settings only to shared spaces. It doesn't apply them to private spaces.", "DefaultUserSettings": "The default user settings.", "DomainName": "The domain name.", "DomainSettings": "A collection of settings that apply to the `SageMaker Domain` . These settings are specified through the `CreateDomain` API call.", "KmsKeyId": "SageMaker uses AWS KMS to encrypt the EFS volume attached to the Domain with an AWS managed customer master key (CMK) by default. For more control, specify a customer managed CMK.\n\n*Length Constraints* : Maximum length of 2048.\n\n*Pattern* : `.*`", "SubnetIds": "The VPC subnets that Studio uses for communication.\n\n*Length Constraints* : Maximum length of 32.\n\n*Array members* : Minimum number of 1 item. Maximum number of 16 items.\n\n*Pattern* : `[-0-9a-zA-Z]+`", + "TagPropagation": "", "Tags": "Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.\n\nTags that you specify for the Domain are also added to all apps that are launched in the Domain.\n\n*Array members* : Minimum number of 0 items. 
Maximum number of 50 items.", "VpcId": "The ID of the Amazon Virtual Private Cloud (Amazon VPC) that Studio uses for communication.\n\n*Length Constraints* : Maximum length of 32.\n\n*Pattern* : `[-0-9a-zA-Z]+`" }, @@ -42511,6 +42619,7 @@ }, "AWS::SageMaker::Domain DomainSettings": { "DockerSettings": "A collection of settings that configure the domain's Docker interaction.", + "ExecutionRoleIdentityConfig": "The configuration for attaching a SageMaker user profile name to the execution role as a [sts:SourceIdentity key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) .", "RStudioServerProDomainSettings": "A collection of settings that configure the `RStudioServerPro` Domain-level app.", "SecurityGroupIds": "The security groups for the Amazon Virtual Private Cloud that the `Domain` uses for communication between Domain-level apps and user apps." }, @@ -42574,19 +42683,19 @@ "Value": "The tag value." }, "AWS::SageMaker::Domain UserSettings": { - "CodeEditorAppSettings": "The Code Editor application settings.", - "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.", - "CustomPosixUserConfig": "Details about the POSIX identity that is used for file system operations.", + "CodeEditorAppSettings": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "CustomPosixUserConfig": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "DefaultLandingUri": "The default experience that the user is directed to when accessing the domain. The supported values are:\n\n- `studio::` : Indicates that Studio is the default experience. This value can only be passed if `StudioWebPortal` is set to `ENABLED` .\n- `app:JupyterServer:` : Indicates that Studio Classic is the default experience.", - "ExecutionRole": "The execution role for the user.", - "JupyterLabAppSettings": "The settings for the JupyterLab application.", + "ExecutionRole": "The execution role for the user.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.", + "JupyterLabAppSettings": "The settings for the JupyterLab application.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. 
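The revised `AWS::SageMaker::Domain` text distinguishes `DefaultSpaceSettings` (shared spaces) from `DefaultUserSettings` (private spaces) and adds `ExecutionRoleIdentityConfig`. The following is a minimal sketch that exercises both defaults; the role, VPC, and subnet identifiers are placeholders, and the `USER_PROFILE_NAME` value for `ExecutionRoleIdentityConfig` is an assumption rather than something stated in this hunk.

```yaml
Resources:
  StudioDomain:
    Type: AWS::SageMaker::Domain
    Properties:
      DomainName: analytics-domain
      AuthMode: IAM
      AppNetworkAccessType: PublicInternetOnly
      VpcId: vpc-0123456789abcdef0              # placeholder
      SubnetIds:
        - subnet-0123456789abcdef0              # placeholder
      DomainSettings:
        ExecutionRoleIdentityConfig: USER_PROFILE_NAME   # assumed value; attaches the profile name as sts:SourceIdentity
      DefaultUserSettings:                       # applied only to private spaces, per the notes above
        ExecutionRole: arn:aws:iam::111122223333:role/SageMakerUserRole    # placeholder
      DefaultSpaceSettings:                      # applied only to shared spaces
        ExecutionRole: arn:aws:iam::111122223333:role/SageMakerSpaceRole   # placeholder
```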
SageMaker doesn't apply these settings to shared spaces.", "JupyterServerAppSettings": "The Jupyter server's app settings.", "KernelGatewayAppSettings": "The kernel gateway app settings.", "RSessionAppSettings": "A collection of settings that configure the `RSessionGateway` app.", "RStudioServerProAppSettings": "A collection of settings that configure user interaction with the `RStudioServerPro` app.", - "SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.", + "SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "SharingSettings": "Specifies options for sharing Amazon SageMaker Studio notebooks.", - "SpaceStorageSettings": "The storage settings for a space.", + "SpaceStorageSettings": "The storage settings for a space.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "StudioWebPortal": "Whether the user can access Studio. If this value is set to `DISABLED` , the user cannot access Studio, even if that is the default experience for the domain.", "StudioWebPortalSettings": "Studio settings. If these settings are applied on a user level, they take priority over the settings applied on a domain level." }, @@ -42815,8 +42924,7 @@ "Processor": "", "ProgrammingLang": "", "ReleaseNotes": "", - "VendorGuidance": "", - "Version": "The version number." + "VendorGuidance": "" }, "AWS::SageMaker::InferenceComponent": { "EndpointArn": "The Amazon Resource Name (ARN) of the endpoint that hosts the inference component.", @@ -44050,18 +44158,18 @@ "Value": "The tag value." }, "AWS::SageMaker::UserProfile UserSettings": { - "CodeEditorAppSettings": "The Code Editor application settings.", - "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.", - "CustomPosixUserConfig": "Details about the POSIX identity that is used for file system operations.", + "CodeEditorAppSettings": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. 
Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "CustomPosixUserConfig": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "DefaultLandingUri": "The default experience that the user is directed to when accessing the domain. The supported values are:\n\n- `studio::` : Indicates that Studio is the default experience. This value can only be passed if `StudioWebPortal` is set to `ENABLED` .\n- `app:JupyterServer:` : Indicates that Studio Classic is the default experience.", - "ExecutionRole": "The execution role for the user.", - "JupyterLabAppSettings": "The settings for the JupyterLab application.", + "ExecutionRole": "The execution role for the user.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.", + "JupyterLabAppSettings": "The settings for the JupyterLab application.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "JupyterServerAppSettings": "The Jupyter server's app settings.", "KernelGatewayAppSettings": "The kernel gateway app settings.", "RStudioServerProAppSettings": "A collection of settings that configure user interaction with the `RStudioServerPro` app.", - "SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.", + "SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "SharingSettings": "Specifies options for sharing Amazon SageMaker Studio notebooks.", - "SpaceStorageSettings": "The storage settings for a space.", + "SpaceStorageSettings": "The storage settings for a space.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "StudioWebPortal": "Whether the user can access Studio. 
If this value is set to `DISABLED` , the user cannot access Studio, even if that is the default experience for the domain.", "StudioWebPortalSettings": "Studio settings. If these settings are applied on a user level, they take priority over the settings applied on a domain level." }, @@ -45407,7 +45515,7 @@ "Domain": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.", "EndpointDetails": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.", "EndpointType": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.\n\n> After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n> \n> For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n> \n> It is recommended that you use `VPC` as the `EndpointType` . With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` .", - "IdentityProviderDetails": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Not required when `IdentityProviderType` is set to `SERVICE_MANAGED` .", + "IdentityProviderDetails": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` .", "IdentityProviderType": "The mode of authentication for a server. The default value is `SERVICE_MANAGED` , which allows you to store and access user credentials within the AWS Transfer Family service.\n\nUse `AWS_DIRECTORY_SERVICE` to provide access to Active Directory groups in AWS Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in AWS using AD Connector. 
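The `IdentityProviderType` and `IdentityProviderDetails` descriptions in this hunk (continued just below) pair the `AWS_LAMBDA` provider type with a `Function` field. Here is a minimal sketch, assuming these properties belong to an `AWS::Transfer::Server` resource, which this hunk does not name; the function ARN is a placeholder.

```yaml
Resources:
  SftpServer:
    Type: AWS::Transfer::Server      # assumed resource type for the properties documented here
    Properties:
      Domain: S3
      EndpointType: PUBLIC
      IdentityProviderType: AWS_LAMBDA
      IdentityProviderDetails:
        Function: arn:aws:lambda:us-east-1:111122223333:function:sftp-auth   # placeholder ARN
```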
This option also requires you to provide a Directory ID by using the `IdentityProviderDetails` parameter.\n\nUse the `API_GATEWAY` value to integrate with an identity provider of your choosing. The `API_GATEWAY` setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the `IdentityProviderDetails` parameter.\n\nUse the `AWS_LAMBDA` value to directly use an AWS Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the `Function` parameter for the `IdentityProviderDetails` data type.", "LoggingRole": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs.", "PostAuthenticationLoginBanner": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.\n\n> The SFTP protocol does not support post-authentication display banners.", @@ -46701,6 +46809,23 @@ "ResourceArn": "The Amazon Resource Name (ARN) of the resource to associate with the web ACL.\n\nThe ARN must be in one of the following formats:\n\n- For an Application Load Balancer: `arn: *partition* :elasticloadbalancing: *region* : *account-id* :loadbalancer/app/ *load-balancer-name* / *load-balancer-id*`\n- For an Amazon API Gateway REST API: `arn: *partition* :apigateway: *region* ::/restapis/ *api-id* /stages/ *stage-name*`\n- For an AWS AppSync GraphQL API: `arn: *partition* :appsync: *region* : *account-id* :apis/ *GraphQLApiId*`\n- For an Amazon Cognito user pool: `arn: *partition* :cognito-idp: *region* : *account-id* :userpool/ *user-pool-id*`\n- For an AWS App Runner service: `arn: *partition* :apprunner: *region* : *account-id* :service/ *apprunner-service-name* / *apprunner-service-id*`\n- For an AWS Verified Access instance: `arn: *partition* :ec2: *region* : *account-id* :verified-access-instance/ *instance-id*`", "WebACLArn": "The Amazon Resource Name (ARN) of the web ACL that you want to associate with the resource." }, + "AWS::Wisdom::AIPrompt": { + "ApiFormat": "The API format used for this AI Prompt.", + "AssistantId": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.", + "Description": "The description of the AI Prompt.", + "ModelId": "The identifier of the model used for this AI Prompt. Model Ids supported are: `CLAUDE_3_HAIKU_20240307_V1` .", + "Name": "The name of the AI Prompt", + "Tags": "The tags used to organize, track, or control access for this resource.", + "TemplateConfiguration": "The configuration of the prompt template for this AI Prompt.", + "TemplateType": "The type of the prompt template for this AI Prompt.", + "Type": "The type of this AI Prompt." 
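The new `AWS::Wisdom::AIPrompt` entry lists its properties but not their allowed values, apart from the `CLAUDE_3_HAIKU_20240307_V1` model ID. The sketch below is therefore only indicative: the `ANTHROPIC_CLAUDE_MESSAGES`, `TEXT`, and `ANSWER_GENERATION` values are assumptions, the assistant ID is a placeholder, and the `TemplateConfiguration` shape follows the `TextFullAIPromptEditTemplateConfiguration` type that closes this block.

```yaml
Resources:
  AnswerPrompt:
    Type: AWS::Wisdom::AIPrompt
    Properties:
      Name: answer-generation-prompt
      AssistantId: 00000000-0000-0000-0000-000000000000   # placeholder assistant ID (not an ARN)
      ModelId: CLAUDE_3_HAIKU_20240307_V1
      ApiFormat: ANTHROPIC_CLAUDE_MESSAGES                 # assumed value
      TemplateType: TEXT                                   # assumed value
      Type: ANSWER_GENERATION                              # assumed value
      TemplateConfiguration:
        TextFullAIPromptEditTemplateConfiguration:
          Text: |
            You are an assistant. Answer the user's question using the retrieved passages.
```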
+ }, + "AWS::Wisdom::AIPrompt AIPromptTemplateConfiguration": { + "TextFullAIPromptEditTemplateConfiguration": "" + }, + "AWS::Wisdom::AIPrompt TextFullAIPromptEditTemplateConfiguration": { + "Text": "" + }, "AWS::Wisdom::Assistant": { "Description": "The description of the assistant.", "Name": "The name of the assistant.", @@ -46734,11 +46859,11 @@ "Name": "The name of the knowledge base.", "RenderingConfiguration": "Information about how to render the content.", "ServerSideEncryptionConfiguration": "This customer managed key must have a policy that allows `kms:CreateGrant` and `kms:DescribeKey` permissions to the IAM identity using the key to invoke Wisdom. For more information about setting up a customer managed key for Wisdom, see [Enable Amazon Connect Wisdom for your instance](https://docs.aws.amazon.com/connect/latest/adminguide/enable-wisdom.html) . For information about valid ID values, see [Key identifiers (KeyId)](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id) in the *AWS Key Management Service Developer Guide* .", - "SourceConfiguration": "The source of the knowledge base content. Only set this argument for EXTERNAL knowledge bases.", + "SourceConfiguration": "The source of the knowledge base content. Only set this argument for EXTERNAL or Managed knowledge bases.", "Tags": "The tags used to organize, track, or control access for this resource." }, "AWS::Wisdom::KnowledgeBase AppIntegrationsConfiguration": { - "AppIntegrationArn": "The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` as source fields.\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` as source fields.\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , your AppIntegrations DataIntegration must have an ObjectConfiguration if `objectFields` is not provided, including at least `id` , `title` , `updated_at` , and `draft` as source fields.\n- For [SharePoint](https://docs.aws.amazon.com/https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/sharepoint-net-server-csom-jsom-and-rest-api-index) , your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among `docx` , `pdf` , `html` , `htm` , and `txt` .\n- For [Amazon S3](https://docs.aws.amazon.com/https://aws.amazon.com/s3/) , the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. 
The `SourceURI` of your DataIntegration must use the following format: `s3://your_s3_bucket_name` .\n\n> The bucket policy of the corresponding S3 bucket must allow the AWS principal `app-integrations.amazonaws.com` to perform `s3:ListBucket` , `s3:GetObject` , and `s3:GetBucketLocation` against the bucket.", + "AppIntegrationArn": "The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` as source fields.\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` as source fields.\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , your AppIntegrations DataIntegration must have an ObjectConfiguration if `objectFields` is not provided, including at least `id` , `title` , `updated_at` , and `draft` as source fields.\n- For [SharePoint](https://docs.aws.amazon.com/https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/sharepoint-net-server-csom-jsom-and-rest-api-index) , your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among `docx` , `pdf` , `html` , `htm` , and `txt` .\n- For [Amazon S3](https://docs.aws.amazon.com/s3/) , the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. The `SourceURI` of your DataIntegration must use the following format: `s3://your_s3_bucket_name` .\n\n> The bucket policy of the corresponding S3 bucket must allow the AWS principal `app-integrations.amazonaws.com` to perform `s3:ListBucket` , `s3:GetObject` , and `s3:GetBucketLocation` against the bucket.", "ObjectFields": "The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if ObjectConfiguration is included in the provided DataIntegration.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , you must include at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` .\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , you must include at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` .\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , you must include at least `id` , `title` , `updated_at` , and `draft` .\n\nMake sure to include additional fields. These fields are indexed and used to source recommendations." 
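The `AppIntegrationArn` notes above end with the Amazon S3 case, where the DataIntegration's `SourceURI` must be an `s3://` bucket URI and the bucket policy must trust `app-integrations.amazonaws.com`. A minimal sketch of the knowledge base side of that setup, assuming the `AWS::Wisdom::KnowledgeBase` properties shown in this hunk; the DataIntegration ARN is a placeholder.

```yaml
Resources:
  DocsKnowledgeBase:
    Type: AWS::Wisdom::KnowledgeBase
    Properties:
      Name: product-docs
      KnowledgeBaseType: EXTERNAL      # SourceConfiguration is only set for EXTERNAL knowledge bases
      SourceConfiguration:
        AppIntegrations:
          # Placeholder ARN of an AppIntegrations DataIntegration whose SourceURI is s3://your_s3_bucket_name
          AppIntegrationArn: arn:aws:app-integrations:us-east-1:111122223333:data-integration/EXAMPLE-ID
```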
}, "AWS::Wisdom::KnowledgeBase RenderingConfiguration": { diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 93120609a..1e37b45b3 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -7876,7 +7876,7 @@ "additionalProperties": false, "properties": { "DomainName": { - "markdownDescription": "The custom domain name for your API in Amazon API Gateway. Uppercase letters are not supported.", + "markdownDescription": "The custom domain name for your API in Amazon API Gateway. Uppercase letters and the underscore ( `_` ) character are not supported.", "title": "DomainName", "type": "string" }, @@ -10825,7 +10825,7 @@ "type": "string" }, "DisableSSO": { - "markdownDescription": "", + "markdownDescription": "If you set this parameter to `true` , Amazon AppFlow bypasses the single sign-on (SSO) settings in your SAP account when it accesses your SAP OData instance.\n\nWhether you need this option depends on the types of credentials that you applied to your SAP OData connection profile. If your profile uses basic authentication credentials, SAP SSO can prevent Amazon AppFlow from connecting to your account with your username and password. In this case, bypassing SSO makes it possible for Amazon AppFlow to connect successfully. However, if your profile uses OAuth credentials, this parameter has no affect.", "title": "DisableSSO", "type": "boolean" }, @@ -10876,7 +10876,7 @@ "type": "string" }, "OAuth2GrantType": { - "markdownDescription": "Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records.\n\nYou can specify one of the following values:\n\n- **AUTHORIZATION_CODE** - Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records.\n- **CLIENT_CREDENTIALS** - Amazon AppFlow passes client credentials (a client ID and client secret) when it requests the access token from Salesforce. You provide these credentials to Amazon AppFlow when you define the connection to your Salesforce account.\n- **JWT_BEARER** - Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records.", + "markdownDescription": "Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records.\n\nYou can specify one of the following values:\n\n- **AUTHORIZATION_CODE** - Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records.\n- **JWT_BEARER** - Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. 
When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records.\n\n> The CLIENT_CREDENTIALS value is not supported for Salesforce.", "title": "OAuth2GrantType", "type": "string" }, @@ -10914,7 +10914,7 @@ "properties": { "OAuth2Credentials": { "$ref": "#/definitions/AWS::AppFlow::ConnectorProfile.OAuth2Credentials", - "markdownDescription": "", + "markdownDescription": "The OAuth 2.0 credentials required to authenticate the user.", "title": "OAuth2Credentials" }, "Password": { @@ -11973,7 +11973,7 @@ "additionalProperties": false, "properties": { "maxPageSize": { - "markdownDescription": "", + "markdownDescription": "The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application. For transfers of OData records, the maximum page size is 3,000. For transfers of data that comes from an ODP provider, the maximum page size is 10,000.", "title": "maxPageSize", "type": "number" } @@ -11987,7 +11987,7 @@ "additionalProperties": false, "properties": { "maxParallelism": { - "markdownDescription": "", + "markdownDescription": "The maximum number of processes that Amazon AppFlow runs at the same time when it retrieves your data from your SAP application.", "title": "maxParallelism", "type": "number" } @@ -12007,12 +12007,12 @@ }, "paginationConfig": { "$ref": "#/definitions/AWS::AppFlow::Flow.SAPODataPaginationConfig", - "markdownDescription": "", + "markdownDescription": "Sets the page size for each concurrent process that transfers OData records from your SAP instance.", "title": "paginationConfig" }, "parallelismConfig": { "$ref": "#/definitions/AWS::AppFlow::Flow.SAPODataParallelismConfig", - "markdownDescription": "", + "markdownDescription": "Sets the number of concurrent processes that transfers OData records from your SAP instance.", "title": "parallelismConfig" } }, @@ -19172,9 +19172,7 @@ "title": "DynamoDBConfig" }, "ElasticsearchConfig": { - "$ref": "#/definitions/AWS::AppSync::DataSource.ElasticsearchConfig", - "markdownDescription": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\n\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service . This property is deprecated. For new data sources, use *OpenSearchServiceConfig* to specify an OpenSearch Service data source.", - "title": "ElasticsearchConfig" + "$ref": "#/definitions/AWS::AppSync::DataSource.ElasticsearchConfig" }, "EventBridgeConfig": { "$ref": "#/definitions/AWS::AppSync::DataSource.EventBridgeConfig", @@ -19350,13 +19348,9 @@ "additionalProperties": false, "properties": { "AwsRegion": { - "markdownDescription": "The AWS Region.", - "title": "AwsRegion", "type": "string" }, "Endpoint": { - "markdownDescription": "The endpoint.", - "title": "Endpoint", "type": "string" } }, @@ -41213,12 +41207,12 @@ "type": "number" }, "ComputeType": { - "markdownDescription": "> Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. 
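The SAP OData descriptions filled in above cover `maxPageSize` (up to 3,000 for OData records) and `maxParallelism`. The fragment below shows where those settings sit inside an `AWS::AppFlow::Flow` source configuration; it is a sketch, the surrounding required flow properties (tasks, destination, trigger) are omitted, and the connector profile name and `ObjectPath` value are placeholders.

```yaml
# Fragment of an AWS::AppFlow::Flow definition; only the SAP OData source portion is shown.
SourceFlowConfig:
  ConnectorType: SAPOData
  ConnectorProfileName: my-sap-profile                     # placeholder
  SourceConnectorProperties:
    SAPOData:
      ObjectPath: /sap/opu/odata/sap/EXAMPLE_SRV/Orders    # placeholder OData object path
      paginationConfig:
        maxPageSize: 3000        # maximum for OData records, per the description above
      parallelismConfig:
        maxParallelism: 5
```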
Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "markdownDescription": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "title": "ComputeType", "type": "string" }, "EnvironmentType": { - "markdownDescription": "> Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "title": "EnvironmentType", "type": "string" }, @@ -45765,7 +45759,7 @@ "properties": { "AccountRecoverySetting": { "$ref": "#/definitions/AWS::Cognito::UserPool.AccountRecoverySetting", - "markdownDescription": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. 
In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email.", + "markdownDescription": "The available verified method a user can use to recover their password when they call `ForgotPassword` . You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email.", "title": "AccountRecoverySetting" }, "AdminCreateUserConfig": { @@ -45777,7 +45771,7 @@ "items": { "type": "string" }, - "markdownDescription": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n> This user pool property cannot be updated.", + "markdownDescription": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .", "title": "AliasAttributes", "type": "array" }, @@ -45805,12 +45799,12 @@ "title": "EmailConfiguration" }, "EmailVerificationMessage": { - "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", + "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", "title": "EmailVerificationMessage", "type": "string" }, "EmailVerificationSubject": { - "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", + "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", "title": "EmailVerificationSubject", "type": "string" }, @@ -45818,7 +45812,7 @@ "items": { "type": "string" }, - "markdownDescription": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \u201cOFF\u201d and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \u201cOFF\u201d. Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`", + "markdownDescription": "Set enabled MFA options on a specified user pool. To disable all MFAs after it has been enabled, set `MfaConfiguration` to `OFF` and remove EnabledMfas. MFAs can only be all disabled if `MfaConfiguration` is `OFF` . After you enable `SMS_MFA` , you can only disable it by setting `MfaConfiguration` to `OFF` . Can be one of the following values:\n\n- `SMS_MFA` - Enables MFA with SMS for the user pool. 
To select this option, you must also provide values for `SmsConfiguration` .\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n- `EMAIL_OTP` - Enables MFA with email for the user pool. To select this option, you must provide values for `EmailConfiguration` and within those, set `EmailSendingAccount` to `DEVELOPER` .\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA` | `EMAIL_OTP`", "title": "EnabledMfas", "type": "array" }, @@ -45841,12 +45835,12 @@ "items": { "$ref": "#/definitions/AWS::Cognito::UserPool.SchemaAttribute" }, - "markdownDescription": "The schema attributes for the new user pool. These attributes can be standard or custom attributes.\n\n> During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute.", + "markdownDescription": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.", "title": "Schema", "type": "array" }, "SmsAuthenticationMessage": { - "markdownDescription": "A string representing the SMS authentication message.", + "markdownDescription": "The contents of the SMS authentication message.", "title": "SmsAuthenticationMessage", "type": "string" }, @@ -45856,7 +45850,7 @@ "title": "SmsConfiguration" }, "SmsVerificationMessage": { - "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", + "markdownDescription": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .", "title": "SmsVerificationMessage", "type": "string" }, @@ -45890,13 +45884,13 @@ "items": { "type": "string" }, - "markdownDescription": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated.", + "markdownDescription": "Specifies whether a user can use an email address or phone number as a username when they sign up.", "title": "UsernameAttributes", "type": "array" }, "UsernameConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPool.UsernameConfiguration", - "markdownDescription": "You can choose to set case sensitivity on the username input for the selected sign-in option. For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set.", + "markdownDescription": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. 
For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) .", "title": "UsernameConfiguration" }, "VerificationMessageTemplate": { @@ -45934,7 +45928,7 @@ "items": { "$ref": "#/definitions/AWS::Cognito::UserPool.RecoveryOption" }, - "markdownDescription": "The list of `RecoveryOptionTypes` .", + "markdownDescription": "The list of options and priorities for user message delivery in forgot-password operations. Sets or displays user pool preferences for email or SMS message priority, whether users should fall back to a second delivery method, and whether passwords should only be reset by administrators.", "title": "RecoveryMechanisms", "type": "array" } @@ -45951,7 +45945,7 @@ }, "InviteMessageTemplate": { "$ref": "#/definitions/AWS::Cognito::UserPool.InviteMessageTemplate", - "markdownDescription": "The message template to be used for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .", + "markdownDescription": "The template for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .", "title": "InviteMessageTemplate" }, "UnusedAccountValidityDays": { @@ -45966,12 +45960,12 @@ "additionalProperties": false, "properties": { "LambdaArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send email notifications to users.", + "markdownDescription": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.", "title": "LambdaArn", "type": "string" }, "LambdaVersion": { - "markdownDescription": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information that Amazon Cognito passes to your custom email sender AWS Lambda function. The only supported value is `V1_0` .", + "markdownDescription": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function.", "title": "LambdaVersion", "type": "string" } @@ -45982,12 +45976,12 @@ "additionalProperties": false, "properties": { "LambdaArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send SMS notifications to users.", + "markdownDescription": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.", "title": "LambdaArn", "type": "string" }, "LambdaVersion": { - "markdownDescription": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS sender Lambda function. The only supported value is `V1_0` .", + "markdownDescription": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. 
Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function.", "title": "LambdaVersion", "type": "string" } @@ -46014,7 +46008,7 @@ "additionalProperties": false, "properties": { "ConfigurationSet": { - "markdownDescription": "The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- Event publishing \u2013 Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.\n- IP pool management \u2013 When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.", + "markdownDescription": "The set of configuration rules that can be applied to emails sent using Amazon Simple Email Service. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- **Event publishing** - Amazon Simple Email Service can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as Amazon SNS and Amazon CloudWatch.\n- **IP pool management** - When leasing dedicated IP addresses with Amazon Simple Email Service, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.", "title": "ConfigurationSet", "type": "string" }, @@ -46024,7 +46018,7 @@ "type": "string" }, "From": { - "markdownDescription": "Identifies either the sender's email address or the sender's name with their email address. For example, `testuser@example.com` or `Test User ` . This address appears before the body of the email.", + "markdownDescription": "Either the sender\u2019s email address or the sender\u2019s name with their email address. For example, `testuser@example.com` or `Test User ` . This address appears before the body of the email.", "title": "From", "type": "string" }, @@ -46072,7 +46066,7 @@ }, "CustomEmailSender": { "$ref": "#/definitions/AWS::Cognito::UserPool.CustomEmailSender", - "markdownDescription": "A custom email sender AWS Lambda trigger.", + "markdownDescription": "The configuration of a custom email sender Lambda trigger. This trigger routes all email notifications from a user pool to a Lambda function that delivers the message using custom logic.", "title": "CustomEmailSender" }, "CustomMessage": { @@ -46082,7 +46076,7 @@ }, "CustomSMSSender": { "$ref": "#/definitions/AWS::Cognito::UserPool.CustomSMSSender", - "markdownDescription": "A custom SMS sender AWS Lambda trigger.", + "markdownDescription": "The configuration of a custom SMS sender Lambda trigger.
This trigger routes all SMS notifications from a user pool to a Lambda function that delivers the message using custom logic.", "title": "CustomSMSSender" }, "DefineAuthChallenge": { @@ -46091,7 +46085,7 @@ "type": "string" }, "KMSKeyID": { - "markdownDescription": "The Amazon Resource Name of a AWS Key Management Service ( AWS KMS ) key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to `CustomEmailSender` and `CustomSMSSender` .", + "markdownDescription": "The ARN of a [KMS key](https://docs.aws.amazon.com//kms/latest/developerguide/concepts.html#master_keys) . Amazon Cognito uses the key to encrypt codes and temporary passwords sent to custom sender Lambda triggers.", "title": "KMSKeyID", "type": "string" }, @@ -46221,12 +46215,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "Specifies the recovery method for a user.", + "markdownDescription": "The recovery method that this object sets a recovery option for.", "title": "Name", "type": "string" }, "Priority": { - "markdownDescription": "A positive integer specifying priority of a method with 1 being the highest priority.", + "markdownDescription": "Your priority preference for using the specified attribute in account recovery. The highest priority is `1` .", "title": "Priority", "type": "number" } @@ -46242,7 +46236,7 @@ "type": "string" }, "DeveloperOnlyAttribute": { - "markdownDescription": "> We recommend that you use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users will not be able to modify this attribute using their access token.", + "markdownDescription": "> You should use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users won't be able to modify this attribute using their access token. For example, `DeveloperOnlyAttribute` can be modified using AdminUpdateUserAttributes but can't be updated using UpdateUserAttributes.", "title": "DeveloperOnlyAttribute", "type": "boolean" }, @@ -46278,7 +46272,7 @@ "additionalProperties": false, "properties": { "ExternalId": { - "markdownDescription": "The external ID is a value. We recommend you use `ExternalId` to add security to your IAM role, which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , the Cognito User Pool uses it when attempting to assume your IAM role. You can also set your roles trust policy to require the `ExternalID` . If you use the Cognito Management Console to create a role for SMS MFA, Cognito creates a role with the required permissions and a trust policy that uses `ExternalId` .", + "markdownDescription": "The external ID provides additional security for your IAM role. You can use an `ExternalId` with the IAM role that you use with Amazon SNS to send SMS messages for your user pool.
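For the custom sender triggers and `KMSKeyID` described above, a hedged CloudFormation YAML sketch (all ARNs are placeholders; `V1_0` is the trigger version the text calls out for custom senders):

```yaml
# Illustrative sketch only: route email and SMS delivery through custom sender functions.
SenderUserPool:
  Type: AWS::Cognito::UserPool
  Properties:
    LambdaConfig:
      KMSKeyID: arn:aws:kms:us-east-1:111122223333:key/EXAMPLE                 # placeholder; encrypts codes and temporary passwords
      CustomEmailSender:
        LambdaArn: arn:aws:lambda:us-east-1:111122223333:function:email-sender # placeholder function ARN
        LambdaVersion: V1_0
      CustomSMSSender:
        LambdaArn: arn:aws:lambda:us-east-1:111122223333:function:sms-sender   # placeholder function ARN
        LambdaVersion: V1_0
```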
If you provide an `ExternalId` , your Amazon Cognito user pool includes it in the request to assume your IAM role. You can configure the role trust policy to require that Amazon Cognito, and any principal, provide the `ExternalID` . If you use the Amazon Cognito Management Console to create a role for SMS multi-factor authentication (MFA), Amazon Cognito creates a role with the required permissions and a trust policy that demonstrates use of the `ExternalId` .\n\nFor more information about the `ExternalId` of a role, see [How to use an external ID when granting access to your AWS resources to a third party](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) .", "title": "ExternalId", "type": "string" }, @@ -46422,7 +46416,7 @@ "additionalProperties": false, "properties": { "AccessTokenValidity": { - "markdownDescription": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with their access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours.", + "markdownDescription": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour.", "title": "AccessTokenValidity", "type": "number" }, @@ -46499,7 +46493,7 @@ "type": "boolean" }, "IdTokenValidity": { - "markdownDescription": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours.", + "markdownDescription": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. 
*Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour.", "title": "IdTokenValidity", "type": "number" }, @@ -46512,7 +46506,7 @@ "type": "array" }, "PreventUserExistenceErrors": { - "markdownDescription": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.", + "markdownDescription": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value.", "title": "PreventUserExistenceErrors", "type": "string" }, @@ -46525,7 +46519,7 @@ "type": "array" }, "RefreshTokenValidity": { - "markdownDescription": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days.", + "markdownDescription": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. 
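The token-validity example in the text above (10-hour access and ID tokens, 10-day refresh tokens) would look roughly like this in a template; a minimal sketch with placeholder names:

```yaml
# Illustrative sketch only: TokenValidityUnits sets the unit for each validity value.
ExampleAppClient:
  Type: AWS::Cognito::UserPoolClient
  Properties:
    UserPoolId: !Ref ExampleUserPool   # placeholder reference
    AccessTokenValidity: 10
    IdTokenValidity: 10
    RefreshTokenValidity: 10           # can't be 0; a 0 is overridden with the 30-day default
    TokenValidityUnits:
      AccessToken: hours
      IdToken: hours
      RefreshToken: days
```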
*Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days.", "title": "RefreshTokenValidity", "type": "number" }, @@ -46586,7 +46580,7 @@ "additionalProperties": false, "properties": { "ApplicationArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint project for integration with the chosen user pool client. Amazon Cognito publishes events to the Amazon Pinpoint project that the app ARN declares.", + "markdownDescription": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project that you want to connect to your user pool app client. Amazon Cognito publishes events to the Amazon Pinpoint project that `ApplicationArn` declares. You can also configure your application to pass an endpoint ID in the `AnalyticsMetadata` parameter of sign-in operations. The endpoint ID is information about the destination for push notifications", "title": "ApplicationArn", "type": "string" }, @@ -46675,12 +46669,12 @@ "title": "CustomDomainConfig" }, "Domain": { - "markdownDescription": "The domain name for the domain that hosts the sign-up and sign-in pages for your application. For example: `auth.example.com` . If you're using a prefix domain, this field denotes the first part of the domain before `.auth.[region].amazoncognito.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", + "markdownDescription": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", "title": "Domain", "type": "string" }, "UserPoolId": { - "markdownDescription": "The user pool ID for the user pool where you want to associate a user pool domain.", + "markdownDescription": "The ID of the user pool that is associated with the custom domain whose certificate you're updating.", "title": "UserPoolId", "type": "string" } @@ -46943,7 +46937,7 @@ "additionalProperties": false, "properties": { "Identifier": { - "markdownDescription": "A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. For example: `https://my-weather-api.example.com` .", + "markdownDescription": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens.", "title": "Identifier", "type": "string" }, @@ -47055,7 +47049,7 @@ "title": "AccountTakeoverRiskConfiguration" }, "ClientId": { - "markdownDescription": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` ).", + "markdownDescription": "The app client where this configuration is applied. 
When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings.", "title": "ClientId", "type": "string" }, @@ -47324,7 +47318,7 @@ "type": "string" }, "ClientId": { - "markdownDescription": "The client ID for the client app. You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` ).", + "markdownDescription": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings.", "title": "ClientId", "type": "string" }, @@ -47429,7 +47423,7 @@ "items": { "$ref": "#/definitions/AWS::Cognito::UserPoolUser.AttributeType" }, - "markdownDescription": "An array of name-value pairs that contain user attributes and attribute values.", + "markdownDescription": "An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than `Username` . However, any attributes that you specify as required (when creating a user pool or in the *Attributes* tab of the console) either you should supply (in your call to `AdminCreateUser` ) or the user should supply (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . You can also do this by calling [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .\n\n- *email* : The email address of the user to whom the message that contains the code and username will be sent. Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and username will be sent.
Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter.", "title": "UserAttributes", "type": "array" }, @@ -47540,7 +47534,7 @@ "type": "string" }, "Username": { - "markdownDescription": "", + "markdownDescription": "The user's username.", "title": "Username", "type": "string" } @@ -69852,7 +69846,7 @@ "items": { "type": "string" }, - "markdownDescription": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n- To include instance types with inference hardware, specify `inference` .\n\nDefault: Any accelerator type", + "markdownDescription": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n\nDefault: Any accelerator type", "title": "AcceleratorTypes", "type": "array" }, @@ -71695,7 +71689,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::Instance.ElasticInferenceAccelerator" }, - "markdownDescription": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference (EI) is no longer available to new customers. For more information, see [Amazon Elastic Inference FAQs](https://docs.aws.amazon.com/machine-learning/elastic-inference/faqs/) .", + "markdownDescription": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference is no longer available.", "title": "ElasticInferenceAccelerators", "type": "array" }, @@ -72801,7 +72795,7 @@ "type": "number" }, "KmsKeyId": { - "markdownDescription": "The ARN of the symmetric AWS Key Management Service ( AWS KMS ) CMK used for encryption.", + "markdownDescription": "Identifier (key ID, key alias, key ARN, or alias ARN) of the customer managed KMS key to use for EBS encryption.", "title": "KmsKeyId", "type": "string" }, @@ -72953,7 +72947,7 @@ "items": { "type": "string" }, - "markdownDescription": "The accelerator types that must be on the instance type.\n\n- For instance types with GPU accelerators, specify `gpu` .\n- For instance types with FPGA accelerators, specify `fpga` .\n- For instance types with inference accelerators, specify `inference` .\n\nDefault: Any accelerator type", + "markdownDescription": "The accelerator types that must be on the instance type.\n\n- For instance types with GPU accelerators, specify `gpu` .\n- For instance types with FPGA accelerators, specify `fpga` .\n\nDefault: Any accelerator type", "title": "AcceleratorTypes", "type": "array" }, @@ -73156,7 +73150,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.LaunchTemplateElasticInferenceAccelerator" }, - "markdownDescription": "An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. 
However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", + "markdownDescription": "> Amazon Elastic Inference is no longer available. \n\nAn elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", "title": "ElasticInferenceAccelerators", "type": "array" }, @@ -73192,7 +73186,7 @@ }, "InstanceRequirements": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.InstanceRequirements", - "markdownDescription": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nYou must specify `VCpuCount` and `MemoryMiB` . All other attributes are optional. Any unspecified optional attribute is set to its default.\n\nWhen you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.\n\nTo limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:\n\n- `AllowedInstanceTypes` - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.\n- `ExcludedInstanceTypes` - The instance types to exclude from the list, even if they match your specified attributes.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .\n> \n> Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the [launch instance wizard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) , or with the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API or [AWS::EC2::Instance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) AWS CloudFormation resource, you can't specify `InstanceRequirements` . \n\nFor more information, see [Attribute-based instance type selection for EC2 Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) , [Attribute-based instance type selection for Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html) , and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The attributes for the instance types. 
When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nYou must specify `VCpuCount` and `MemoryMiB` . All other attributes are optional. Any unspecified optional attribute is set to its default.\n\nWhen you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.\n\nTo limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:\n\n- `AllowedInstanceTypes` - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.\n- `ExcludedInstanceTypes` - The instance types to exclude from the list, even if they match your specified attributes.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .\n> \n> Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the [launch instance wizard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) , or with the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API or [AWS::EC2::Instance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) AWS CloudFormation resource, you can't specify `InstanceRequirements` . \n\nFor more information, see [Specify attributes for instance type selection for EC2 Fleet or Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", "title": "InstanceRequirements" }, "InstanceType": { @@ -73482,7 +73476,7 @@ "type": "array" }, "InterfaceType": { - "markdownDescription": "The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa` . For more information, see [Elastic Fabric Adapter](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) in the *Amazon EC2 User Guide* .\n\nIf you are not creating an EFA, specify `interface` or omit this parameter.\n\nValid values: `interface` | `efa`", + "markdownDescription": "The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa` or `efa-only` . For more information, see [Elastic Fabric Adapter](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) in the *Amazon EC2 User Guide* .\n\nIf you are not creating an EFA, specify `interface` or omit this parameter.\n\nIf you specify `efa-only` , do not assign any IP addresses to the network interface. EFA-only network interfaces do not support IP addresses.\n\nValid values: `interface` | `efa` | `efa-only`", "title": "InterfaceType", "type": "string" }, @@ -76314,7 +76308,7 @@ "type": "array" }, "MaxEntries": { - "markdownDescription": "The maximum number of entries for the prefix list.
This property is required when you create a prefix list.", + "markdownDescription": "The maximum number of entries for the prefix list.", "title": "MaxEntries", "type": "number" }, @@ -77473,7 +77467,7 @@ "items": { "type": "string" }, - "markdownDescription": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n- To include instance types with inference hardware, specify `inference` .\n\nDefault: Any accelerator type", + "markdownDescription": "The accelerator types that must be on the instance type.\n\n- To include instance types with GPU hardware, specify `gpu` .\n- To include instance types with FPGA hardware, specify `fpga` .\n\nDefault: Any accelerator type", "title": "AcceleratorTypes", "type": "array" }, @@ -84101,7 +84095,7 @@ "type": "boolean" }, "FilesystemType": { - "markdownDescription": "The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.", + "markdownDescription": "The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n\nThe available Linux filesystem types are `ext3` , `ext4` , and `xfs` . If no value is specified, the `xfs` filesystem type is used by default.\n\nThe available Windows filesystem types are `NTFS` .", "title": "FilesystemType", "type": "string" }, @@ -84880,7 +84874,7 @@ "items": { "type": "string" }, - "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command", + "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. 
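The `CMD-SHELL` health check quoted above sits under a container definition's `HealthCheck`; a rough CloudFormation YAML sketch with placeholder names and an arbitrary image:

```yaml
# Illustrative sketch only: curl-based container health check.
ExampleTaskDefinition:
  Type: AWS::ECS::TaskDefinition
  Properties:
    Family: example-task                     # placeholder
    ContainerDefinitions:
      - Name: web                            # placeholder container name
        Image: nginx:latest                  # placeholder image
        Memory: 512
        HealthCheck:
          Command:
            - CMD-SHELL
            - "curl -f http://localhost/ || exit 1"
          Interval: 30                       # seconds between checks
          Timeout: 5
          Retries: 3
          StartPeriod: 10
```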
For more information, see `HealthCheck` in the docker container create command.", "title": "Command", "type": "array" }, @@ -86403,7 +86397,7 @@ "title": "Logging" }, "Name": { - "markdownDescription": "The unique name to give to your cluster.", + "markdownDescription": "The unique name to give to your cluster. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the AWS Region and AWS account that you're creating the cluster in. Note that underscores can't be used in AWS CloudFormation .", "title": "Name", "type": "string" }, @@ -89991,7 +89985,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "", + "markdownDescription": "Enables the application to automatically start on job submission.", "title": "Enabled", "type": "boolean" } @@ -90002,12 +89996,12 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "", + "markdownDescription": "Enables the application to automatically stop after a certain amount of time being idle. Defaults to true.", "title": "Enabled", "type": "boolean" }, "IdleTimeoutMinutes": { - "markdownDescription": "", + "markdownDescription": "The amount of idle time in minutes after which your application will automatically stop. Defaults to 15 minutes.", "title": "IdleTimeoutMinutes", "type": "number" } @@ -90052,7 +90046,7 @@ "additionalProperties": false, "properties": { "Classification": { - "markdownDescription": "", + "markdownDescription": "The classification within a configuration.", "title": "Classification", "type": "string" }, @@ -90060,13 +90054,13 @@ "items": { "$ref": "#/definitions/AWS::EMRServerless::Application.ConfigurationObject" }, - "markdownDescription": "", + "markdownDescription": "A list of additional configurations to apply within a configuration object.", "title": "Configurations", "type": "array" }, "Properties": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "A set of properties specified within a configuration classification.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -90259,17 +90253,17 @@ "additionalProperties": false, "properties": { "Cpu": { - "markdownDescription": "", + "markdownDescription": "The CPU requirements of the worker configuration. Each worker can have 1, 2, 4, 8, or 16 vCPUs.", "title": "Cpu", "type": "string" }, "Disk": { - "markdownDescription": "", + "markdownDescription": "The disk requirements of the worker configuration.", "title": "Disk", "type": "string" }, "Memory": { - "markdownDescription": "", + "markdownDescription": "The memory requirements of the worker configuration.", "title": "Memory", "type": "string" } @@ -90332,12 +90326,12 @@ "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Valkey 7.2 or later, or Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
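The EMR Serverless auto-start, auto-stop, and worker sizing settings described above combine roughly as follows; a hedged sketch with placeholder values:

```yaml
# Illustrative sketch only: start on job submission, stop after 15 idle minutes.
ExampleSparkApp:
  Type: AWS::EMRServerless::Application
  Properties:
    Name: example-app              # placeholder
    ReleaseLabel: emr-6.11.0       # placeholder release label
    Type: SPARK
    AutoStartConfiguration:
      Enabled: true
    AutoStopConfiguration:
      Enabled: true
      IdleTimeoutMinutes: 15       # the default called out above
    InitialCapacity:
      - Key: Driver
        Value:
          WorkerCount: 1
          WorkerConfiguration:
            Cpu: 2 vCPU            # each worker supports 1, 2, 4, 8, or 16 vCPUs
            Memory: 4 GB
            Disk: 20 GB
```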
This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Valkey and Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Valkey and Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "title": "CacheNodeType", "type": "string" }, @@ -90365,17 +90359,17 @@ "type": "string" }, "Engine": { - "markdownDescription": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | `redis`", + "markdownDescription": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | valkey | `redis`", "title": "Engine", "type": "string" }, "EngineVersion": { - "markdownDescription": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", + "markdownDescription": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", "title": "EngineVersion", "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . 
IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90388,7 +90382,7 @@ "type": "array" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90429,12 +90423,12 @@ "items": { "type": "string" }, - "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, "SnapshotName": { - "markdownDescription": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "markdownDescription": "The name of a Valkey or Redis OSS snapshot from which to restore data into the new node group (shard). 
The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "title": "SnapshotName", "type": "string" }, @@ -90610,7 +90604,7 @@ "additionalProperties": false, "properties": { "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups.", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90625,7 +90619,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Elasticache Redis OSS engine version.", + "markdownDescription": "The Elasticache Valkey or Redis OSS engine version.", "title": "EngineVersion", "type": "string" }, @@ -90736,7 +90730,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -90884,27 +90878,27 @@ "type": "boolean" }, "AuthToken": { - "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Valkey or Redis OSS users with the AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . 
\n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "AuthToken", "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Valkey 7.2 or later, or Redis OSS 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "title": "AutomaticFailoverEnabled", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. 
Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. 
Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", "title": "CacheNodeType", "type": "string" }, "CacheParameterGroupName": { - "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "markdownDescription": "The name of the parameter group to associate with this replication group. 
If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Valkey or Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Valkey or Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Valkey or Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "title": "CacheParameterGroupName", "type": "string" }, @@ -90922,22 +90916,22 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, "DataTieringEnabled": { - "markdownDescription": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html) .", + "markdownDescription": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html) .", "title": "DataTieringEnabled", "type": "boolean" }, "Engine": { - "markdownDescription": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `Redis` .", + "markdownDescription": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `valkey` or `redis` .\n\n> Upgrading an existing engine from redis to valkey is done through in-place migration, and requires a parameter group.", "title": "Engine", "type": "string" }, "EngineVersion": { - "markdownDescription": "The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. 
If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", + "markdownDescription": "The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", "title": "EngineVersion", "type": "string" }, @@ -90947,7 +90941,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90965,12 +90959,12 @@ "type": "array" }, "MultiAZEnabled": { - "markdownDescription": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) .", + "markdownDescription": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) .", "title": "MultiAZEnabled", "type": "boolean" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90978,7 +90972,7 @@ "items": { "$ref": "#/definitions/AWS::ElastiCache::ReplicationGroup.NodeGroupConfiguration" }, - "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Valkey or Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "NodeGroupConfiguration", "type": "array" }, @@ -90993,7 +90987,7 @@ "type": "number" }, "NumNodeGroups": { - "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey or Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "title": "NumNodeGroups", "type": "number" }, @@ -91047,7 +91041,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. 
The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, @@ -91067,7 +91061,7 @@ "type": "string" }, "SnapshottingClusterId": { - "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", + "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Valkey or Redis OSS (cluster mode enabled) replication groups.", "title": "SnapshottingClusterId", "type": "string" }, @@ -91085,7 +91079,7 @@ "type": "boolean" }, "TransitEncryptionMode": { - "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Valkey or Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "title": "TransitEncryptionMode", "type": "string" }, @@ -91204,7 +91198,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -91428,7 +91422,7 @@ "title": "CacheUsageLimits" }, "DailySnapshotTime": { - "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", + "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. 
Available for Valkey, Redis OSS, and Serverless Memcached only.", "title": "DailySnapshotTime", "type": "string" }, @@ -91489,7 +91483,7 @@ "type": "array" }, "SnapshotRetentionLimit": { - "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", + "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Valkey, Redis OSS, and Serverless Memcached only.", "title": "SnapshotRetentionLimit", "type": "number" }, @@ -91510,7 +91504,7 @@ "type": "array" }, "UserGroupId": { - "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.", + "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL.", "title": "UserGroupId", "type": "string" } @@ -91871,7 +91865,7 @@ "additionalProperties": false, "properties": { "Engine": { - "markdownDescription": "The current supported value is redis.", + "markdownDescription": "The current supported values are valkey and redis.", "title": "Engine", "type": "string" }, @@ -94094,7 +94088,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. 
The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. 
The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . 
The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", "title": "Key", "type": "string" }, @@ -123134,7 +123128,7 @@ "properties": { "Attributes": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "Metadata that can be used to define a package version\u2019s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.\n\nThe combined size of all the attributes on a package version is limited to 3KB.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -123144,12 +123138,12 @@ "type": "object" }, "Description": { - "markdownDescription": "", + "markdownDescription": "A summary of the package version being created. 
This can be used to outline the package's contents or purpose.", "title": "Description", "type": "string" }, "PackageName": { - "markdownDescription": "", + "markdownDescription": "The name of the associated software package.", "title": "PackageName", "type": "string" }, @@ -123157,12 +123151,12 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "", + "markdownDescription": "Metadata that can be used to manage the package version.", "title": "Tags", "type": "array" }, "VersionName": { - "markdownDescription": "", + "markdownDescription": "The name of the new package version.", "title": "VersionName", "type": "string" } @@ -133421,7 +133415,7 @@ "type": "array" }, "Role": { - "markdownDescription": "The IAM role that allows AWS IoT Wireless to access the CSV file in the S3 bucket.", + "markdownDescription": "The IAM role that allows to access the CSV file in the S3 bucket.", "title": "Role", "type": "string" }, @@ -165502,7 +165496,7 @@ "type": "string" }, "DataTiering": { - "markdownDescription": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html) .", + "markdownDescription": "Enables data tiering. Data tiering is only supported for clusters using the r6gd node type. This parameter must be set when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html) .", "title": "DataTiering", "type": "string" }, @@ -165512,7 +165506,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Redis engine version used by the cluster .", + "markdownDescription": "The Valkey or Redis OSS engine version used by the cluster .", "title": "EngineVersion", "type": "string" }, @@ -186551,7 +186545,7 @@ }, "SourceColumn": { "$ref": "#/definitions/AWS::QuickSight::Analysis.ColumnIdentifier", - "markdownDescription": "", + "markdownDescription": "A column of a data set.", "title": "SourceColumn" }, "SourceField": { @@ -199037,7 +199031,7 @@ }, "SourceColumn": { "$ref": "#/definitions/AWS::QuickSight::Dashboard.ColumnIdentifier", - "markdownDescription": "", + "markdownDescription": "A column of a data set.", "title": "SourceColumn" }, "SourceField": { @@ -209331,7 +209325,7 @@ }, "OverrideDatasetParameterOperation": { "$ref": "#/definitions/AWS::QuickSight::DataSet.OverrideDatasetParameterOperation", - "markdownDescription": "", + "markdownDescription": "A transform operation that overrides the dataset parameter values that are defined in another dataset.", "title": "OverrideDatasetParameterOperation" }, "ProjectOperation": { @@ -213536,7 +213530,7 @@ }, "SourceColumn": { "$ref": "#/definitions/AWS::QuickSight::Template.ColumnIdentifier", - "markdownDescription": "", + "markdownDescription": "A column of a data set.", "title": "SourceColumn" }, "SourceField": { @@ -222896,7 +222890,7 @@ }, "Typography": { "$ref": "#/definitions/AWS::QuickSight::Theme.Typography", - "markdownDescription": "", + "markdownDescription": "Determines the typography options.", "title": "Typography" }, "UIColorPalette": { @@ -227349,7 +227343,7 @@ "type": "object" }, "NodeType": { - "markdownDescription": "The node type to be provisioned for the cluster. 
For information about node types, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nValid Values: `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` | `ra3.16xlarge`", + "markdownDescription": "The node type to be provisioned for the cluster. For information about node types, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nValid Values: `dc2.large` | `dc2.8xlarge` | `ra3.large` | `ra3.xlplus` | `ra3.4xlarge` | `ra3.16xlarge`", "title": "NodeType", "type": "string" }, @@ -227489,7 +227483,7 @@ "type": "string" }, "S3KeyPrefix": { - "markdownDescription": "The prefix applied to the log file names.\n\nConstraints:\n\n- Cannot exceed 512 characters\n- Cannot contain spaces( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. The hexadecimal codes for invalid characters are:\n\n- x00 to x20\n- x22\n- x27\n- x5c\n- x7f or larger", + "markdownDescription": "The prefix applied to the log file names.\n\nValid characters are any letter from any language, any whitespace character, any numeric character, and the following characters: underscore ( `_` ), period ( `.` ), colon ( `:` ), slash ( `/` ), equal ( `=` ), plus ( `+` ), backslash ( `\\` ), hyphen ( `-` ), at symbol ( `@` ).", "title": "S3KeyPrefix", "type": "string" } @@ -230164,7 +230158,7 @@ "title": "Policy" }, "PolicyDescription": { - "markdownDescription": "The description for the policy.", + "markdownDescription": "Description of the resiliency policy.", "title": "PolicyDescription", "type": "string" }, @@ -233041,7 +233035,7 @@ "type": "string" }, "ProfileId": { - "markdownDescription": "ID of the Profile.", + "markdownDescription": "ID of the Profile.\n\nUpdate to this property requires update to the `ResourceId` property as well, because you can only associate one Profile per VPC. For more information, see [Route 53 Profiles](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/profiles.html) .", "title": "ProfileId", "type": "string" }, @@ -236047,7 +236041,7 @@ "items": { "$ref": "#/definitions/AWS::S3::Bucket.Rule" }, - "markdownDescription": "Specifies lifecycle configuration rules for an Amazon S3 bucket.", + "markdownDescription": "A lifecycle rule for individual objects in an Amazon S3 bucket.", "title": "Rules", "type": "array" } @@ -237348,7 +237342,7 @@ }, "BucketLevel": { "$ref": "#/definitions/AWS::S3::StorageLens.BucketLevel", - "markdownDescription": "This property contains the details of the account-level bucket-level configurations for Amazon S3 Storage Lens.", + "markdownDescription": "This property contains the details of the account-level bucket-level configurations for Amazon S3 Storage Lens. To enable bucket-level configurations, make sure to also set the same metrics at the account level.", "title": "BucketLevel" }, "DetailedStatusCodesMetrics": { @@ -245443,7 +245437,7 @@ }, "DefaultSpaceSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.DefaultSpaceSettings", - "markdownDescription": "A collection of settings that apply to spaces created in the domain.", + "markdownDescription": "The default settings for shared spaces that users create in the domain.\n\nSageMaker applies these settings only to shared spaces. 
It doesn't apply them to private spaces.", "title": "DefaultSpaceSettings" }, "DefaultUserSettings": { @@ -245921,20 +245915,20 @@ "properties": { "CodeEditorAppSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.CodeEditorAppSettings", - "markdownDescription": "The Code Editor application settings.", + "markdownDescription": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CodeEditorAppSettings" }, "CustomFileSystemConfigs": { "items": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomFileSystemConfig" }, - "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.", + "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomFileSystemConfigs", "type": "array" }, "CustomPosixUserConfig": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomPosixUserConfig", - "markdownDescription": "Details about the POSIX identity that is used for file system operations.", + "markdownDescription": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomPosixUserConfig" }, "DefaultLandingUri": { @@ -245943,13 +245937,13 @@ "type": "string" }, "ExecutionRole": { - "markdownDescription": "The execution role for the user.", + "markdownDescription": "The execution role for the user.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.", "title": "ExecutionRole", "type": "string" }, "JupyterLabAppSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.JupyterLabAppSettings", - "markdownDescription": "The settings for the JupyterLab application.", + "markdownDescription": "The settings for the JupyterLab application.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "JupyterLabAppSettings" }, "JupyterServerAppSettings": { @@ -245976,7 +245970,7 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. 
Therefore, the number of security groups that you can specify is one less than the maximum number shown.", + "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SecurityGroups", "type": "array" }, @@ -245987,7 +245981,7 @@ }, "SpaceStorageSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.DefaultSpaceStorageSettings", - "markdownDescription": "The storage settings for a space.", + "markdownDescription": "The storage settings for a space.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SpaceStorageSettings" }, "StudioWebPortal": { @@ -253612,20 +253606,20 @@ "properties": { "CodeEditorAppSettings": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CodeEditorAppSettings", - "markdownDescription": "The Code Editor application settings.", + "markdownDescription": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CodeEditorAppSettings" }, "CustomFileSystemConfigs": { "items": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomFileSystemConfig" }, - "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.", + "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomFileSystemConfigs", "type": "array" }, "CustomPosixUserConfig": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomPosixUserConfig", - "markdownDescription": "Details about the POSIX identity that is used for file system operations.", + "markdownDescription": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomPosixUserConfig" }, "DefaultLandingUri": { @@ -253634,13 +253628,13 @@ "type": "string" }, "ExecutionRole": { - "markdownDescription": "The execution role for the user.", + "markdownDescription": "The execution role for the user.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. 
SageMaker doesn't apply this setting to shared spaces.", "title": "ExecutionRole", "type": "string" }, "JupyterLabAppSettings": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.JupyterLabAppSettings", - "markdownDescription": "The settings for the JupyterLab application.", + "markdownDescription": "The settings for the JupyterLab application.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "JupyterLabAppSettings" }, "JupyterServerAppSettings": { @@ -253662,7 +253656,7 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.", + "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SecurityGroups", "type": "array" }, @@ -253673,7 +253667,7 @@ }, "SpaceStorageSettings": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.DefaultSpaceStorageSettings", - "markdownDescription": "The storage settings for a space.", + "markdownDescription": "The storage settings for a space.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SpaceStorageSettings" }, "StudioWebPortal": { @@ -263281,7 +263275,7 @@ }, "IdentityProviderDetails": { "$ref": "#/definitions/AWS::Transfer::Server.IdentityProviderDetails", - "markdownDescription": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Not required when `IdentityProviderType` is set to `SERVICE_MANAGED` .", + "markdownDescription": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. 
Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` .", "title": "IdentityProviderDetails" }, "IdentityProviderType": { @@ -271867,7 +271861,7 @@ }, "SourceConfiguration": { "$ref": "#/definitions/AWS::Wisdom::KnowledgeBase.SourceConfiguration", - "markdownDescription": "The source of the knowledge base content. Only set this argument for EXTERNAL knowledge bases.", + "markdownDescription": "The source of the knowledge base content. Only set this argument for EXTERNAL or Managed knowledge bases.", "title": "SourceConfiguration" }, "Tags": { @@ -271910,7 +271904,7 @@ "additionalProperties": false, "properties": { "AppIntegrationArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` as source fields.\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` as source fields.\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , your AppIntegrations DataIntegration must have an ObjectConfiguration if `objectFields` is not provided, including at least `id` , `title` , `updated_at` , and `draft` as source fields.\n- For [SharePoint](https://docs.aws.amazon.com/https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/sharepoint-net-server-csom-jsom-and-rest-api-index) , your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among `docx` , `pdf` , `html` , `htm` , and `txt` .\n- For [Amazon S3](https://docs.aws.amazon.com/https://aws.amazon.com/s3/) , the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. 
The `SourceURI` of your DataIntegration must use the following format: `s3://your_s3_bucket_name` .\n\n> The bucket policy of the corresponding S3 bucket must allow the AWS principal `app-integrations.amazonaws.com` to perform `s3:ListBucket` , `s3:GetObject` , and `s3:GetBucketLocation` against the bucket.", + "markdownDescription": "The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` as source fields.\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` as source fields.\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , your AppIntegrations DataIntegration must have an ObjectConfiguration if `objectFields` is not provided, including at least `id` , `title` , `updated_at` , and `draft` as source fields.\n- For [SharePoint](https://docs.aws.amazon.com/https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/sharepoint-net-server-csom-jsom-and-rest-api-index) , your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among `docx` , `pdf` , `html` , `htm` , and `txt` .\n- For [Amazon S3](https://docs.aws.amazon.com/s3/) , the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. 
The `SourceURI` of your DataIntegration must use the following format: `s3://your_s3_bucket_name` .\n\n> The bucket policy of the corresponding S3 bucket must allow the AWS principal `app-integrations.amazonaws.com` to perform `s3:ListBucket` , `s3:GetObject` , and `s3:GetBucketLocation` against the bucket.", "title": "AppIntegrationArn", "type": "string" }, From 03a37fe798f05a3752c237b7825e2416573d2990 Mon Sep 17 00:00:00 2001 From: vaibhav170 <42844029+vaibhav170@users.noreply.github.com> Date: Thu, 31 Oct 2024 19:47:15 +0530 Subject: [PATCH 4/8] feat: add introspection, query & resolver limits properties in appsync graphql api (#3668) Co-authored-by: Jacob Fuss <32497805+jfuss@users.noreply.github.com> --- .../single/graphqlapi-configuration.json | 58 ++++++++++++ .../single/graphqlapi-configuration.yaml | 94 +++++++++++++++++++ .../single/test_graphqlapi_configuration.py | 85 +++++++++++++++++ pytest.ini | 2 + samtranslator/internal/model/appsync.py | 6 ++ .../aws_serverless_graphqlapi.py | 3 + samtranslator/model/sam_resources.py | 13 +++ samtranslator/schema/schema.json | 9 ++ schema_source/sam.schema.json | 9 ++ ...i_introspection_query_resolver_limits.yaml | 17 ++++ ...i_introspection_query_resolver_limits.json | 69 ++++++++++++++ ...i_introspection_query_resolver_limits.json | 69 ++++++++++++++ ...i_introspection_query_resolver_limits.json | 69 ++++++++++++++ 13 files changed, 503 insertions(+) create mode 100644 integration/resources/expected/single/graphqlapi-configuration.json create mode 100644 integration/resources/templates/single/graphqlapi-configuration.yaml create mode 100644 integration/single/test_graphqlapi_configuration.py create mode 100644 tests/translator/input/graphqlapi_introspection_query_resolver_limits.yaml create mode 100644 tests/translator/output/aws-cn/graphqlapi_introspection_query_resolver_limits.json create mode 100644 tests/translator/output/aws-us-gov/graphqlapi_introspection_query_resolver_limits.json create mode 100644 tests/translator/output/graphqlapi_introspection_query_resolver_limits.json diff --git a/integration/resources/expected/single/graphqlapi-configuration.json b/integration/resources/expected/single/graphqlapi-configuration.json new file mode 100644 index 000000000..771f3bdd6 --- /dev/null +++ b/integration/resources/expected/single/graphqlapi-configuration.json @@ -0,0 +1,58 @@ +[ + { + "LogicalResourceId": "SuperCoolAPI", + "ResourceType": "AWS::AppSync::GraphQLApi" + }, + { + "LogicalResourceId": "SuperCoolAPICloudWatchRole", + "ResourceType": "AWS::IAM::Role" + }, + { + "LogicalResourceId": "SuperCoolAPISchema", + "ResourceType": "AWS::AppSync::GraphQLSchema" + }, + { + "LogicalResourceId": "SuperCoolAPIQuerygetBook", + "ResourceType": "AWS::AppSync::Resolver" + }, + { + "LogicalResourceId": "SuperCoolAPINoneDataSource", + "ResourceType": "AWS::AppSync::DataSource" + }, + { + "LogicalResourceId": "SuperCoolAPIprocessQuery", + "ResourceType": "AWS::AppSync::FunctionConfiguration" + }, + { + "LogicalResourceId": "SuperCoolAPIMyApiKey", + "ResourceType": "AWS::AppSync::ApiKey" + }, + { + "LogicalResourceId": "IntrospectionDisableSuperCoolAPI", + "ResourceType": "AWS::AppSync::GraphQLApi" + }, + { + "LogicalResourceId": "IntrospectionDisableSuperCoolAPICloudWatchRole", + "ResourceType": "AWS::IAM::Role" + }, + { + "LogicalResourceId": "IntrospectionDisableSuperCoolAPISchema", + "ResourceType": "AWS::AppSync::GraphQLSchema" + }, + { + "LogicalResourceId": "IntrospectionDisableSuperCoolAPIQuerygetBook", + "ResourceType": 
"AWS::AppSync::Resolver" + }, + { + "LogicalResourceId": "IntrospectionDisableSuperCoolAPINoneDataSource", + "ResourceType": "AWS::AppSync::DataSource" + }, + { + "LogicalResourceId": "IntrospectionDisableSuperCoolAPIprocessQuery", + "ResourceType": "AWS::AppSync::FunctionConfiguration" + }, + { + "LogicalResourceId": "IntrospectionDisableSuperCoolAPIMyApiKey", + "ResourceType": "AWS::AppSync::ApiKey" + } +] diff --git a/integration/resources/templates/single/graphqlapi-configuration.yaml b/integration/resources/templates/single/graphqlapi-configuration.yaml new file mode 100644 index 000000000..5afb5529a --- /dev/null +++ b/integration/resources/templates/single/graphqlapi-configuration.yaml @@ -0,0 +1,94 @@ +Transform: AWS::Serverless-2016-10-31 +Resources: + SuperCoolAPI: + Type: AWS::Serverless::GraphQLApi + Properties: + SchemaInline: | + type Book { + bookName: String + id: ID + } + type Query { getBook(bookName: String): Book } + OwnerContact: blah-blah + Auth: + Type: API_KEY + ApiKeys: + MyApiKey: {} + Functions: + processQuery: + Runtime: + Name: APPSYNC_JS + Version: 1.0.0 + DataSource: NONE + InlineCode: | + import { util } from '@aws-appsync/utils'; + + export function request(ctx) { + const id = util.autoId(); + return { payload: { ...ctx.args, id } }; + } + + export function response(ctx) { + return ctx.result; + } + Resolvers: + Query: + getBook: + Pipeline: + - processQuery + + IntrospectionDisableSuperCoolAPI: + Type: AWS::Serverless::GraphQLApi + Properties: + SchemaInline: | + type Book { + bookName: String + id: ID + } + type Query { getBook(bookName: String): Book } + OwnerContact: blah-blah + IntrospectionConfig: DISABLED + QueryDepthLimit: 10 + ResolverCountLimit: 100 + Auth: + Type: API_KEY + ApiKeys: + MyApiKey: {} + Functions: + processQuery: + Runtime: + Name: APPSYNC_JS + Version: 1.0.0 + DataSource: NONE + InlineCode: | + import { util } from '@aws-appsync/utils'; + + export function request(ctx) { + const id = util.autoId(); + return { payload: { ...ctx.args, id } }; + } + + export function response(ctx) { + return ctx.result; + } + Resolvers: + Query: + getBook: + Pipeline: + - processQuery +Outputs: + SuperCoolAPI: + Description: AppSync API + Value: !GetAtt SuperCoolAPI.GraphQLUrl + MyApiKey: + Description: API Id + Value: !GetAtt SuperCoolAPIMyApiKey.ApiKey + IntrospectionDisableSuperCoolAPI: + Description: AppSync API + Value: !GetAtt IntrospectionDisableSuperCoolAPI.GraphQLUrl + IntrospectionDisableSuperCoolAPIMyApiKey: + Description: API Id + Value: !GetAtt IntrospectionDisableSuperCoolAPIMyApiKey.ApiKey + +Metadata: + SamTransformTest: true diff --git a/integration/single/test_graphqlapi_configuration.py b/integration/single/test_graphqlapi_configuration.py new file mode 100644 index 000000000..6cddf1809 --- /dev/null +++ b/integration/single/test_graphqlapi_configuration.py @@ -0,0 +1,85 @@ +import json +from unittest.case import skipIf + +import pytest +import requests + +from integration.config.service_names import APP_SYNC +from integration.helpers.base_test import BaseTest +from integration.helpers.resource import current_region_does_not_support + + +def execute_and_verify_appsync_query(url, api_key, query): + """ + Executes a query to an AppSync GraphQLApi. + + Also checks that the response is 200 and does not contain errors before returning. 
+ """ + headers = { + "Content-Type": "application/json", + "x-api-key": api_key, + } + payload = {"query": query} + + response = requests.post(url, json=payload, headers=headers) + response.raise_for_status() + data = response.json() + if "errors" in data: + raise Exception(json.dumps(data["errors"])) + + return data + + +@skipIf(current_region_does_not_support([APP_SYNC]), "AppSync is not supported in this testing region") +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +class TestGraphQLApiConfiguration(BaseTest): + def test_api(self): + file_name = "single/graphqlapi-configuration" + self.create_and_verify_stack(file_name) + + outputs = self.get_stack_outputs() + + url = outputs["SuperCoolAPI"] + api_key = outputs["MyApiKey"] + + introspection_disable_api_url = outputs["IntrospectionDisableSuperCoolAPI"] + introspection_disable_api_key = outputs["IntrospectionDisableSuperCoolAPIMyApiKey"] + + book_name = "GoodBook" + query = f""" + query MyQuery {{ + getBook( + bookName: "{book_name}" + ) {{ + id + bookName + }} + }} + """ + + response = execute_and_verify_appsync_query(url, api_key, query) + self.assertEqual(response["data"]["getBook"]["bookName"], book_name) + + introspection_disable_query_response = execute_and_verify_appsync_query( + introspection_disable_api_url, introspection_disable_api_key, query + ) + self.assertEqual(introspection_disable_query_response["data"]["getBook"]["bookName"], book_name) + + query_introsepction = """ + query myQuery { + __schema { + types { + name + } + } + } + """ + + introspection_query_response = execute_and_verify_appsync_query(url, api_key, query_introsepction) + self.assertIsNotNone(introspection_query_response["data"]["__schema"]) + + # sending introspection query and expecting error as introspection is DISABLED for this API using template file + with self.assertRaises(Exception): + execute_and_verify_appsync_query( + introspection_disable_api_url, introspection_disable_api_key, query_introsepction + ) diff --git a/pytest.ini b/pytest.ini index cd1ec10a7..a83694b93 100644 --- a/pytest.ini +++ b/pytest.ini @@ -20,3 +20,5 @@ filterwarnings = ignore::pytest.PytestUnraisableExceptionWarning # https://github.com/urllib3/urllib3/blob/main/src/urllib3/poolmanager.py#L313 ignore::DeprecationWarning:urllib3.*: + # https://github.com/boto/boto3/issues/3889 + ignore:datetime.datetime.utcnow diff --git a/samtranslator/internal/model/appsync.py b/samtranslator/internal/model/appsync.py index de81660c5..45bbf9f3f 100644 --- a/samtranslator/internal/model/appsync.py +++ b/samtranslator/internal/model/appsync.py @@ -115,6 +115,9 @@ class GraphQLApi(Resource): "AdditionalAuthenticationProviders": GeneratedProperty(), "Visibility": GeneratedProperty(), "OwnerContact": GeneratedProperty(), + "IntrospectionConfig": GeneratedProperty(), + "QueryDepthLimit": GeneratedProperty(), + "ResolverCountLimit": GeneratedProperty(), } Name: str @@ -128,6 +131,9 @@ class GraphQLApi(Resource): LogConfig: Optional[LogConfigType] Visibility: Optional[str] OwnerContact: Optional[str] + IntrospectionConfig: Optional[str] + QueryDepthLimit: Optional[int] + ResolverCountLimit: Optional[int] runtime_attrs = {"api_id": lambda self: fnGetAtt(self.logical_id, "ApiId")} diff --git a/samtranslator/internal/schema_source/aws_serverless_graphqlapi.py b/samtranslator/internal/schema_source/aws_serverless_graphqlapi.py index b731abee7..ff6bd017c 100644 --- a/samtranslator/internal/schema_source/aws_serverless_graphqlapi.py +++ 
b/samtranslator/internal/schema_source/aws_serverless_graphqlapi.py @@ -164,6 +164,9 @@ class Properties(BaseModel): Cache: Optional[Cache] Visibility: Optional[PassThroughProp] OwnerContact: Optional[PassThroughProp] + IntrospectionConfig: Optional[PassThroughProp] + QueryDepthLimit: Optional[PassThroughProp] + ResolverCountLimit: Optional[PassThroughProp] class Resource(BaseModel): diff --git a/samtranslator/model/sam_resources.py b/samtranslator/model/sam_resources.py index 37a6deb06..8f3b6492f 100644 --- a/samtranslator/model/sam_resources.py +++ b/samtranslator/model/sam_resources.py @@ -2226,6 +2226,9 @@ class SamGraphQLApi(SamResourceMacro): "Cache": Property(False, IS_DICT), "Visibility": PassThroughProperty(False), "OwnerContact": PassThroughProperty(False), + "IntrospectionConfig": PassThroughProperty(False), + "QueryDepthLimit": PassThroughProperty(False), + "ResolverCountLimit": PassThroughProperty(False), } Auth: List[Dict[str, Any]] @@ -2243,6 +2246,9 @@ class SamGraphQLApi(SamResourceMacro): Cache: Optional[Dict[str, Any]] Visibility: Optional[PassThrough] OwnerContact: Optional[PassThrough] + IntrospectionConfig: Optional[PassThrough] + QueryDepthLimit: Optional[PassThrough] + ResolverCountLimit: Optional[PassThrough] # stop validation so we can use class variables for tracking state validate_setattr = False @@ -2322,6 +2328,13 @@ def _construct_appsync_api_resources( api.OwnerContact = passthrough_value(model.OwnerContact) api.XrayEnabled = model.XrayEnabled + if model.IntrospectionConfig: + api.IntrospectionConfig = passthrough_value(model.IntrospectionConfig) + if model.QueryDepthLimit: + api.QueryDepthLimit = passthrough_value(model.QueryDepthLimit) + if model.ResolverCountLimit: + api.ResolverCountLimit = passthrough_value(model.ResolverCountLimit) + lambda_auth_arns = self._parse_and_set_auth_properties(api, model.Auth) auth_connectors = [ self._construct_lambda_auth_connector(api, arn, i) for i, arn in enumerate(lambda_auth_arns, 1) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 556c36c6d..063ae65b2 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -279729,6 +279729,9 @@ "title": "Functions", "type": "object" }, + "IntrospectionConfig": { + "$ref": "#/definitions/PassThroughProp" + }, "Logging": { "anyOf": [ { @@ -279746,6 +279749,12 @@ "OwnerContact": { "$ref": "#/definitions/PassThroughProp" }, + "QueryDepthLimit": { + "$ref": "#/definitions/PassThroughProp" + }, + "ResolverCountLimit": { + "$ref": "#/definitions/PassThroughProp" + }, "Resolvers": { "additionalProperties": { "additionalProperties": { diff --git a/schema_source/sam.schema.json b/schema_source/sam.schema.json index 9900d9de1..60c1746ee 100644 --- a/schema_source/sam.schema.json +++ b/schema_source/sam.schema.json @@ -6659,6 +6659,9 @@ "title": "Functions", "type": "object" }, + "IntrospectionConfig": { + "$ref": "#/definitions/PassThroughProp" + }, "Logging": { "anyOf": [ { @@ -6676,6 +6679,12 @@ "OwnerContact": { "$ref": "#/definitions/PassThroughProp" }, + "QueryDepthLimit": { + "$ref": "#/definitions/PassThroughProp" + }, + "ResolverCountLimit": { + "$ref": "#/definitions/PassThroughProp" + }, "Resolvers": { "additionalProperties": { "additionalProperties": { diff --git a/tests/translator/input/graphqlapi_introspection_query_resolver_limits.yaml b/tests/translator/input/graphqlapi_introspection_query_resolver_limits.yaml new file mode 100644 index 000000000..6df2fd9cc --- /dev/null +++ 
b/tests/translator/input/graphqlapi_introspection_query_resolver_limits.yaml @@ -0,0 +1,17 @@ +Transform: AWS::Serverless-2016-10-31 +Resources: + SuperCoolAPI: + Type: AWS::Serverless::GraphQLApi + Properties: + SchemaInline: | + type Book { + bookName: String + } + type Query { getBook(bookName: String): Book } + Visibility: PRIVATE + OwnerContact: blah-blah + IntrospectionConfig: DISABLED + QueryDepthLimit: 10 + ResolverCountLimit: 100 + Auth: + Type: AWS_IAM diff --git a/tests/translator/output/aws-cn/graphqlapi_introspection_query_resolver_limits.json b/tests/translator/output/aws-cn/graphqlapi_introspection_query_resolver_limits.json new file mode 100644 index 000000000..fa83ad16d --- /dev/null +++ b/tests/translator/output/aws-cn/graphqlapi_introspection_query_resolver_limits.json @@ -0,0 +1,69 @@ +{ + "Resources": { + "SuperCoolAPI": { + "Properties": { + "AuthenticationType": "AWS_IAM", + "IntrospectionConfig": "DISABLED", + "LogConfig": { + "CloudWatchLogsRoleArn": { + "Fn::GetAtt": [ + "SuperCoolAPICloudWatchRole", + "Arn" + ] + }, + "FieldLogLevel": "ALL" + }, + "Name": "SuperCoolAPI", + "OwnerContact": "blah-blah", + "QueryDepthLimit": 10, + "ResolverCountLimit": 100, + "Tags": [ + { + "Key": "graphqlapi:createdBy", + "Value": "SAM" + } + ], + "Visibility": "PRIVATE" + }, + "Type": "AWS::AppSync::GraphQLApi" + }, + "SuperCoolAPICloudWatchRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "appsync.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSAppSyncPushToCloudWatchLogs" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "SuperCoolAPISchema": { + "Properties": { + "ApiId": { + "Fn::GetAtt": [ + "SuperCoolAPI", + "ApiId" + ] + }, + "Definition": "type Book {\n bookName: String\n} \ntype Query { getBook(bookName: String): Book }\n" + }, + "Type": "AWS::AppSync::GraphQLSchema" + } + } +} diff --git a/tests/translator/output/aws-us-gov/graphqlapi_introspection_query_resolver_limits.json b/tests/translator/output/aws-us-gov/graphqlapi_introspection_query_resolver_limits.json new file mode 100644 index 000000000..fa83ad16d --- /dev/null +++ b/tests/translator/output/aws-us-gov/graphqlapi_introspection_query_resolver_limits.json @@ -0,0 +1,69 @@ +{ + "Resources": { + "SuperCoolAPI": { + "Properties": { + "AuthenticationType": "AWS_IAM", + "IntrospectionConfig": "DISABLED", + "LogConfig": { + "CloudWatchLogsRoleArn": { + "Fn::GetAtt": [ + "SuperCoolAPICloudWatchRole", + "Arn" + ] + }, + "FieldLogLevel": "ALL" + }, + "Name": "SuperCoolAPI", + "OwnerContact": "blah-blah", + "QueryDepthLimit": 10, + "ResolverCountLimit": 100, + "Tags": [ + { + "Key": "graphqlapi:createdBy", + "Value": "SAM" + } + ], + "Visibility": "PRIVATE" + }, + "Type": "AWS::AppSync::GraphQLApi" + }, + "SuperCoolAPICloudWatchRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "appsync.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSAppSyncPushToCloudWatchLogs" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "SuperCoolAPISchema": { + "Properties": { + "ApiId": { + "Fn::GetAtt": [ + "SuperCoolAPI", + "ApiId" + ] + }, + "Definition": "type Book {\n bookName: 
String\n} \ntype Query { getBook(bookName: String): Book }\n" + }, + "Type": "AWS::AppSync::GraphQLSchema" + } + } +} diff --git a/tests/translator/output/graphqlapi_introspection_query_resolver_limits.json b/tests/translator/output/graphqlapi_introspection_query_resolver_limits.json new file mode 100644 index 000000000..fa83ad16d --- /dev/null +++ b/tests/translator/output/graphqlapi_introspection_query_resolver_limits.json @@ -0,0 +1,69 @@ +{ + "Resources": { + "SuperCoolAPI": { + "Properties": { + "AuthenticationType": "AWS_IAM", + "IntrospectionConfig": "DISABLED", + "LogConfig": { + "CloudWatchLogsRoleArn": { + "Fn::GetAtt": [ + "SuperCoolAPICloudWatchRole", + "Arn" + ] + }, + "FieldLogLevel": "ALL" + }, + "Name": "SuperCoolAPI", + "OwnerContact": "blah-blah", + "QueryDepthLimit": 10, + "ResolverCountLimit": 100, + "Tags": [ + { + "Key": "graphqlapi:createdBy", + "Value": "SAM" + } + ], + "Visibility": "PRIVATE" + }, + "Type": "AWS::AppSync::GraphQLApi" + }, + "SuperCoolAPICloudWatchRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "appsync.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSAppSyncPushToCloudWatchLogs" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "SuperCoolAPISchema": { + "Properties": { + "ApiId": { + "Fn::GetAtt": [ + "SuperCoolAPI", + "ApiId" + ] + }, + "Definition": "type Book {\n bookName: String\n} \ntype Query { getBook(bookName: String): Book }\n" + }, + "Type": "AWS::AppSync::GraphQLSchema" + } + } +} From 5892219f5ccd0b1c9509fe3b5d56fc506161c746 Mon Sep 17 00:00:00 2001 From: Godwin Rose Samuel Date: Fri, 1 Nov 2024 12:10:29 -0700 Subject: [PATCH 5/8] fix:skip tests if eventrule to eventbus feature aren't available in specific regions (#3673) --- integration/combination/test_connectors.py | 2 - .../test_connectors_event_rule_eb.py | 53 +++++++++++++++++++ integration/config/service_names.py | 1 + 3 files changed, 54 insertions(+), 2 deletions(-) create mode 100644 integration/combination/test_connectors_event_rule_eb.py diff --git a/integration/combination/test_connectors.py b/integration/combination/test_connectors.py index 63b156038..c4965dd48 100644 --- a/integration/combination/test_connectors.py +++ b/integration/combination/test_connectors.py @@ -80,8 +80,6 @@ def tearDown(self): ("combination/connector_event_rule_to_sqs_write",), ("combination/connector_event_rule_to_sns_write",), ("combination/connector_event_rule_to_sfn_write",), - ("combination/connector_event_rule_to_eb_default_write",), - ("combination/connector_event_rule_to_eb_custom_write",), ("combination/connector_event_rule_to_lambda_write",), ("combination/connector_event_rule_to_lambda_write_multiple",), ("combination/connector_sqs_to_function",), diff --git a/integration/combination/test_connectors_event_rule_eb.py b/integration/combination/test_connectors_event_rule_eb.py new file mode 100644 index 000000000..206b767e5 --- /dev/null +++ b/integration/combination/test_connectors_event_rule_eb.py @@ -0,0 +1,53 @@ +from unittest import SkipTest +from unittest.case import skipIf + +from parameterized import parameterized +from tenacity import retry, retry_if_exception, stop_after_attempt + +from integration.config.service_names import EVENT_RULE_WITH_EVENT_BUS +from integration.conftest import clean_bucket +from integration.helpers.base_test import BaseTest 
+from integration.helpers.resource import current_region_does_not_support + +retry_once = retry( + stop=stop_after_attempt(2), + # unittest raises SkipTest for skipping tests + retry=retry_if_exception(lambda e: not isinstance(e, SkipTest)), +) + + +@skipIf( + current_region_does_not_support([EVENT_RULE_WITH_EVENT_BUS]), + "EVENT_RULE_WITH_EVENT_BUS is not supported in this testing region", +) +class TestConnectorsWithEventRuleToEB(BaseTest): + def tearDown(self): + # Some tests will create items in S3 Bucket, which result in stack DELETE_FAILED state + # manually empty the bucket to allow stacks to be deleted successfully. + bucket_name = self.get_physical_id_by_type("AWS::S3::Bucket") + if bucket_name: + clean_bucket(bucket_name, self.client_provider.s3_client) + super().tearDown() + + @parameterized.expand( + [ + ("combination/connector_event_rule_to_eb_default_write",), + ("combination/connector_event_rule_to_eb_custom_write",), + ] + ) + @retry_once + def test_connector_event_rule_eb_by_invoking_a_function(self, template_file_path): + self.skip_using_service_detector(template_file_path) + self.create_and_verify_stack(template_file_path) + + lambda_function_name = self.get_physical_id_by_logical_id("TriggerFunction") + lambda_client = self.client_provider.lambda_client + + request_params = { + "FunctionName": lambda_function_name, + "InvocationType": "RequestResponse", + "Payload": "{}", + } + response = lambda_client.invoke(**request_params) + self.assertEqual(response.get("StatusCode"), 200) + self.assertEqual(response.get("FunctionError"), None) diff --git a/integration/config/service_names.py b/integration/config/service_names.py index 14f96b3e1..b4a2c2626 100644 --- a/integration/config/service_names.py +++ b/integration/config/service_names.py @@ -36,3 +36,4 @@ APP_SYNC = "AppSync" SNS_FILTER_POLICY_SCOPE = "SnsFilterPolicyScope" LOGS = "Logs" +EVENT_RULE_WITH_EVENT_BUS = "EventRuleWithEventBus" From 51b69948efa593bc8bfdbfe8efe9093e6336e4af Mon Sep 17 00:00:00 2001 From: elmaimbo <76469980+elmaimbo@users.noreply.github.com> Date: Mon, 4 Nov 2024 20:57:03 +1300 Subject: [PATCH 6/8] Added kms:GenerateDataKey action to KMSEncryptPolicy policy (#3657) Co-authored-by: Nick Tait Co-authored-by: Aayush thapa <84202325+aaythapa@users.noreply.github.com> --- .../policy_templates.json | 32 +++++++++++++++++++ .../input/all_policy_templates.yaml | 3 ++ .../output/all_policy_templates.json | 25 +++++++++++++++ .../output/aws-cn/all_policy_templates.json | 25 +++++++++++++++ .../aws-us-gov/all_policy_templates.json | 25 +++++++++++++++ 5 files changed, 110 insertions(+) diff --git a/samtranslator/policy_templates_data/policy_templates.json b/samtranslator/policy_templates_data/policy_templates.json index 6ad389fba..e50f44792 100644 --- a/samtranslator/policy_templates_data/policy_templates.json +++ b/samtranslator/policy_templates_data/policy_templates.json @@ -1275,6 +1275,38 @@ } } }, + "KMSEncryptPolicy_v2": { + "Definition": { + "Statement": [ + { + "Action": [ + "kms:Encrypt", + "kms:GenerateDataKey", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:GenerateDataKeyPair", + "kms:GenerateDataKeyPairWithoutPlaintext" + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": [ + "arn:${AWS::Partition}:kms:${AWS::Region}:${AWS::AccountId}:key/${keyId}", + { + "keyId": { + "Ref": "KeyId" + } + } + ] + } + } + ] + }, + "Description": "Gives permission to encrypt with KMS Key", + "Parameters": { + "KeyId": { + "Description": "ID of the KMS Key" + } + } + }, "KinesisCrudPolicy": { "Definition": { 
"Statement": [ diff --git a/tests/translator/input/all_policy_templates.yaml b/tests/translator/input/all_policy_templates.yaml index c2666a9c5..ab1f8d596 100644 --- a/tests/translator/input/all_policy_templates.yaml +++ b/tests/translator/input/all_policy_templates.yaml @@ -187,3 +187,6 @@ Resources: - StepFunctionsCallbackPolicy: StateMachineName: name + + - KMSEncryptPolicy_v2: + KeyId: keyId diff --git a/tests/translator/output/all_policy_templates.json b/tests/translator/output/all_policy_templates.json index 07507ade0..c42a0314a 100644 --- a/tests/translator/output/all_policy_templates.json +++ b/tests/translator/output/all_policy_templates.json @@ -1726,6 +1726,31 @@ ] }, "PolicyName": "KitchenSinkFunctionRolePolicy63" + }, + { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "kms:Encrypt", + "kms:GenerateDataKey", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:GenerateDataKeyPair", + "kms:GenerateDataKeyPairWithoutPlaintext" + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": [ + "arn:${AWS::Partition}:kms:${AWS::Region}:${AWS::AccountId}:key/${keyId}", + { + "keyId": "keyId" + } + ] + } + } + ] + }, + "PolicyName": "KitchenSinkFunctionRolePolicy64" } ], "Tags": [ diff --git a/tests/translator/output/aws-cn/all_policy_templates.json b/tests/translator/output/aws-cn/all_policy_templates.json index 8a915b89a..7a6f70009 100644 --- a/tests/translator/output/aws-cn/all_policy_templates.json +++ b/tests/translator/output/aws-cn/all_policy_templates.json @@ -1726,6 +1726,31 @@ ] }, "PolicyName": "KitchenSinkFunctionRolePolicy63" + }, + { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "kms:Encrypt", + "kms:GenerateDataKey", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:GenerateDataKeyPair", + "kms:GenerateDataKeyPairWithoutPlaintext" + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": [ + "arn:${AWS::Partition}:kms:${AWS::Region}:${AWS::AccountId}:key/${keyId}", + { + "keyId": "keyId" + } + ] + } + } + ] + }, + "PolicyName": "KitchenSinkFunctionRolePolicy64" } ], "Tags": [ diff --git a/tests/translator/output/aws-us-gov/all_policy_templates.json b/tests/translator/output/aws-us-gov/all_policy_templates.json index 3dc4ef5c5..bc6e666d9 100644 --- a/tests/translator/output/aws-us-gov/all_policy_templates.json +++ b/tests/translator/output/aws-us-gov/all_policy_templates.json @@ -1726,6 +1726,31 @@ ] }, "PolicyName": "KitchenSinkFunctionRolePolicy63" + }, + { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "kms:Encrypt", + "kms:GenerateDataKey", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:GenerateDataKeyPair", + "kms:GenerateDataKeyPairWithoutPlaintext" + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": [ + "arn:${AWS::Partition}:kms:${AWS::Region}:${AWS::AccountId}:key/${keyId}", + { + "keyId": "keyId" + } + ] + } + } + ] + }, + "PolicyName": "KitchenSinkFunctionRolePolicy64" } ], "Tags": [ From fa7549f620fc51601811ce8eb1f8c4d6eaee2402 Mon Sep 17 00:00:00 2001 From: Aayush thapa <84202325+aaythapa@users.noreply.github.com> Date: Wed, 6 Nov 2024 14:09:21 +0545 Subject: [PATCH 7/8] feat: add property to update lambda version when lambda layer is updated (#3661) --- docs/globals.rst | 5 + .../combination/test_function_with_alias.py | 36 +++++++ integration/config/file_to_s3_map.json | 4 + integration/helpers/file_resources.py | 1 + integration/resources/code/layer2.zip | Bin 0 -> 2175 bytes ...lias_all_properties_and_layer_version.json | 22 ++++ ...lias_all_properties_and_layer_version.yaml | 21 ++++ .../aws_serverless_layerversion.py | 5 + 
.../internal/schema_source/schema.py | 1 + samtranslator/model/sam_resources.py | 37 ++++++- samtranslator/plugins/globals/globals.py | 1 + samtranslator/schema/schema.json | 18 ++++ schema_source/sam.schema.json | 18 ++++ tests/model/test_sam_resources.py | 5 + ...and_lambda_layer_publishversion_false.yaml | 20 ++++ ..._and_lambda_layer_publishversion_true.yaml | 20 ++++ ...and_lambda_layer_publishversion_false.json | 99 ++++++++++++++++++ ..._and_lambda_layer_publishversion_true.json | 99 ++++++++++++++++++ ...and_lambda_layer_publishversion_false.json | 99 ++++++++++++++++++ ..._and_lambda_layer_publishversion_true.json | 99 ++++++++++++++++++ .../error_globals_unsupported_type.json | 6 +- ...and_lambda_layer_publishversion_false.json | 99 ++++++++++++++++++ ..._and_lambda_layer_publishversion_true.json | 99 ++++++++++++++++++ tests/translator/test_function_resources.py | 62 ++++++++--- 24 files changed, 855 insertions(+), 21 deletions(-) create mode 100644 integration/resources/code/layer2.zip create mode 100644 integration/resources/expected/combination/function_with_alias_all_properties_and_layer_version.json create mode 100644 integration/resources/templates/combination/function_with_alias_all_properties_and_layer_version.yaml create mode 100644 tests/translator/input/function_with_aliasallproperties_and_lambda_layer_publishversion_false.yaml create mode 100644 tests/translator/input/function_with_aliasallproperties_and_lambda_layer_publishversion_true.yaml create mode 100644 tests/translator/output/aws-cn/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json create mode 100644 tests/translator/output/aws-cn/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json create mode 100644 tests/translator/output/aws-us-gov/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json create mode 100644 tests/translator/output/aws-us-gov/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json create mode 100644 tests/translator/output/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json create mode 100644 tests/translator/output/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json diff --git a/docs/globals.rst b/docs/globals.rst index 3fe3d2275..c7615c4a0 100644 --- a/docs/globals.rst +++ b/docs/globals.rst @@ -75,6 +75,7 @@ Currently, the following resources and properties are being supported: EphemeralStorage: RuntimeManagementConfig: LoggingConfig: + FileSystemConfigs: Api: # Properties of AWS::Serverless::Api @@ -113,6 +114,10 @@ Currently, the following resources and properties are being supported: # Properties of AWS::Serverless::SimpleTable SSESpecification: + LayerVersion: + # Properties of AWS::Serverless::LayerVersion + PublishLambdaVersion: + Implicit APIs ~~~~~~~~~~~~~ diff --git a/integration/combination/test_function_with_alias.py b/integration/combination/test_function_with_alias.py index d2f6c572e..792f377a1 100644 --- a/integration/combination/test_function_with_alias.py +++ b/integration/combination/test_function_with_alias.py @@ -160,6 +160,42 @@ def test_alias_with_event_sources_get_correct_permissions(self): function_policy = json.loads(function_policy_str) self.assertEqual(len(function_policy["Statement"]), len(permission_resources)) + def test_function_with_alias_and_layer_version(self): + self.create_and_verify_stack("combination/function_with_alias_all_properties_and_layer_version") + alias_name = "Live" + function_name = 
self.get_physical_id_by_type("AWS::Lambda::Function") + version_ids = self.get_function_version_by_name(function_name) + self.assertEqual(["1"], version_ids) + + alias = self.get_alias(function_name, alias_name) + self.assertEqual("1", alias["FunctionVersion"]) + + # Changing Description in the LayerVersion should create a new version, and leave the existing version intact + self.set_template_resource_property("MyLayer", "Description", "test123") + self.update_stack() + + version_ids = self.get_function_version_by_name(function_name) + self.assertEqual(["1", "2"], version_ids) + + alias = self.get_alias(function_name, alias_name) + self.assertEqual("2", alias["FunctionVersion"]) + + # Changing ContentUri in LayerVersion should create a new version, and leave the existing version intact + self.set_template_resource_property("MyLayer", "ContentUri", self.file_to_s3_uri_map["layer2.zip"]["uri"]) + self.update_stack() + + version_ids = self.get_function_version_by_name(function_name) + self.assertEqual(["1", "2", "3"], version_ids) + + alias = self.get_alias(function_name, alias_name) + self.assertEqual("3", alias["FunctionVersion"]) + + # Make sure the stack has only One Version & One Alias resource + alias = self.get_stack_resources("AWS::Lambda::Alias") + versions = self.get_stack_resources("AWS::Lambda::Version") + self.assertEqual(len(alias), 1) + self.assertEqual(len(versions), 1) + def get_function_version_by_name(self, function_name): lambda_client = self.client_provider.lambda_client versions = lambda_client.list_versions_by_function(FunctionName=function_name)["Versions"] diff --git a/integration/config/file_to_s3_map.json b/integration/config/file_to_s3_map.json index 204409bbd..bdffb17e7 100644 --- a/integration/config/file_to_s3_map.json +++ b/integration/config/file_to_s3_map.json @@ -23,6 +23,10 @@ "type": "s3", "uri": "" }, + "layer2.zip": { + "type": "s3", + "uri": "" + }, "swagger1.json": { "type": "s3", "uri": "" diff --git a/integration/helpers/file_resources.py b/integration/helpers/file_resources.py index 5ee533ca8..da68e465f 100644 --- a/integration/helpers/file_resources.py +++ b/integration/helpers/file_resources.py @@ -2,6 +2,7 @@ "code.zip": {"type": "s3", "uri": ""}, "code2.zip": {"type": "s3", "uri": ""}, "layer1.zip": {"type": "s3", "uri": ""}, + "layer2.zip": {"type": "s3", "uri": ""}, "swagger1.json": {"type": "s3", "uri": ""}, "swagger2.json": {"type": "s3", "uri": ""}, "binary-media.zip": {"type": "s3", "uri": ""}, diff --git a/integration/resources/code/layer2.zip b/integration/resources/code/layer2.zip new file mode 100644 index 0000000000000000000000000000000000000000..4fa17883af5904dac4e80054aff03ac242592095 GIT binary patch literal 2175 zcmWIWW@Zs#00IA2&qy!>O0Y91FytgwrWP6LhlcPnuy23Gmc9dsL3C*aHv=Qf4@L$C zu;u`;Ash@GD26Ns8YBobBtG8P(b+#ZLSHW)VjhZdF>f)9OHL3?PESZm_~7dk_JKdF zBcOq4f&{a=fHY&FqCoSHZpjX&K1D^x05y-}JPOa;f)8~Fi2I&9f7M$@AR$q$F7+7w-7>QhuEpLx49shl9xnr7)ngKu&`=8syn#G@r{Ld|sTHTac3) zUrgnWE_W?p7Vd_2r6w>hRngYqoae9I290S*x1iEJq-JdKDCPh9p7U{EqLi7?|X zj+{IUF1Owg%x_pB@3v~gMqh=u0STqMPPt8!p+EO8)Ph~c!Ghqj=z9RsGG4S zaCpYUSy&-F0 Dict[str, An dispatch_function: Callable[..., Dict[str, Any]] = artifact_dispatch[filtered_key] return dispatch_function(artifacts[filtered_key], self.logical_id, filtered_key) - def _construct_version( - self, function: LambdaFunction, intrinsics_resolver: IntrinsicsResolver, code_sha256: Optional[str] = None + def _construct_version( # noqa: PLR0912 + self, + function: LambdaFunction, + intrinsics_resolver: IntrinsicsResolver, + 
resource_resolver: ResourceResolver, + code_sha256: Optional[str] = None, ) -> LambdaVersion: """Constructs a Lambda Version resource that will be auto-published when CodeUri of the function changes. Old versions will not be deleted without a direct reference from the CloudFormation template. @@ -929,6 +938,26 @@ def _construct_version( # property that when set to true would change the lambda version whenever a property in the lambda function changes if self.AutoPublishAliasAllProperties: properties = function._generate_resource_dict().get("Properties", {}) + + # When a Lambda LayerVersion resource is updated, a new Lambda layer is created. + # However, we need the Lambda function to automatically create a new version + # and use the new layer. By setting the `PublishLambdaVersion` property to true, + # a new Lambda function version will be created when the layer version is updated. + if function.Layers: + for layer in function.Layers: + layer_logical_id = get_logical_id_from_intrinsic(layer) + if not layer_logical_id: + continue + + layer_resource = resource_resolver.get_resource_by_logical_id(layer_logical_id) + if not layer_resource: + continue + + layer_properties = layer_resource.get("Properties", {}) + publish_lambda_version = layer_properties.get("PublishLambdaVersion", False) + if publish_lambda_version: + properties.update({layer_logical_id: layer_properties}) + logical_dict = properties else: with suppress(AttributeError, UnboundLocalError): @@ -1596,6 +1625,7 @@ class SamLayerVersion(SamResourceMacro): property_types = { "LayerName": PropertyType(False, one_of(IS_STR, IS_DICT)), "Description": PropertyType(False, IS_STR), + "PublishLambdaVersion": PropertyType(False, IS_BOOL), "ContentUri": PropertyType(True, one_of(IS_STR, IS_DICT)), "CompatibleArchitectures": PropertyType(False, list_of(one_of(IS_STR, IS_DICT))), "CompatibleRuntimes": PropertyType(False, list_of(one_of(IS_STR, IS_DICT))), @@ -1605,6 +1635,7 @@ class SamLayerVersion(SamResourceMacro): LayerName: Optional[Intrinsicable[str]] Description: Optional[Intrinsicable[str]] + PublishLambdaVersion: Optional[bool] ContentUri: Dict[str, Any] CompatibleArchitectures: Optional[List[Any]] CompatibleRuntimes: Optional[List[Any]] diff --git a/samtranslator/plugins/globals/globals.py b/samtranslator/plugins/globals/globals.py index d6836ff52..fdc7b3835 100644 --- a/samtranslator/plugins/globals/globals.py +++ b/samtranslator/plugins/globals/globals.py @@ -96,6 +96,7 @@ class Globals: ], SamResourceType.SimpleTable.value: ["SSESpecification"], SamResourceType.StateMachine.value: ["PropagateTags"], + SamResourceType.LambdaLayerVersion.value: ["PublishLambdaVersion"], } # unreleased_properties *must be* part of supported_properties too unreleased_properties: Dict[str, List[str]] = { diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 063ae65b2..6d4f0dca9 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -277235,6 +277235,9 @@ "HttpApi": { "$ref": "#/definitions/samtranslator__internal__schema_source__aws_serverless_httpapi__Globals" }, + "LayerVersion": { + "$ref": "#/definitions/samtranslator__internal__schema_source__aws_serverless_layerversion__Globals" + }, "SimpleTable": { "$ref": "#/definitions/samtranslator__internal__schema_source__aws_serverless_simpletable__Globals" }, @@ -280291,6 +280294,17 @@ "title": "Route53", "type": "object" }, + "samtranslator__internal__schema_source__aws_serverless_layerversion__Globals": { + "additionalProperties": false, 
+ "properties": { + "PublishLambdaVersion": { + "title": "Publishlambdaversion", + "type": "boolean" + } + }, + "title": "Globals", + "type": "object" + }, "samtranslator__internal__schema_source__aws_serverless_layerversion__Properties": { "additionalProperties": false, "properties": { @@ -280341,6 +280355,10 @@ "title": "LicenseInfo", "type": "string" }, + "PublishLambdaVersion": { + "title": "Publishlambdaversion", + "type": "boolean" + }, "RetentionPolicy": { "anyOf": [ { diff --git a/schema_source/sam.schema.json b/schema_source/sam.schema.json index 60c1746ee..e1b638631 100644 --- a/schema_source/sam.schema.json +++ b/schema_source/sam.schema.json @@ -3413,6 +3413,9 @@ "HttpApi": { "$ref": "#/definitions/samtranslator__internal__schema_source__aws_serverless_httpapi__Globals" }, + "LayerVersion": { + "$ref": "#/definitions/samtranslator__internal__schema_source__aws_serverless_layerversion__Globals" + }, "SimpleTable": { "$ref": "#/definitions/samtranslator__internal__schema_source__aws_serverless_simpletable__Globals" }, @@ -7221,6 +7224,17 @@ "title": "Route53", "type": "object" }, + "samtranslator__internal__schema_source__aws_serverless_layerversion__Globals": { + "additionalProperties": false, + "properties": { + "PublishLambdaVersion": { + "title": "Publishlambdaversion", + "type": "boolean" + } + }, + "title": "Globals", + "type": "object" + }, "samtranslator__internal__schema_source__aws_serverless_layerversion__Properties": { "additionalProperties": false, "properties": { @@ -7321,6 +7335,10 @@ ], "title": "LicenseInfo" }, + "PublishLambdaVersion": { + "title": "Publishlambdaversion", + "type": "boolean" + }, "RetentionPolicy": { "anyOf": [ { diff --git a/tests/model/test_sam_resources.py b/tests/model/test_sam_resources.py index 9aed256a2..9b6c28908 100644 --- a/tests/model/test_sam_resources.py +++ b/tests/model/test_sam_resources.py @@ -24,6 +24,7 @@ class TestArchitecture(TestCase): "intrinsics_resolver": IntrinsicsResolver({}), "event_resources": [], "managed_policy_map": {"foo": "bar"}, + "resource_resolver": ResourceResolver({}), } @patch("boto3.session.Session.region_name", "ap-southeast-1") @@ -60,6 +61,7 @@ class TestCodeUriandImageUri(TestCase): "intrinsics_resolver": IntrinsicsResolver({}), "event_resources": [], "managed_policy_map": {"foo": "bar"}, + "resource_resolver": ResourceResolver({}), } @patch("boto3.session.Session.region_name", "ap-southeast-1") @@ -143,6 +145,7 @@ class TestAssumeRolePolicyDocument(TestCase): "intrinsics_resolver": IntrinsicsResolver({}), "event_resources": [], "managed_policy_map": {"foo": "bar"}, + "resource_resolver": ResourceResolver({}), } @patch("boto3.session.Session.region_name", "ap-southeast-1") @@ -193,6 +196,7 @@ class TestVersionDescription(TestCase): "intrinsics_resolver": IntrinsicsResolver({}), "event_resources": [], "managed_policy_map": {"foo": "bar"}, + "resource_resolver": ResourceResolver({}), } @patch("boto3.session.Session.region_name", "ap-southeast-1") @@ -441,6 +445,7 @@ class TestFunctionUrlConfig(TestCase): "intrinsics_resolver": IntrinsicsResolver({}), "event_resources": [], "managed_policy_map": {"foo": "bar"}, + "resource_resolver": ResourceResolver({}), } @patch("boto3.session.Session.region_name", "ap-southeast-1") diff --git a/tests/translator/input/function_with_aliasallproperties_and_lambda_layer_publishversion_false.yaml b/tests/translator/input/function_with_aliasallproperties_and_lambda_layer_publishversion_false.yaml new file mode 100644 index 000000000..b8ba18458 --- /dev/null +++ 
b/tests/translator/input/function_with_aliasallproperties_and_lambda_layer_publishversion_false.yaml @@ -0,0 +1,20 @@ +Resources: + MinimalFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: s3://sam-demo-bucket/hello.zip + Handler: hello.handler + Runtime: python3.10 + AutoPublishAlias: live + AutoPublishAliasAllProperties: true + VersionDescription: sam-testing + Layers: + - !Ref TestEnvLayer + + TestEnvLayer: + Type: AWS::Serverless::LayerVersion + Properties: + LayerName: test-env-dependencies + Description: Dependencies for test env implementation + ContentUri: s3://bucket/key + PublishLambdaVersion: false diff --git a/tests/translator/input/function_with_aliasallproperties_and_lambda_layer_publishversion_true.yaml b/tests/translator/input/function_with_aliasallproperties_and_lambda_layer_publishversion_true.yaml new file mode 100644 index 000000000..98a39b73a --- /dev/null +++ b/tests/translator/input/function_with_aliasallproperties_and_lambda_layer_publishversion_true.yaml @@ -0,0 +1,20 @@ +Resources: + MinimalFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: s3://sam-demo-bucket/hello.zip + Handler: hello.handler + Runtime: python3.10 + AutoPublishAlias: live + AutoPublishAliasAllProperties: true + VersionDescription: sam-testing + Layers: + - !Ref TestEnvLayer + + TestEnvLayer: + Type: AWS::Serverless::LayerVersion + Properties: + LayerName: test-env-dependencies + Description: Dependencies for test env implementation + ContentUri: s3://bucket/key + PublishLambdaVersion: true diff --git a/tests/translator/output/aws-cn/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json b/tests/translator/output/aws-cn/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json new file mode 100644 index 000000000..bcce9a525 --- /dev/null +++ b/tests/translator/output/aws-cn/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json @@ -0,0 +1,99 @@ +{ + "Resources": { + "MinimalFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip" + }, + "Handler": "hello.handler", + "Layers": [ + { + "Ref": "TestEnvLayerd122689ed9" + } + ], + "Role": { + "Fn::GetAtt": [ + "MinimalFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.10", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "MinimalFunctionAliaslive": { + "Properties": { + "FunctionName": { + "Ref": "MinimalFunction" + }, + "FunctionVersion": { + "Fn::GetAtt": [ + "MinimalFunctionVersion5346b7b9b0", + "Version" + ] + }, + "Name": "live" + }, + "Type": "AWS::Lambda::Alias" + }, + "MinimalFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws-cn:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MinimalFunctionVersion5346b7b9b0": { + "DeletionPolicy": "Retain", + "Properties": { + "Description": "sam-testing", + "FunctionName": { + "Ref": "MinimalFunction" + } + }, + "Type": "AWS::Lambda::Version" + }, + "TestEnvLayerd122689ed9": { + "DeletionPolicy": "Retain", + "Properties": { + "Content": { + "S3Bucket": "bucket", + "S3Key": "key" + }, + "Description": "Dependencies for test env implementation", + 
"LayerName": "test-env-dependencies" + }, + "Type": "AWS::Lambda::LayerVersion" + } + } +} diff --git a/tests/translator/output/aws-cn/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json b/tests/translator/output/aws-cn/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json new file mode 100644 index 000000000..48a46c6cd --- /dev/null +++ b/tests/translator/output/aws-cn/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json @@ -0,0 +1,99 @@ +{ + "Resources": { + "MinimalFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip" + }, + "Handler": "hello.handler", + "Layers": [ + { + "Ref": "TestEnvLayer856c65a2ec" + } + ], + "Role": { + "Fn::GetAtt": [ + "MinimalFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.10", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "MinimalFunctionAliaslive": { + "Properties": { + "FunctionName": { + "Ref": "MinimalFunction" + }, + "FunctionVersion": { + "Fn::GetAtt": [ + "MinimalFunctionVersion046dad86da", + "Version" + ] + }, + "Name": "live" + }, + "Type": "AWS::Lambda::Alias" + }, + "MinimalFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws-cn:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MinimalFunctionVersion046dad86da": { + "DeletionPolicy": "Retain", + "Properties": { + "Description": "sam-testing", + "FunctionName": { + "Ref": "MinimalFunction" + } + }, + "Type": "AWS::Lambda::Version" + }, + "TestEnvLayer856c65a2ec": { + "DeletionPolicy": "Retain", + "Properties": { + "Content": { + "S3Bucket": "bucket", + "S3Key": "key" + }, + "Description": "Dependencies for test env implementation", + "LayerName": "test-env-dependencies" + }, + "Type": "AWS::Lambda::LayerVersion" + } + } +} diff --git a/tests/translator/output/aws-us-gov/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json b/tests/translator/output/aws-us-gov/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json new file mode 100644 index 000000000..52a2e4d83 --- /dev/null +++ b/tests/translator/output/aws-us-gov/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json @@ -0,0 +1,99 @@ +{ + "Resources": { + "MinimalFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip" + }, + "Handler": "hello.handler", + "Layers": [ + { + "Ref": "TestEnvLayerd122689ed9" + } + ], + "Role": { + "Fn::GetAtt": [ + "MinimalFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.10", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "MinimalFunctionAliaslive": { + "Properties": { + "FunctionName": { + "Ref": "MinimalFunction" + }, + "FunctionVersion": { + "Fn::GetAtt": [ + "MinimalFunctionVersion5346b7b9b0", + "Version" + ] + }, + "Name": "live" + }, + "Type": "AWS::Lambda::Alias" + }, + "MinimalFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + 
"Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws-us-gov:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MinimalFunctionVersion5346b7b9b0": { + "DeletionPolicy": "Retain", + "Properties": { + "Description": "sam-testing", + "FunctionName": { + "Ref": "MinimalFunction" + } + }, + "Type": "AWS::Lambda::Version" + }, + "TestEnvLayerd122689ed9": { + "DeletionPolicy": "Retain", + "Properties": { + "Content": { + "S3Bucket": "bucket", + "S3Key": "key" + }, + "Description": "Dependencies for test env implementation", + "LayerName": "test-env-dependencies" + }, + "Type": "AWS::Lambda::LayerVersion" + } + } +} diff --git a/tests/translator/output/aws-us-gov/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json b/tests/translator/output/aws-us-gov/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json new file mode 100644 index 000000000..91d328a2c --- /dev/null +++ b/tests/translator/output/aws-us-gov/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json @@ -0,0 +1,99 @@ +{ + "Resources": { + "MinimalFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip" + }, + "Handler": "hello.handler", + "Layers": [ + { + "Ref": "TestEnvLayer856c65a2ec" + } + ], + "Role": { + "Fn::GetAtt": [ + "MinimalFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.10", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "MinimalFunctionAliaslive": { + "Properties": { + "FunctionName": { + "Ref": "MinimalFunction" + }, + "FunctionVersion": { + "Fn::GetAtt": [ + "MinimalFunctionVersion046dad86da", + "Version" + ] + }, + "Name": "live" + }, + "Type": "AWS::Lambda::Alias" + }, + "MinimalFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws-us-gov:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MinimalFunctionVersion046dad86da": { + "DeletionPolicy": "Retain", + "Properties": { + "Description": "sam-testing", + "FunctionName": { + "Ref": "MinimalFunction" + } + }, + "Type": "AWS::Lambda::Version" + }, + "TestEnvLayer856c65a2ec": { + "DeletionPolicy": "Retain", + "Properties": { + "Content": { + "S3Bucket": "bucket", + "S3Key": "key" + }, + "Description": "Dependencies for test env implementation", + "LayerName": "test-env-dependencies" + }, + "Type": "AWS::Lambda::LayerVersion" + } + } +} diff --git a/tests/translator/output/error_globals_unsupported_type.json b/tests/translator/output/error_globals_unsupported_type.json index 40ed76b7f..9d4d7a45c 100644 --- a/tests/translator/output/error_globals_unsupported_type.json +++ b/tests/translator/output/error_globals_unsupported_type.json @@ -4,12 +4,12 @@ "Number of errors found: 1. ", "'Globals' section is invalid. ", "'NewType' is not supported. 
", - "Must be one of the following values - ['Api', 'Function', 'HttpApi', 'SimpleTable', 'StateMachine']" + "Must be one of the following values - ['Api', 'Function', 'HttpApi', 'LayerVersion', 'SimpleTable', 'StateMachine']" ], - "errorMessage": "Invalid Serverless Application Specification document. Number of errors found: 1. 'Globals' section is invalid. 'NewType' is not supported. Must be one of the following values - ['Api', 'Function', 'HttpApi', 'SimpleTable', 'StateMachine']", + "errorMessage": "Invalid Serverless Application Specification document. Number of errors found: 1. 'Globals' section is invalid. 'NewType' is not supported. Must be one of the following values - ['Api', 'Function', 'HttpApi', 'LayerVersion', 'SimpleTable', 'StateMachine']", "errors": [ { - "errorMessage": "'Globals' section is invalid. 'NewType' is not supported. Must be one of the following values - ['Api', 'Function', 'HttpApi', 'SimpleTable', 'StateMachine']" + "errorMessage": "'Globals' section is invalid. 'NewType' is not supported. Must be one of the following values - ['Api', 'Function', 'HttpApi', 'LayerVersion', 'SimpleTable', 'StateMachine']" } ] } diff --git a/tests/translator/output/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json b/tests/translator/output/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json new file mode 100644 index 000000000..c47ff847b --- /dev/null +++ b/tests/translator/output/function_with_aliasallproperties_and_lambda_layer_publishversion_false.json @@ -0,0 +1,99 @@ +{ + "Resources": { + "MinimalFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip" + }, + "Handler": "hello.handler", + "Layers": [ + { + "Ref": "TestEnvLayerd122689ed9" + } + ], + "Role": { + "Fn::GetAtt": [ + "MinimalFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.10", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "MinimalFunctionAliaslive": { + "Properties": { + "FunctionName": { + "Ref": "MinimalFunction" + }, + "FunctionVersion": { + "Fn::GetAtt": [ + "MinimalFunctionVersion5346b7b9b0", + "Version" + ] + }, + "Name": "live" + }, + "Type": "AWS::Lambda::Alias" + }, + "MinimalFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MinimalFunctionVersion5346b7b9b0": { + "DeletionPolicy": "Retain", + "Properties": { + "Description": "sam-testing", + "FunctionName": { + "Ref": "MinimalFunction" + } + }, + "Type": "AWS::Lambda::Version" + }, + "TestEnvLayerd122689ed9": { + "DeletionPolicy": "Retain", + "Properties": { + "Content": { + "S3Bucket": "bucket", + "S3Key": "key" + }, + "Description": "Dependencies for test env implementation", + "LayerName": "test-env-dependencies" + }, + "Type": "AWS::Lambda::LayerVersion" + } + } +} diff --git a/tests/translator/output/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json b/tests/translator/output/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json new file mode 100644 index 000000000..d13d47197 --- /dev/null +++ 
b/tests/translator/output/function_with_aliasallproperties_and_lambda_layer_publishversion_true.json @@ -0,0 +1,99 @@ +{ + "Resources": { + "MinimalFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip" + }, + "Handler": "hello.handler", + "Layers": [ + { + "Ref": "TestEnvLayer856c65a2ec" + } + ], + "Role": { + "Fn::GetAtt": [ + "MinimalFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.10", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "MinimalFunctionAliaslive": { + "Properties": { + "FunctionName": { + "Ref": "MinimalFunction" + }, + "FunctionVersion": { + "Fn::GetAtt": [ + "MinimalFunctionVersion046dad86da", + "Version" + ] + }, + "Name": "live" + }, + "Type": "AWS::Lambda::Alias" + }, + "MinimalFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MinimalFunctionVersion046dad86da": { + "DeletionPolicy": "Retain", + "Properties": { + "Description": "sam-testing", + "FunctionName": { + "Ref": "MinimalFunction" + } + }, + "Type": "AWS::Lambda::Version" + }, + "TestEnvLayer856c65a2ec": { + "DeletionPolicy": "Retain", + "Properties": { + "Content": { + "S3Bucket": "bucket", + "S3Key": "key" + }, + "Description": "Dependencies for test env implementation", + "LayerName": "test-env-dependencies" + }, + "Type": "AWS::Lambda::LayerVersion" + } + } +} diff --git a/tests/translator/test_function_resources.py b/tests/translator/test_function_resources.py index 353b92c17..5fa2d0aa4 100644 --- a/tests/translator/test_function_resources.py +++ b/tests/translator/test_function_resources.py @@ -15,6 +15,8 @@ def setUp(self): self.intrinsics_resolver_mock.resolve = Mock() self.mappings_resolver_mock = Mock() self.mappings_resolver_mock.resolve = Mock() + self.resource_resolver_mock = Mock() + self.resource_resolver_mock.resolve = Mock() self.code_uri = "s3://bucket/key?versionId=version" self.func_dict = { @@ -44,6 +46,7 @@ def test_sam_function_with_code_signer(self): kwargs["managed_policy_map"] = {"a": "b"} kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = { "S3Bucket": "bucket", "S3Key": "key", @@ -76,6 +79,7 @@ def test_sam_function_with_alias(self, get_resolved_alias_name_mock): kwargs["managed_policy_map"] = {"a": "b"} kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = { "S3Bucket": "bucket", "S3Key": "key", @@ -131,6 +135,7 @@ def test_sam_function_with_deployment_preference(self, get_resolved_alias_name_m kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock deployment_preference_collection = self._make_deployment_preference_collection() kwargs["deployment_preference_collection"] = 
deployment_preference_collection get_resolved_alias_name_mock.return_value = alias_name @@ -176,6 +181,7 @@ def test_sam_function_with_deployment_preference_missing_collection_raises_error kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = { "S3Bucket": "bucket", "S3Key": "key", @@ -212,6 +218,7 @@ def test_sam_function_with_disabled_deployment_preference_does_not_add_update_po kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock preference_collection = self._make_deployment_preference_collection() preference_collection.get.return_value = DeploymentPreference.from_dict( sam_func.logical_id, deploy_preference_dict @@ -248,6 +255,7 @@ def test_sam_function_cannot_be_with_deployment_preference_without_alias(self): kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock kwargs["deployment_preference_collection"] = self._make_deployment_preference_collection() + kwargs["resource_resolver"] = self.resource_resolver_mock sam_func.to_cloudformation(**kwargs) @patch("boto3.session.Session.region_name", "ap-southeast-1") @@ -271,6 +279,7 @@ def test_sam_function_without_alias_allows_disabled_deployment_preference(self): kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock preference_collection = self._make_deployment_preference_collection() preference_collection.get.return_value = DeploymentPreference.from_dict( @@ -311,6 +320,7 @@ def test_sam_function_with_deployment_preference_intrinsic_ref_enabled_boolean_p kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock deployment_preference_collection = self._make_deployment_preference_collection() kwargs["deployment_preference_collection"] = deployment_preference_collection self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = True @@ -355,6 +365,7 @@ def test_sam_function_with_deployment_preference_intrinsic_ref_enabled_dict_para kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock deployment_preference_collection = self._make_deployment_preference_collection() kwargs["deployment_preference_collection"] = deployment_preference_collection self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = {"MyEnabledFlag": True} @@ -389,6 +400,7 @@ def test_sam_function_with_deployment_preference_intrinsic_findinmap_enabled_dic kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock deployment_preference_collection = self._make_deployment_preference_collection() kwargs["deployment_preference_collection"] = deployment_preference_collection 
self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = {"MyEnabledFlag": True} @@ -424,6 +436,7 @@ def test_sam_function_with_deployment_preference_passthrough_condition_through_p kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock deployment_preference_collection = self._make_deployment_preference_collection() kwargs["deployment_preference_collection"] = deployment_preference_collection get_resolved_alias_name_mock.return_value = alias_name @@ -472,6 +485,7 @@ def test_sam_function_with_deployment_preference_passthrough_condition_through_f kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock deployment_preference_collection = self._make_deployment_preference_collection() kwargs["deployment_preference_collection"] = deployment_preference_collection get_resolved_alias_name_mock.return_value = alias_name @@ -531,6 +545,7 @@ def test_sam_function_with_deployment_preference_passthrough_condition_invalid_i kwargs["event_resources"] = [] kwargs["intrinsics_resolver"] = self.intrinsics_resolver_mock kwargs["mappings_resolver"] = self.mappings_resolver_mock + kwargs["resource_resolver"] = self.resource_resolver_mock deployment_preference_collection = self._make_deployment_preference_collection() kwargs["deployment_preference_collection"] = deployment_preference_collection get_resolved_alias_name_mock.return_value = alias_name @@ -560,7 +575,9 @@ def test_version_creation(self, LogicalIdGeneratorMock): generator_mock.gen.return_value = id_val self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = self.lambda_func.Code - version = self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + version = self.sam_func._construct_version( + self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock + ) self.assertEqual(version.logical_id, id_val) self.assertEqual(version.Description, None) @@ -582,7 +599,9 @@ def test_version_creation_with_code_sha(self, LogicalIdGeneratorMock): self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = self.lambda_func.Code self.sam_func.AutoPublishCodeSha256 = hash_code - version = self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock, hash_code) + version = self.sam_func._construct_version( + self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock, hash_code + ) self.assertEqual(version.logical_id, id_val) self.assertEqual(version.Description, None) @@ -603,7 +622,9 @@ def test_version_creation_without_s3_object_version(self, LogicalIdGeneratorMock del self.lambda_func.Code["S3ObjectVersion"] self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = self.lambda_func.Code - version = self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + version = self.sam_func._construct_version( + self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock + ) self.assertEqual(version.logical_id, id_val) @@ -616,7 +637,9 @@ def test_version_creation_error(self): # Empty code dictionary self.lambda_func.Code = {} with self.assertRaises(ValueError): - self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + self.sam_func._construct_version( + self.lambda_func, 
self.intrinsics_resolver_mock, self.resource_resolver_mock + ) @patch("samtranslator.translator.logical_id_generator.LogicalIdGenerator") def test_version_creation_intrinsic_function_in_code_s3key(self, LogicalIdGeneratorMock): @@ -628,7 +651,9 @@ def test_version_creation_intrinsic_function_in_code_s3key(self, LogicalIdGenera self.lambda_func.Code = {"S3Bucket": "bucket", "S3Key": {"Ref": "keyparameter"}, "S3ObjectVersion": "version"} self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = self.lambda_func.Code - version = self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + version = self.sam_func._construct_version( + self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock + ) self.assertEqual(version.logical_id, id_val) expected_prefix = self.sam_func.logical_id + "Version" @@ -644,7 +669,9 @@ def test_version_creation_intrinsic_function_in_code_s3bucket(self, LogicalIdGen self.lambda_func.Code = {"S3Bucket": {"Ref": "bucketparameter"}, "S3Key": "key", "S3ObjectVersion": "version"} self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = self.lambda_func.Code - version = self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + version = self.sam_func._construct_version( + self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock + ) self.assertEqual(version.logical_id, id_val) expected_prefix = self.sam_func.logical_id + "Version" @@ -660,7 +687,9 @@ def test_version_creation_intrinsic_function_in_code_s3version(self, LogicalIdGe self.lambda_func.Code = {"S3Bucket": "bucket", "S3Key": "key", "S3ObjectVersion": {"Ref": "versionparameter"}} self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = self.lambda_func.Code - version = self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + version = self.sam_func._construct_version( + self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock + ) self.assertEqual(version.logical_id, id_val) expected_prefix = self.sam_func.logical_id + "Version" @@ -676,7 +705,7 @@ def test_version_logical_id_changes(self, LogicalIdGeneratorMock): # Test that logicalId changes with changes to CodeSha self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = self.lambda_func.Code - self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock) LogicalIdGeneratorMock.assert_called_once_with(prefix, self.lambda_func.Code, None) self.intrinsics_resolver_mock.resolve_parameter_refs.assert_called_with(self.lambda_func.Code) @@ -685,7 +714,7 @@ def test_version_logical_id_changes(self, LogicalIdGeneratorMock): self.lambda_func.Code["S3ObjectVersion"] = "new object version" new_code = self.lambda_func.Code.copy() self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = new_code - self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock) LogicalIdGeneratorMock.assert_called_with(prefix, new_code, None) self.intrinsics_resolver_mock.resolve_parameter_refs.assert_called_with(new_code) @@ -699,7 +728,7 @@ def test_version_logical_id_changes_with_intrinsic_functions(self, LogicalIdGene self.lambda_func.Code = {"S3Bucket": "bucket", "S3Key": {"Ref": "someparam"}} 
self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = self.lambda_func.Code - self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock) LogicalIdGeneratorMock.assert_called_once_with(prefix, self.lambda_func.Code, None) self.intrinsics_resolver_mock.resolve_parameter_refs.assert_called_with(self.lambda_func.Code) @@ -707,7 +736,7 @@ def test_version_logical_id_changes_with_intrinsic_functions(self, LogicalIdGene # Now, just let the intrinsics resolver return a different value. Let's make sure the new value gets wired up properly new_code = {"S3Bucket": "bucket", "S3Key": "some new value"} self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = new_code - self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock) + self.sam_func._construct_version(self.lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock) LogicalIdGeneratorMock.assert_called_with(prefix, new_code, None) self.intrinsics_resolver_mock.resolve_parameter_refs.assert_called_with(self.lambda_func.Code) @@ -723,11 +752,14 @@ def test_version_logical_id_changes_with_snapstart(self): self.intrinsics_resolver_mock.resolve_parameter_refs.return_value = lambda_func.Code - version1 = self.sam_func._construct_version(lambda_func, self.intrinsics_resolver_mock) - version_snapstart = self.sam_func._construct_version(lambda_func_snapstart, self.intrinsics_resolver_mock) + version1 = self.sam_func._construct_version( + lambda_func, self.intrinsics_resolver_mock, self.resource_resolver_mock + ) + version_snapstart = self.sam_func._construct_version( + lambda_func_snapstart, self.intrinsics_resolver_mock, self.resource_resolver_mock + ) version_snapstart_none = self.sam_func._construct_version( - lambda_func_snapstart_none, - self.intrinsics_resolver_mock, + lambda_func_snapstart_none, self.intrinsics_resolver_mock, self.resource_resolver_mock ) # SnapStart config changes the hash, except when ApplyOn is "None" self.assertNotEqual(version1.logical_id, version_snapstart.logical_id) From 158c67440ba11c2c984a252370c5f077c5cf790b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 09:41:05 -0800 Subject: [PATCH 8/8] chore(schema): update (#3674) Co-authored-by: github-actions --- samtranslator/schema/schema.json | 42 ++-- schema_source/cloudformation-docs.json | 246 ++++++++++++++++++++--- schema_source/cloudformation.schema.json | 42 ++-- 3 files changed, 252 insertions(+), 78 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 6d4f0dca9..4cb9191f0 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -9394,13 +9394,9 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . 
The tag key can be up to 128 characters and must not start with `aws:` .", - "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "The tag value can be up to 256 characters.", - "title": "Value", "type": "string" } }, @@ -30252,7 +30248,7 @@ "additionalProperties": false, "properties": { "EmbeddingModelArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.", + "markdownDescription": "The Amazon Resource Name (ARN) of the model or inference profile used to create vector embeddings for the knowledge base.", "title": "EmbeddingModelArn", "type": "string" } @@ -63117,7 +63113,7 @@ "additionalProperties": false, "properties": { "CloudWatchLogGroupArn": { - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task.\n\nFor more information, see [Monitoring DataSync with Amazon CloudWatch](https://docs.aws.amazon.com/datasync/latest/userguide/monitor-datasync.html) .", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task.\n\nFor Enhanced mode tasks, you don't need to specify anything. DataSync automatically sends logs to a CloudWatch log group named `/aws/datasync` .\n\nFor more information, see [Monitoring data transfers with CloudWatch Logs](https://docs.aws.amazon.com/datasync/latest/userguide/configure-logging.html) .", "title": "CloudWatchLogGroupArn", "type": "string" }, @@ -63138,7 +63134,7 @@ "items": { "$ref": "#/definitions/AWS::DataSync::Task.FilterRule" }, - "markdownDescription": "Specifies include filters define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see [Specifying what DataSync transfers by using filters](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", + "markdownDescription": "Specifies include filters that define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see [Specifying what DataSync transfers by using filters](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", "title": "Includes", "type": "array" }, @@ -66296,7 +66292,7 @@ "additionalProperties": false, "properties": { "AppBoundaryKey": { - "markdownDescription": "An AWS tag *key* that is used to identify the AWS resources that DevOps Guru analyzes. All AWS resources in your account and Region tagged with this *key* make up your DevOps Guru application and analysis boundary.\n\n> The string used for a *key* in a tag that you use to define your resource coverage must begin with the prefix `Devops-guru-` . The tag *key* might be `DevOps-Guru-deployment-application` or `devops-guru-rds-application` . When you create a *key* , the case of characters in the *key* can be whatever you choose. After you create a *key* , it is case-sensitive. For example, DevOps Guru works with a *key* named `devops-guru-rds` and a *key* named `DevOps-Guru-RDS` , and these act as two different *keys* . Possible *key* / *value* pairs in your application might be `Devops-Guru-production-application/RDS` or `Devops-Guru-production-application/containers` .", + "markdownDescription": "An AWS tag *key* that is used to identify the AWS resources that DevOps Guru analyzes. 
All AWS resources in your account and Region tagged with this *key* make up your DevOps Guru application and analysis boundary.\n\n> When you create a *key* , the case of characters in the *key* can be whatever you choose. After you create a *key* , it is case-sensitive. For example, DevOps Guru works with a *key* named `devops-guru-rds` and a *key* named `DevOps-Guru-RDS` , and these act as two different *keys* . Possible *key* / *value* pairs in your application might be `Devops-Guru-production-application/RDS` or `Devops-Guru-production-application/containers` .", "title": "AppBoundaryKey", "type": "string" }, @@ -92993,12 +92989,12 @@ "title": "MutualAuthentication" }, "Port": { - "markdownDescription": "The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer.", + "markdownDescription": "The port on which the load balancer is listening. You can't specify a port for a Gateway Load Balancer.", "title": "Port", "type": "number" }, "Protocol": { - "markdownDescription": "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.", + "markdownDescription": "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer.", "title": "Protocol", "type": "string" }, @@ -93314,7 +93310,7 @@ "type": "string" }, "Protocol": { - "markdownDescription": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.", + "markdownDescription": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You can't redirect HTTPS to HTTP.", "title": "Protocol", "type": "string" }, @@ -93881,7 +93877,7 @@ "type": "string" }, "Protocol": { - "markdownDescription": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.", + "markdownDescription": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You can't redirect HTTPS to HTTP.", "title": "Protocol", "type": "string" }, @@ -94037,7 +94033,7 @@ "type": "string" }, "IpAddressType": { - "markdownDescription": "Note: Internal load balancers must use the `ipv4` IP address type.\n\n[Application Load Balancers] The IP address type. The possible values are `ipv4` (for only IPv4 addresses), `dualstack` (for IPv4 and IPv6 addresses), and `dualstack-without-public-ipv4` (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).\n\nNote: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors.\n\n[Network Load Balancers] The IP address type. 
The possible values are `ipv4` (for only IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses). You can\u2019t specify `dualstack` for a load balancer with a UDP or TCP_UDP listener.\n\n[Gateway Load Balancers] The IP address type. The possible values are `ipv4` (for only IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses).", + "markdownDescription": "The IP address type. Internal load balancers must use `ipv4` .\n\n[Application Load Balancers] The possible values are `ipv4` (IPv4 addresses), `dualstack` (IPv4 and IPv6 addresses), and `dualstack-without-public-ipv4` (public IPv6 addresses and private IPv4 and IPv6 addresses).\n\nApplication Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors.\n\n[Network Load Balancers and Gateway Load Balancers] The possible values are `ipv4` (IPv4 addresses) and `dualstack` (IPv4 and IPv6 addresses).", "title": "IpAddressType", "type": "string" }, @@ -94055,7 +94051,7 @@ "type": "string" }, "Scheme": { - "markdownDescription": "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n\nThe nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.\n\nThe default is an Internet-facing load balancer.\n\nYou cannot specify a scheme for a Gateway Load Balancer.", + "markdownDescription": "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n\nThe nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.\n\nThe default is an Internet-facing load balancer.\n\nYou can't specify a scheme for a Gateway Load Balancer.", "title": "Scheme", "type": "string" }, @@ -94071,7 +94067,7 @@ "items": { "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::LoadBalancer.SubnetMapping" }, - "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. 
For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", + "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You can't specify Elastic IP addresses for your subnets.", "title": "SubnetMappings", "type": "array" }, @@ -94079,7 +94075,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", + "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", "title": "Subnets", "type": "array" }, @@ -94123,7 +94119,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . 
The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . 
The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and can't be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. 
The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. 
The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", "title": "Key", "type": "string" }, @@ -94200,7 +94196,7 @@ "additionalProperties": false, "properties": { "HealthCheckEnabled": { - "markdownDescription": "Indicates whether health checks are enabled. If the target type is `lambda` , health checks are disabled by default but can be enabled. If the target type is `instance` , `ip` , or `alb` , health checks are always enabled and cannot be disabled.", + "markdownDescription": "Indicates whether health checks are enabled. If the target type is `lambda` , health checks are disabled by default but can be enabled. If the target type is `instance` , `ip` , or `alb` , health checks are always enabled and can't be disabled.", "title": "HealthCheckEnabled", "type": "boolean" }, @@ -94235,7 +94231,7 @@ "type": "number" }, "IpAddressType": { - "markdownDescription": "The type of IP address used for this target group. The possible values are `ipv4` and `ipv6` . This is an optional parameter. If not specified, the IP address type defaults to `ipv4` .", + "markdownDescription": "The IP address type. The default value is `ipv4` .", "title": "IpAddressType", "type": "string" }, @@ -94370,7 +94366,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. 
If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. 
The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. The value you set for both attributes must be the same.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. 
The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . 
If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.", "title": "Key", "type": "string" }, @@ -143168,7 +143164,7 @@ "properties": { "Variables": { "additionalProperties": true, - "markdownDescription": "Environment variable key-value pairs. For more information, see [Using Lambda environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html) .", + "markdownDescription": "Environment variable key-value pairs. For more information, see [Using Lambda environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html) .\n\nIf the value of the environment variable is a time or a duration, enclose the value in quotes.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -232528,7 +232524,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The DNS record type. 
For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS` . 
When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "title": "Type", "type": "string" }, @@ -232947,7 +232943,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. 
Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . 
\n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "title": "Type", "type": "string" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 3f0649f19..3b8da070e 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -1338,9 +1338,9 @@ "ReplicateTo": "Save the deployment strategy to a Systems Manager (SSM) document.", "Tags": "Assigns metadata to an AWS AppConfig resource. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define. You can specify a maximum of 50 tags for a resource." }, - "AWS::AppConfig::DeploymentStrategy Tags": { - "Key": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", - "Value": "The tag value can be up to 256 characters." + "AWS::AppConfig::DeploymentStrategy Tag": { + "Key": "", + "Value": "" }, "AWS::AppConfig::Environment": { "ApplicationId": "The application ID.", @@ -3030,6 +3030,56 @@ "MessageAction": "The action to take for the welcome email that is sent to a user after the user is created in the user pool. If you specify SUPPRESS, no email is sent. If you specify RESEND, do not specify the first name or last name of the user. If the value is null, the email is sent.\n\n> The temporary password in the welcome email is valid for only 7 days. If users don\u2019t set their passwords within 7 days, you must send them a new welcome email.", "UserName": "The email address of the user.\n\nUsers' email addresses are case-sensitive. During login, if they specify an email address that doesn't use the same capitalization as the email address specified when their user pool account was created, a \"user does not exist\" error message displays." }, + "AWS::AppSync::Api": { + "EventConfig": "", + "Name": "The API name.", + "OwnerContact": "The owner contact information for an API resource.\n\nThis field accepts any string input with a length of 0 - 256 characters.", + "Tags": "The tags." 
+ }, + "AWS::AppSync::Api AuthMode": { + "AuthType": "" + }, + "AWS::AppSync::Api AuthProvider": { + "AuthType": "", + "CognitoConfig": "", + "LambdaAuthorizerConfig": "", + "OpenIDConnectConfig": "" + }, + "AWS::AppSync::Api CognitoConfig": { + "AppIdClientRegex": "", + "AwsRegion": "", + "UserPoolId": "" + }, + "AWS::AppSync::Api DnsMap": { + "Http": "", + "Realtime": "" + }, + "AWS::AppSync::Api EventConfig": { + "AuthProviders": "", + "ConnectionAuthModes": "", + "DefaultPublishAuthModes": "", + "DefaultSubscribeAuthModes": "", + "LogConfig": "" + }, + "AWS::AppSync::Api EventLogConfig": { + "CloudWatchLogsRoleArn": "", + "LogLevel": "" + }, + "AWS::AppSync::Api LambdaAuthorizerConfig": { + "AuthorizerResultTtlInSeconds": "The number of seconds a response should be cached for. The default is 0 seconds, which disables caching. If you don't specify a value for `authorizerResultTtlInSeconds` , the default value is used. The maximum value is one hour (3600 seconds). The Lambda function can override this by returning a `ttlOverride` key in its response.", + "AuthorizerUri": "The Amazon Resource Name (ARN) of the Lambda function to be called for authorization. This can be a standard Lambda ARN, a version ARN ( `.../v3` ), or an alias ARN.\n\n*Note* : This Lambda function must have the following resource-based policy assigned to it. When configuring Lambda authorizers in the console, this is done for you. To use the AWS Command Line Interface ( AWS CLI ), run the following:\n\n`aws lambda add-permission --function-name \"arn:aws:lambda:us-east-2:111122223333:function:my-function\" --statement-id \"appsync\" --principal appsync.amazonaws.com --action lambda:InvokeFunction`", + "IdentityValidationExpression": "A regular expression for validation of tokens before the Lambda function is called." + }, + "AWS::AppSync::Api OpenIDConnectConfig": { + "AuthTTL": "The number of milliseconds that a token is valid after being authenticated.", + "ClientId": "The client identifier of the relying party at the OpenID identity provider. This identifier is typically obtained when the relying party is registered with the OpenID identity provider. You can specify a regular expression so that AWS AppSync can validate against multiple client identifiers at a time.", + "IatTTL": "The number of milliseconds that a token is valid after it's issued to a user.", + "Issuer": "The issuer for the OIDC configuration. The issuer returned by discovery must exactly match the value of `iss` in the ID token." + }, + "AWS::AppSync::Api Tag": { + "Key": "", + "Value": "" + }, "AWS::AppSync::ApiCache": { "ApiCachingBehavior": "Caching behavior.\n\n- *FULL_REQUEST_CACHING* : All requests are fully cached.\n- *PER_RESOLVER_CACHING* : Individual resolvers that you specify are cached.", "ApiId": "The GraphQL API ID.", @@ -3044,6 +3094,22 @@ "Description": "Unique description of your API key.", "Expires": "The time after which the API key expires. The date is represented as seconds since the epoch, rounded down to the nearest hour." 
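The `AWS::AppSync::Api` entries added above only list property names, so a compact CloudFormation sketch may help readers of this schema. This is a minimal sketch, not part of the patch: the logical ID and the `API_KEY` auth type are illustrative assumptions, and only property names that appear in the hunk above are used.

Resources:
  MyEventApi:
    Type: AWS::AppSync::Api
    Properties:
      Name: my-event-api                 # assumed name; any valid API name works
      EventConfig:
        AuthProviders:
          - AuthType: API_KEY            # assumed auth type for the sketch
        ConnectionAuthModes:
          - AuthType: API_KEY
        DefaultPublishAuthModes:
          - AuthType: API_KEY
        DefaultSubscribeAuthModes:
          - AuthType: API_KEY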
}, + "AWS::AppSync::ChannelNamespace": { + "ApiId": "", + "CodeHandlers": "", + "CodeS3Location": "", + "Name": "", + "PublishAuthModes": "", + "SubscribeAuthModes": "", + "Tags": "" + }, + "AWS::AppSync::ChannelNamespace AuthMode": { + "AuthType": "" + }, + "AWS::AppSync::ChannelNamespace Tag": { + "Key": "", + "Value": "" + }, "AWS::AppSync::DataSource": { "ApiId": "Unique AWS AppSync GraphQL API identifier where this data source will be created.", "Description": "The description of the data source.", @@ -4971,6 +5037,22 @@ "AWS::Bedrock::AgentAlias AgentAliasRoutingConfigurationListItem": { "AgentVersion": "The version of the agent with which the alias is associated." }, + "AWS::Bedrock::ApplicationInferenceProfile": { + "Description": "The description of the inference profile.", + "InferenceProfileName": "The name of the inference profile.", + "ModelSource": "Contains configurations for the inference profile to copy as the resource.", + "Tags": "A list of tags associated with the inference profile." + }, + "AWS::Bedrock::ApplicationInferenceProfile InferenceProfileModel": { + "ModelArn": "The Amazon Resource Name (ARN) of the model." + }, + "AWS::Bedrock::ApplicationInferenceProfile InferenceProfileModelSource": { + "CopyFrom": "The ARN of the model or system-defined inference profile that is the source for the inference profile." + }, + "AWS::Bedrock::ApplicationInferenceProfile Tag": { + "Key": "Key for the tag.", + "Value": "Value for the tag." + }, "AWS::Bedrock::DataSource": { "DataDeletionPolicy": "The data deletion policy for the data source.", "DataSourceConfiguration": "The connection configuration for the data source.", @@ -5205,7 +5287,12 @@ "AWS::Bedrock::Flow FlowValidation": { "Message": "A message describing the validation error." }, + "AWS::Bedrock::Flow GuardrailConfiguration": { + "GuardrailIdentifier": "The identifier for the guardrail.", + "GuardrailVersion": "The version of the guardrail." + }, "AWS::Bedrock::Flow KnowledgeBaseFlowNodeConfiguration": { + "GuardrailConfiguration": "", "KnowledgeBaseId": "The unique identifier of the knowledge base to query.", "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array." }, @@ -5217,6 +5304,7 @@ "LocaleId": "The Region to invoke the Amazon Lex bot in." }, "AWS::Bedrock::Flow PromptFlowNodeConfiguration": { + "GuardrailConfiguration": "", "SourceConfiguration": "Specifies whether the prompt is from Prompt management or defined inline." }, "AWS::Bedrock::Flow PromptFlowNodeInlineConfiguration": { @@ -5242,7 +5330,6 @@ "MaxTokens": "The maximum number of tokens to return in the response.", "StopSequences": "A list of strings that define sequences after which the model will stop generating.", "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", - "TopK": "", "TopP": "The percentage of most-likely candidates that the model considers for the next token." }, "AWS::Bedrock::Flow PromptTemplateConfiguration": { @@ -5351,7 +5438,12 @@ "Name": "A name for the output that you can reference.", "Type": "The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown." 
}, + "AWS::Bedrock::FlowVersion GuardrailConfiguration": { + "GuardrailIdentifier": "The identifier for the guardrail.", + "GuardrailVersion": "The version of the guardrail." + }, "AWS::Bedrock::FlowVersion KnowledgeBaseFlowNodeConfiguration": { + "GuardrailConfiguration": "", "KnowledgeBaseId": "The unique identifier of the knowledge base to query.", "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array." }, @@ -5363,6 +5455,7 @@ "LocaleId": "The Region to invoke the Amazon Lex bot in." }, "AWS::Bedrock::FlowVersion PromptFlowNodeConfiguration": { + "GuardrailConfiguration": "", "SourceConfiguration": "Specifies whether the prompt is from Prompt management or defined inline." }, "AWS::Bedrock::FlowVersion PromptFlowNodeInlineConfiguration": { @@ -5388,7 +5481,6 @@ "MaxTokens": "The maximum number of tokens to return in the response.", "StopSequences": "A list of strings that define sequences after which the model will stop generating.", "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", - "TopK": "", "TopP": "The percentage of most-likely candidates that the model considers for the next token." }, "AWS::Bedrock::FlowVersion PromptTemplateConfiguration": { @@ -5558,7 +5650,7 @@ "Type": "The vector store service in which the knowledge base is stored." }, "AWS::Bedrock::KnowledgeBase VectorKnowledgeBaseConfiguration": { - "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.", + "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model or inference profile used to create vector embeddings for the knowledge base.", "EmbeddingModelConfiguration": "The embeddings model configuration details for the vector model used in Knowledge Base." }, "AWS::Bedrock::Prompt": { @@ -5579,7 +5671,6 @@ "MaxTokens": "The maximum number of tokens to return in the response.", "StopSequences": "A list of strings that define sequences after which the model will stop generating.", "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", - "TopK": "", "TopP": "The percentage of most-likely candidates that the model considers for the next token." }, "AWS::Bedrock::Prompt PromptTemplateConfiguration": { @@ -5605,7 +5696,7 @@ "AWS::Bedrock::PromptVersion": { "Description": "The description of the prompt version.", "PromptArn": "The Amazon Resource Name (ARN) of the version of the prompt.", - "Tags": "" + "Tags": "A map of tags attached to the prompt version and their values." }, "AWS::Bedrock::PromptVersion PromptInferenceConfiguration": { "Text": "Contains inference configurations for a text prompt." @@ -5617,7 +5708,6 @@ "MaxTokens": "The maximum number of tokens to return in the response.", "StopSequences": "A list of strings that define sequences after which the model will stop generating.", "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", - "TopK": "", "TopP": "The percentage of most-likely candidates that the model considers for the next token." 
}, "AWS::Bedrock::PromptVersion PromptTemplateConfiguration": { @@ -7800,7 +7890,8 @@ }, "AWS::CodePipeline::Pipeline FailureConditions": { "Conditions": "The conditions that are configured as failure conditions.", - "Result": "The specified result for when the failure conditions are met, such as rolling back the stage." + "Result": "The specified result for when the failure conditions are met, such as rolling back the stage.", + "RetryConfiguration": "The retry configuration specifies automatic retry for a failed stage, along with the configured retry mode." }, "AWS::CodePipeline::Pipeline GitBranchFilterCriteria": { "Excludes": "The list of patterns of Git branches that, when a commit is pushed, are to be excluded from starting the pipeline.", @@ -7840,6 +7931,9 @@ "GitConfiguration": "Provides the filter criteria and the source stage for the repository event that starts the pipeline, such as Git tags.", "ProviderType": "The source provider for the event, such as connections configured for a repository with Git tags, for the specified trigger configuration." }, + "AWS::CodePipeline::Pipeline RetryConfiguration": { + "RetryMode": "The method that you want to configure for automatic stage retry on stage failure. You can specify to retry only failed action in the stage or all actions in the stage." + }, "AWS::CodePipeline::Pipeline RuleDeclaration": { "Configuration": "The action configuration fields for the rule.", "InputArtifacts": "The input artifacts fields for the rule, such as specifying an input file for the rule.", @@ -9671,6 +9765,31 @@ "CertificatePem": "The contents of a `.pem` file, which contains an X.509 certificate.", "CertificateWallet": "The location of an imported Oracle Wallet certificate for use with SSL. An example is: `filebase64(\"${path.root}/rds-ca-2019-root.sso\")`" }, + "AWS::DMS::DataMigration": { + "DataMigrationIdentifier": "", + "DataMigrationName": "The user-friendly name for the data migration.", + "DataMigrationSettings": "Specifies CloudWatch settings and selection rules for the data migration.", + "DataMigrationType": "Specifies whether the data migration is full-load only, change data capture (CDC) only, or full-load and CDC.", + "MigrationProjectIdentifier": "", + "ServiceAccessRoleArn": "The IAM role that the data migration uses to access AWS resources.", + "SourceDataSettings": "Specifies information about the data migration's source data provider.", + "Tags": "" + }, + "AWS::DMS::DataMigration DataMigrationSettings": { + "CloudwatchLogsEnabled": "Whether to enable CloudWatch logging for the data migration.", + "NumberOfJobs": "The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target.", + "SelectionRules": "A JSON-formatted string that defines what objects to include and exclude from the migration." + }, + "AWS::DMS::DataMigration SourceDataSettings": { + "CDCStartPosition": "", + "CDCStartTime": "", + "CDCStopTime": "", + "SlotName": "" + }, + "AWS::DMS::DataMigration Tag": { + "Key": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regular expressions: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "Value": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". 
The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regular expressions: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\")." + }, "AWS::DMS::DataProvider": { "DataProviderIdentifier": "The identifier of the data provider. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.", "DataProviderName": "The name of the data provider.", @@ -10833,16 +10952,17 @@ "Value": "The value for an AWS resource tag." }, "AWS::DataSync::Task": { - "CloudWatchLogGroupArn": "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task.\n\nFor more information, see [Monitoring DataSync with Amazon CloudWatch](https://docs.aws.amazon.com/datasync/latest/userguide/monitor-datasync.html) .", + "CloudWatchLogGroupArn": "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task.\n\nFor Enhanced mode tasks, you don't need to specify anything. DataSync automatically sends logs to a CloudWatch log group named `/aws/datasync` .\n\nFor more information, see [Monitoring data transfers with CloudWatch Logs](https://docs.aws.amazon.com/datasync/latest/userguide/configure-logging.html) .", "DestinationLocationArn": "The Amazon Resource Name (ARN) of an AWS storage resource's location.", "Excludes": "Specifies exclude filters that define the files, objects, and folders in your source location that you don't want DataSync to transfer. For more information and examples, see [Specifying what DataSync transfers by using filters](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", - "Includes": "Specifies include filters define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see [Specifying what DataSync transfers by using filters](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", + "Includes": "Specifies include filters that define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see [Specifying what DataSync transfers by using filters](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", "ManifestConfig": "The configuration of the manifest that lists the files or objects that you want DataSync to transfer. For more information, see [Specifying what DataSync transfers by using a manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html) .", "Name": "Specifies the name of your task.", "Options": "Specifies your task's settings, such as preserving file metadata, verifying data integrity, among other options.", "Schedule": "Specifies a schedule for when you want your task to run. For more information, see [Scheduling your task](https://docs.aws.amazon.com/datasync/latest/userguide/task-scheduling.html) .", "SourceLocationArn": "Specifies the ARN of your transfer's source location.", "Tags": "Specifies the tags that you want to apply to your task.\n\n*Tags* are key-value pairs that help you manage, filter, and search for your DataSync resources.", + "TaskMode": "The task mode that you're using. 
For more information, see [Choosing a task mode for your data transfer](https://docs.aws.amazon.com/datasync/latest/userguide/choosing-task-mode.html) .", "TaskReportConfig": "Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer. For more information, see [Monitoring your DataSync transfers with task reports](https://docs.aws.amazon.com/datasync/latest/userguide/task-reports.html) .\n\nWhen using this parameter, your caller identity (the role that you're using DataSync with) must have the `iam:PassRole` permission. The [AWSDataSyncFullAccess](https://docs.aws.amazon.com/datasync/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-awsdatasyncfullaccess) policy includes this permission." }, "AWS::DataSync::Task Deleted": { @@ -11324,7 +11444,7 @@ "Tags": "The AWS tags used to filter the resources in the resource collection.\n\nTags help you identify and organize your AWS resources. Many AWS services support tagging, so you can assign the same tag to resources from different services to indicate that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB table resource that you assign to an AWS Lambda function. For more information about using tags, see the [Tagging best practices](https://docs.aws.amazon.com/whitepapers/latest/tagging-best-practices/tagging-best-practices.html) whitepaper.\n\nEach AWS tag has two parts.\n\n- A tag *key* (for example, `CostCenter` , `Environment` , `Project` , or `Secret` ). Tag *keys* are case-sensitive.\n- A field known as a tag *value* (for example, `111122223333` , `Production` , or a team name). Omitting the tag *value* is the same as using an empty string. Like tag *keys* , tag *values* are case-sensitive. The tag value is a required property when AppBoundaryKey is specified.\n\nTogether these are known as *key* - *value* pairs.\n\n> The string used for a *key* in a tag that you use to define your resource coverage must begin with the prefix `Devops-guru-` . The tag *key* might be `DevOps-Guru-deployment-application` or `devops-guru-rds-application` . When you create a *key* , the case of characters in the *key* can be whatever you choose. After you create a *key* , it is case-sensitive. For example, DevOps Guru works with a *key* named `devops-guru-rds` and a *key* named `DevOps-Guru-RDS` , and these act as two different *keys* . Possible *key* / *value* pairs in your application might be `Devops-Guru-production-application/RDS` or `Devops-Guru-production-application/containers` ." }, "AWS::DevOpsGuru::ResourceCollection TagCollection": { - "AppBoundaryKey": "An AWS tag *key* that is used to identify the AWS resources that DevOps Guru analyzes. All AWS resources in your account and Region tagged with this *key* make up your DevOps Guru application and analysis boundary.\n\n> The string used for a *key* in a tag that you use to define your resource coverage must begin with the prefix `Devops-guru-` . The tag *key* might be `DevOps-Guru-deployment-application` or `devops-guru-rds-application` . When you create a *key* , the case of characters in the *key* can be whatever you choose. After you create a *key* , it is case-sensitive. For example, DevOps Guru works with a *key* named `devops-guru-rds` and a *key* named `DevOps-Guru-RDS` , and these act as two different *keys* . 
Possible *key* / *value* pairs in your application might be `Devops-Guru-production-application/RDS` or `Devops-Guru-production-application/containers` .", + "AppBoundaryKey": "An AWS tag *key* that is used to identify the AWS resources that DevOps Guru analyzes. All AWS resources in your account and Region tagged with this *key* make up your DevOps Guru application and analysis boundary.\n\n> When you create a *key* , the case of characters in the *key* can be whatever you choose. After you create a *key* , it is case-sensitive. For example, DevOps Guru works with a *key* named `devops-guru-rds` and a *key* named `DevOps-Guru-RDS` , and these act as two different *keys* . Possible *key* / *value* pairs in your application might be `Devops-Guru-production-application/RDS` or `Devops-Guru-production-application/containers` .", "TagValues": "The values in an AWS tag collection.\n\nThe tag's *value* is a field used to associate a string with the tag *key* (for example, `111122223333` , `Production` , or a team name). The *key* and *value* are the tag's *key* pair. Omitting the tag *value* is the same as using an empty string. Like tag *keys* , tag *values* are case-sensitive. You can specify a maximum of 256 characters for a tag value. The tag value is a required property when *AppBoundaryKey* is specified." }, "AWS::DeviceFarm::DevicePool": { @@ -12064,8 +12184,10 @@ "Min": "The minimum number of vCPUs. To specify no minimum limit, specify `0` ." }, "AWS::EC2::EIP": { + "Address": "An Elastic IP address or a carrier IP address in a Wavelength Zone.", "Domain": "The network ( `vpc` ).\n\nIf you define an Elastic IP address and associate it with a VPC that is defined in the same template, you must declare a dependency on the VPC-gateway attachment by using the [DependsOn Attribute](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html) on this resource.", "InstanceId": "The ID of the instance.\n\n> Updates to the `InstanceId` property may require *some interruptions* . Updates on an EIP reassociates the address on its associated resource.", + "IpamPoolId": "The ID of an IPAM pool which has an Amazon-provided or BYOIP public IPv4 CIDR provisioned to it. For more information, see [Allocate sequential Elastic IP addresses from an IPAM pool](https://docs.aws.amazon.com/vpc/latest/ipam/tutorials-eip-pool.html) in the *Amazon VPC IPAM User Guide* .", "NetworkBorderGroup": "A unique set of Availability Zones, Local Zones, or Wavelength Zones from which AWS advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups.\n\nUse [DescribeAvailabilityZones](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) to view the network border groups.", "PublicIpv4Pool": "The ID of an address pool that you own. Use this parameter to let Amazon EC2 select an address from the address pool.\n\n> Updates to the `PublicIpv4Pool` property may require *some interruptions* . Updates on an EIP reassociates the address on its associated resource.", "Tags": "Any tags assigned to the Elastic IP address.\n\n> Updates to the `Tags` property may require *some interruptions* . 
Updates on an EIP reassociates the address on its associated resource.", @@ -15630,8 +15752,8 @@ "ListenerAttributes": "The listener attributes.", "LoadBalancerArn": "The Amazon Resource Name (ARN) of the load balancer.", "MutualAuthentication": "The mutual authentication configuration information.", - "Port": "The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer.", - "Protocol": "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.", + "Port": "The port on which the load balancer is listening. You can't specify a port for a Gateway Load Balancer.", + "Protocol": "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer.", "SslPolicy": "[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.\n\nUpdating the security policy can result in interruptions if the load balancer is handling a high volume of traffic.\n\nFor more information, see [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) in the *Application Load Balancers Guide* and [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) in the *Network Load Balancers Guide* ." }, "AWS::ElasticLoadBalancingV2::Listener Action": { @@ -15693,7 +15815,7 @@ "Host": "The hostname. This component is not percent-encoded. The hostname can contain #{host}.", "Path": "The absolute path, starting with the leading \"/\". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.", "Port": "The port. You can specify a value from 1 to 65535 or #{port}.", - "Protocol": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.", + "Protocol": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You can't redirect HTTPS to HTTP.", "Query": "The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading \"?\", as it is automatically added. You can specify any of the reserved keywords.", "StatusCode": "The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302)." }, @@ -15785,7 +15907,7 @@ "Host": "The hostname. This component is not percent-encoded. The hostname can contain #{host}.", "Path": "The absolute path, starting with the leading \"/\". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.", "Port": "The port. You can specify a value from 1 to 65535 or #{port}.", - "Protocol": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.", + "Protocol": "The protocol. 
You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You can't redirect HTTPS to HTTP.", "Query": "The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading \"?\", as it is automatically added. You can specify any of the reserved keywords.", "StatusCode": "The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302)." }, @@ -15812,18 +15934,18 @@ }, "AWS::ElasticLoadBalancingV2::LoadBalancer": { "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic": "Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through AWS PrivateLink .", - "IpAddressType": "Note: Internal load balancers must use the `ipv4` IP address type.\n\n[Application Load Balancers] The IP address type. The possible values are `ipv4` (for only IPv4 addresses), `dualstack` (for IPv4 and IPv6 addresses), and `dualstack-without-public-ipv4` (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).\n\nNote: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors.\n\n[Network Load Balancers] The IP address type. The possible values are `ipv4` (for only IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses). You can\u2019t specify `dualstack` for a load balancer with a UDP or TCP_UDP listener.\n\n[Gateway Load Balancers] The IP address type. The possible values are `ipv4` (for only IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses).", + "IpAddressType": "The IP address type. Internal load balancers must use `ipv4` .\n\n[Application Load Balancers] The possible values are `ipv4` (IPv4 addresses), `dualstack` (IPv4 and IPv6 addresses), and `dualstack-without-public-ipv4` (public IPv6 addresses and private IPv4 and IPv6 addresses).\n\nApplication Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors.\n\n[Network Load Balancers and Gateway Load Balancers] The possible values are `ipv4` (IPv4 addresses) and `dualstack` (IPv4 and IPv6 addresses).", "LoadBalancerAttributes": "The load balancer attributes.", "Name": "The name of the load balancer. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, must not begin or end with a hyphen, and must not begin with \"internal-\".\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID for the load balancer. If you specify a name, you cannot perform updates that require replacement of this resource, but you can perform other updates. To replace the resource, specify a new name.", - "Scheme": "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n\nThe nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. 
Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.\n\nThe default is an Internet-facing load balancer.\n\nYou cannot specify a scheme for a Gateway Load Balancer.", + "Scheme": "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n\nThe nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.\n\nThe default is an Internet-facing load balancer.\n\nYou can't specify a scheme for a Gateway Load Balancer.", "SecurityGroups": "[Application Load Balancers and Network Load Balancers] The IDs of the security groups for the load balancer.", - "SubnetMappings": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", - "Subnets": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", + "SubnetMappings": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. 
For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You can't specify Elastic IP addresses for your subnets.", + "Subnets": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", "Tags": "The tags to assign to the load balancer.", "Type": "The type of load balancer. The default is `application` ." }, "AWS::ElasticLoadBalancingV2::LoadBalancer LoadBalancerAttribute": { - "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. 
The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. 
The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", + "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and can't be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . 
The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", "Value": "The value of the attribute." }, "AWS::ElasticLoadBalancingV2::LoadBalancer SubnetMapping": { @@ -15837,14 +15959,14 @@ "Value": "The value of the tag." }, "AWS::ElasticLoadBalancingV2::TargetGroup": { - "HealthCheckEnabled": "Indicates whether health checks are enabled. If the target type is `lambda` , health checks are disabled by default but can be enabled. If the target type is `instance` , `ip` , or `alb` , health checks are always enabled and cannot be disabled.", + "HealthCheckEnabled": "Indicates whether health checks are enabled. If the target type is `lambda` , health checks are disabled by default but can be enabled. 
If the target type is `instance` , `ip` , or `alb` , health checks are always enabled and can't be disabled.", "HealthCheckIntervalSeconds": "The approximate amount of time, in seconds, between health checks of an individual target. The range is 5-300. If the target group protocol is TCP, TLS, UDP, TCP_UDP, HTTP or HTTPS, the default is 30 seconds. If the target group protocol is GENEVE, the default is 10 seconds. If the target type is `lambda` , the default is 35 seconds.", "HealthCheckPath": "[HTTP/HTTPS health checks] The destination for health checks on the targets.\n\n[HTTP1 or HTTP2 protocol version] The ping path. The default is /.\n\n[GRPC protocol version] The path of a custom health check method with the format /package.service/method. The default is / AWS .ALB/healthcheck.", "HealthCheckPort": "The port the load balancer uses when performing health checks on targets. If the protocol is HTTP, HTTPS, TCP, TLS, UDP, or TCP_UDP, the default is `traffic-port` , which is the port on which each target receives traffic from the load balancer. If the protocol is GENEVE, the default is port 80.", "HealthCheckProtocol": "The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers and Gateway Load Balancers, the default is TCP. The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.", "HealthCheckTimeoutSeconds": "The amount of time, in seconds, during which no response from a target means a failed health check. The range is 2\u2013120 seconds. For target groups with a protocol of HTTP, the default is 6 seconds. For target groups with a protocol of TCP, TLS or HTTPS, the default is 10 seconds. For target groups with a protocol of GENEVE, the default is 5 seconds. If the target type is `lambda` , the default is 30 seconds.", "HealthyThresholdCount": "The number of consecutive health check successes required before considering a target healthy. The range is 2-10. If the target group protocol is TCP, TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 5. For target groups with a protocol of GENEVE, the default is 5. If the target type is `lambda` , the default is 5.", - "IpAddressType": "The type of IP address used for this target group. The possible values are `ipv4` and `ipv6` . This is an optional parameter. If not specified, the IP address type defaults to `ipv4` .", + "IpAddressType": "The IP address type. The default value is `ipv4` .", "Matcher": "[HTTP/HTTPS health checks] The HTTP or gRPC codes to use when checking for a successful response from a target. For target groups with a protocol of TCP, TCP_UDP, UDP or TLS the range is 200-599. For target groups with a protocol of HTTP or HTTPS, the range is 200-499. For target groups with a protocol of GENEVE, the range is 200-399.", "Name": "The name of the target group.\n\nThis name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen.", "Port": "The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. If the target is a Lambda function, this parameter does not apply. If the protocol is GENEVE, the supported port is 6081.", @@ -15871,7 +15993,7 @@ "Port": "The port on which the target is listening. 
If the target group protocol is GENEVE, the supported port is 6081. If the target type is `alb` , the targeted Application Load Balancer must have at least one listener whose port matches the target group port. This parameter is not used if the target is a Lambda function." }, "AWS::ElasticLoadBalancingV2::TargetGroup TargetGroupAttribute": { - "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . 
The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . 
The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. The value you set for both attributes must be the same.", + "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . 
The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. 
The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.", "Value": "The value of the attribute." }, "AWS::ElasticLoadBalancingV2::TrustStore": { @@ -23641,7 +23763,7 @@ "TargetArn": "The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic." }, "AWS::Lambda::Function Environment": { - "Variables": "Environment variable key-value pairs. For more information, see [Using Lambda environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html) ." + "Variables": "Environment variable key-value pairs. For more information, see [Using Lambda environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html) .\n\nIf the value of the environment variable is a time or a duration, enclose the value in quotes." }, "AWS::Lambda::Function EphemeralStorage": { "Size": "The size of the function's `/tmp` directory." @@ -23732,7 +23854,6 @@ "CodeSha256": "Only publish a version if the hash value matches the value that's specified. Use this option to avoid publishing a version if the function code has changed since you last updated it. Updates are not supported for this property.", "Description": "A description for the version to override the description in the function configuration. Updates are not supported for this property.", "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", - "Policy": "", "ProvisionedConcurrencyConfig": "Specifies a provisioned concurrency configuration for a function's version. Updates are not supported for this property.", "RuntimePolicy": "" }, @@ -27320,6 +27441,7 @@ "TsEncryptionMethod": "The encryption method to use." }, "AWS::MediaPackageV2::OriginEndpoint FilterConfiguration": { + "ClipStartTime": "Optionally specify the clip start time for all of your manifest egress requests. When you include clip start time, note that you cannot use clip start time query parameters for this manifest's endpoint URL.", "End": "Optionally specify the end time for all of your manifest egress requests. When you include end time, note that you cannot use end time query parameters for this manifest's endpoint URL.", "ManifestFilter": "Optionally specify one or more manifest filters for all of your manifest egress requests. 
When you include a manifest filter, note that you cannot use an identical manifest filter query parameter for this manifest's endpoint URL.", "Start": "Optionally specify the start time for all of your manifest egress requests. When you include start time, note that you cannot use start time query parameters for this manifest's endpoint URL.", @@ -27335,6 +27457,7 @@ "ManifestWindowSeconds": "The duration of the manifest window, in seconds, for the HLS manifest configuration.", "ProgramDateTimeIntervalSeconds": "The `EXT-X-PROGRAM-DATE-TIME` interval, in seconds, associated with the HLS manifest configuration.", "ScteHls": "The SCTE-35 HLS configuration associated with the HLS manifest configuration.", + "StartTag": "", "Url": "The URL of the HLS manifest configuration." }, "AWS::MediaPackageV2::OriginEndpoint LowLatencyHlsManifestConfiguration": { @@ -27344,6 +27467,7 @@ "ManifestWindowSeconds": "The total duration (in seconds) of the manifest's content.", "ProgramDateTimeIntervalSeconds": "Inserts `EXT-X-PROGRAM-DATE-TIME` tags in the output manifest at the interval that you specify. If you don't enter an interval, `EXT-X-PROGRAM-DATE-TIME` tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. `ID3Timed` metadata messages generate every 5 seconds whenever MediaPackage ingests the content.\n\nIrrespective of this parameter, if any `ID3Timed` metadata is in the HLS input, MediaPackage passes through that metadata to the HLS output.", "ScteHls": "The SCTE-35 HLS configuration associated with the low-latency HLS (LL-HLS) manifest configuration of the origin endpoint.", + "StartTag": "", "Url": "The URL of the low-latency HLS (LL-HLS) manifest configuration of the origin endpoint." }, "AWS::MediaPackageV2::OriginEndpoint Scte": { @@ -27371,6 +27495,10 @@ "RoleArn": "The ARN for the IAM role granted by the key provider that provides access to the key provider API. This role must have a trust policy that allows MediaPackage to assume the role, and it must have a sufficient permissions policy to allow access to the specific key retrieval URL. Get this from your DRM solution provider.\n\nValid format: `arn:aws:iam::{accountID}:role/{name}` . The following example shows a role ARN: `arn:aws:iam::444455556666:role/SpekeAccess`", "Url": "The URL of the SPEKE key provider." }, + "AWS::MediaPackageV2::OriginEndpoint StartTag": { + "Precise": "Specify the value for PRECISE within your EXT-X-START tag. Leave blank, or choose false, to use the default value NO. Choose true to use the value YES.", + "TimeOffset": "Specify the value for TIME-OFFSET within your EXT-X-START tag. Enter a signed floating point value which, if positive, must be less than the configured manifest duration minus three times the configured segment target duration. If negative, the absolute value must be larger than three times the configured segment target duration, and the absolute value must be smaller than the configured manifest duration." + }, "AWS::MediaPackageV2::OriginEndpoint Tag": { "Key": "", "Value": "" @@ -27812,7 +27940,7 @@ "TLSInspectionConfigurationArn": "The Amazon Resource Name (ARN) of the TLS inspection configuration." }, "AWS::NetworkFirewall::FirewallPolicy FlowTimeouts": { - "TcpIdleTimeoutSeconds": "" + "TcpIdleTimeoutSeconds": "The number of seconds that can pass without any TCP traffic sent through the firewall before the firewall determines that the connection is idle.
After the idle timeout passes, data packets are dropped, however, the next TCP SYN packet is considered a new flow and is processed by the firewall. Clients or targets can use TCP keepalive packets to reset the idle timeout.\n\nYou can define the `TcpIdleTimeoutSeconds` value to be between 60 and 6000 seconds. If no value is provided, it defaults to 350 seconds." }, "AWS::NetworkFirewall::FirewallPolicy IPSet": { "Definition": "The list of IP addresses and address ranges, in CIDR notation." @@ -27824,7 +27952,7 @@ "Dimensions": "" }, "AWS::NetworkFirewall::FirewallPolicy StatefulEngineOptions": { - "FlowTimeouts": "", + "FlowTimeouts": "Configures the amount of time that can pass without any traffic sent through the firewall before the firewall determines that the connection is idle.", "RuleOrder": "Indicates how to manage the order of stateful rule evaluation for the policy. `DEFAULT_ACTION_ORDER` is the default behavior. Stateful rules are provided to the rule engine as Suricata compatible strings, and Suricata evaluates them based on certain settings. For more information, see [Evaluation order for stateful rules](https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html) in the *AWS Network Firewall Developer Guide* .", "StreamExceptionPolicy": "Configures how Network Firewall processes traffic when a network connection breaks midstream. Network connections can break due to disruptions in external networks or within the firewall itself.\n\n- `DROP` - Network Firewall fails closed and drops all subsequent traffic going to the firewall. This is the default behavior.\n- `CONTINUE` - Network Firewall continues to apply rules to the subsequent traffic without context from traffic before the break. This impacts the behavior of rules that depend on this context. For example, if you have a stateful rule to `drop http` traffic, Network Firewall won't match the traffic for this rule because the service won't have the context from session initialization defining the application layer protocol as HTTP. However, this behavior is rule dependent\u2014a TCP-layer rule using a `flow:stateless` rule would still match, as would the `aws:drop_strict` default action.\n- `REJECT` - Network Firewall fails closed and drops all subsequent traffic going to the firewall. Network Firewall also sends a TCP reject packet back to your client so that the client can immediately establish a new session. Network Firewall will have context about the new session and will apply rules to the subsequent traffic." }, @@ -28559,6 +28687,31 @@ "SubnetIds": "The ID of the subnets from which you access OpenSearch Serverless.", "VpcId": "The ID of the VPC from which you access OpenSearch Serverless." }, + "AWS::OpenSearchService::Application": { + "AppConfigs": "", + "DataSources": "", + "Endpoint": "Endpoint URL of an OpenSearch Application.", + "IamIdentityCenterOptions": "Container for IAM Identity Center Options settings.", + "Name": "Name of an OpenSearch Application.", + "Tags": "" + }, + "AWS::OpenSearchService::Application AppConfig": { + "Key": "Specify the item to configure, such as admin role for the OpenSearch Application.", + "Value": "Specifies the value to configure for the key, such as an IAM user ARN." + }, + "AWS::OpenSearchService::Application DataSource": { + "DataSourceArn": "", + "DataSourceDescription": "Detailed description of a data source." 
+ }, + "AWS::OpenSearchService::Application IamIdentityCenterOptions": { + "Enabled": "IAM Identity Center is enabled for the OpenSearch Application.", + "IamIdentityCenterInstanceArn": "", + "IamRoleForIdentityCenterApplicationArn": "Amazon Resource Name of the IAM Identity Center's Application created for the OpenSearch Application after enabling IAM Identity Center." + }, + "AWS::OpenSearchService::Application Tag": { + "Key": "The tag key. Tag keys must be unique for the domain to which they are attached.", + "Value": "The value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key value pair in a tag set of `project : Trinity` and `cost-center : Trinity`" + }, "AWS::OpenSearchService::Domain": { "AccessPolicies": "An AWS Identity and Access Management ( IAM ) policy document that specifies who can access the OpenSearch Service domain and their permissions. For more information, see [Configuring access policies](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ac.html#ac-creating) in the *Amazon OpenSearch Service Developer Guide* .", "AdvancedOptions": "Additional options to specify for the OpenSearch Service domain. For more information, see [AdvancedOptions](https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_CreateDomain.html#API_CreateDomain_RequestBody) in the OpenSearch Service API reference.", @@ -28571,6 +28724,7 @@ "EncryptionAtRestOptions": "Whether the domain should encrypt data at rest, and if so, the AWS KMS key to use. See [Encryption of data at rest for Amazon OpenSearch Service](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/encryption-at-rest.html) .\n\nIf no encryption at rest options were initially specified in the template, updating this property by adding it causes no interruption. However, if you change this property after it's already been set within a template, the domain is deleted and recreated in order to modify the property.", "EngineVersion": "The version of OpenSearch to use. The value must be in the format `OpenSearch_X.Y` or `Elasticsearch_X.Y` . If not specified, the latest version of OpenSearch is used. For information about the versions that OpenSearch Service supports, see [Supported versions of OpenSearch and Elasticsearch](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/what-is.html#choosing-version) in the *Amazon OpenSearch Service Developer Guide* .\n\nIf you set the [EnableVersionUpgrade](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-upgradeopensearchdomain) update policy to `true` , you can update `EngineVersion` without interruption. When `EnableVersionUpgrade` is set to `false` , or is not specified, updating `EngineVersion` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "IPAddressType": "Choose either dual stack or IPv4 as your IP address type. Dual stack allows you to share domain resources across IPv4 and IPv6 address types, and is the recommended option. 
If you set your IP address type to dual stack, you can't change your address type later.", + "IdentityCenterOptions": "Container for IAM Identity Center Option control for the domain.", "LogPublishingOptions": "An object with one or more of the following keys: `SEARCH_SLOW_LOGS` , `ES_APPLICATION_LOGS` , `INDEX_SLOW_LOGS` , `AUDIT_LOGS` , depending on the types of logs you want to publish. Each key needs a valid `LogPublishingOption` value. For the full syntax, see the [examples](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-opensearchservice-domain.html#aws-resource-opensearchservice-domain--examples) .", "NodeToNodeEncryptionOptions": "Specifies whether node-to-node encryption is enabled. See [Node-to-node encryption for Amazon OpenSearch Service](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ntn.html) .", "OffPeakWindowOptions": "Options for a domain's off-peak window, during which OpenSearch Service can perform mandatory configuration changes on the domain.", @@ -28630,6 +28784,14 @@ "Enabled": "Specify `true` to enable encryption at rest. Required if you enable fine-grained access control in [AdvancedSecurityOptionsInput](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-advancedsecurityoptionsinput.html) .\n\nIf no encryption at rest options were initially specified in the template, updating this property by adding it causes no interruption. However, if you change this property after it's already been set within a template, the domain is deleted and recreated in order to modify the property.", "KmsKeyId": "The KMS key ID. Takes the form `1a2a3a4-1a2a-3a4a-5a6a-1a2a3a4a5a6a` . Required if you enable encryption at rest.\n\nYou can also use `keyAlias` as a value.\n\nIf no encryption at rest options were initially specified in the template, updating this property by adding it causes no interruption. However, if you change this property after it's already been set within a template, the domain is deleted and recreated in order to modify the property." }, + "AWS::OpenSearchService::Domain IdentityCenterOptions": { + "EnabledAPIAccess": "True to enable IAM Identity Center for API access in Amazon OpenSearch Service.", + "IdentityCenterApplicationARN": "The ARN for IAM Identity Center Application which will integrate with Amazon OpenSearch Service.", + "IdentityCenterInstanceARN": "The ARN for IAM Identity Center Instance.", + "IdentityStoreId": "The ID of IAM Identity Store.", + "RolesKey": "Specify the attribute that contains the backend role (groupName, groupID) of IAM Identity Center", + "SubjectKey": "Specify the attribute that contains the subject (username, userID, email) of IAM Identity Center." + }, "AWS::OpenSearchService::Domain Idp": { "EntityId": "The unique entity ID of the application in the SAML identity provider.", "MetadataContent": "The metadata of the SAML application, in XML format." @@ -30457,7 +30619,7 @@ "AWS::QBusiness::WebExperience": { "ApplicationId": "The identifier of the Amazon Q Business web experience.", "IdentityProviderConfiguration": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience.", - "Origins": "", + "Origins": "Sets the website domain origins that are allowed to embed the Amazon Q Business web experience. 
The *domain origin* refers to the base URL for accessing a website including the protocol ( `http/https` ), the domain name, and the port number (if specified).\n\n> You must only submit a *base URL* and not a full path. For example, `https://docs.aws.amazon.com` .", "RoleArn": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n> You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value.", "SamplePromptsControlMode": "Determines whether sample prompts are enabled in the web experience for an end user.", "Subtitle": "A subtitle to personalize your Amazon Q Business web experience.", @@ -39218,6 +39380,7 @@ "AvailabilityZones": "A list of Availability Zones (AZs) where instances in the DB cluster can be created. For information on AWS Regions and Availability Zones, see [Choosing the Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "BackupRetentionPeriod": "The number of days for which automated backups are retained.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 1 to 35\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "ClusterScalabilityType": "Specifies the scalability mode of the Aurora DB cluster. When set to `limitless` , the cluster operates as an Aurora Limitless Database, allowing you to create a DB shard group for horizontal scaling (sharding) capabilities. When set to `standard` (the default), the cluster uses normal DB instance creation.", "CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBClusterIdentifier": "The DB cluster identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- First character must be a letter.\n- Can't end with a hyphen or contain two consecutive hyphens.\n\nExample: `my-cluster1`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBClusterInstanceClass": "The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example `db.m6gd.xlarge` . Not all DB instance classes are available in all AWS Regions , or for all database engines.\n\nFor the full list of DB instance classes and availability for your engine, see [DB instance class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nValid for Cluster Type: Multi-AZ DB clusters only", @@ -39499,6 +39662,19 @@ "EC2SecurityGroupName": "Name of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. 
Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", "EC2SecurityGroupOwnerId": "AWS account number of the owner of the EC2 security group specified in the `EC2SecurityGroupName` parameter. The AWS access key ID isn't an acceptable value. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." }, + "AWS::RDS::DBShardGroup": { + "ComputeRedundancy": "Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:\n\n- 0 - Creates a DB shard group without a standby DB shard group. This is the default value.\n- 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).\n- 2 - Creates a DB shard group with two standby DB shard groups in two different AZs.", + "DBClusterIdentifier": "The name of the primary DB cluster for the DB shard group.", + "DBShardGroupIdentifier": "The name of the DB shard group.", + "MaxACU": "The maximum capacity of the DB shard group in Aurora capacity units (ACUs).", + "MinACU": "The minimum capacity of the DB shard group in Aurora capacity units (ACUs).", + "PubliclyAccessible": "Specifies whether the DB shard group is publicly accessible.\n\nWhen the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. Access to the DB shard group is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB shard group doesn't permit it.\n\nWhen the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address.\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB shard group is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB shard group is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB shard group is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB shard group is public.", + "Tags": "An optional set of key-value pairs to associate arbitrary data of your choosing with the DB shard group." + }, + "AWS::RDS::DBShardGroup Tag": { + "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", + "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` .
The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." + }, "AWS::RDS::DBSubnetGroup": { "DBSubnetGroupDescription": "The description for the DB subnet group.", "DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", @@ -40346,7 +40522,7 @@ "ResourceRecords": "One or more values that correspond with the value that you specified for the `Type` property. For example, if you specified `A` for `Type` , you specify one or more IP addresses in IPv4 format for `ResourceRecords` . For information about the format of values for each record type, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nNote the following:\n\n- You can specify more than one value for all record types except CNAME and SOA.\n- The maximum length of a value is 4000 characters.\n- If you're creating an alias record, omit `ResourceRecords` .", "SetIdentifier": "*Resource record sets that have a routing policy other than simple:* An identifier that differentiates among multiple resource record sets that have the same combination of name and type, such as multiple weighted resource record sets named acme.example.com that have a type of A. In a group of resource record sets that have the same name and type, the value of `SetIdentifier` must be unique for each resource record set.\n\nFor information about routing policies, see [Choosing a Routing Policy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html) in the *Amazon Route 53 Developer Guide* .", "TTL": "The resource record cache time to live (TTL), in seconds. Note the following:\n\n- If you're creating or updating an alias resource record set, omit `TTL` . Amazon Route 53 uses the value of `TTL` for the alias target.\n- If you're associating this resource record set with a health check (if you're adding a `HealthCheckId` element), we recommend that you specify a `TTL` of 60 seconds or less so clients respond quickly to changes in health status.\n- All of the resource record sets in a group of weighted resource record sets must have the same value for `TTL` .\n- If a group of weighted resource record sets includes one or more weighted alias resource record sets for which the alias target is an ELB load balancer, we recommend that you specify a `TTL` of 60 seconds for all of the non-alias weighted resource record sets that have the same name and type. Values other than 60 seconds (the TTL for load balancers) will change the effect of the values that you specify for `Weight` .", - "Type": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` .
When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "Type": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. 
Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "Weight": "*Weighted resource record sets only:* Among resource record sets that have the same combination of DNS name and type, a value that determines the proportion of DNS queries that Amazon Route 53 responds to using the current resource record set. Route 53 calculates the sum of the weights for the resource record sets that have the same combination of DNS name and type. Route 53 then responds to queries based on the ratio of a resource's weight to the total. Note the following:\n\n- You must specify a value for the `Weight` element for every weighted resource record set.\n- You can only specify one `ResourceRecord` per weighted resource record set.\n- You can't create latency, failover, or geolocation resource record sets that have the same values for the `Name` and `Type` elements as weighted resource record sets.\n- You can create a maximum of 100 weighted resource record sets that have the same values for the `Name` and `Type` elements.\n- For weighted (but not weighted alias) resource record sets, if you set `Weight` to `0` for a resource record set, Route 53 never responds to queries with the applicable value for that resource record set. However, if you set `Weight` to `0` for all resource record sets that have the same combination of DNS name and type, traffic is routed to all resources with equal probability.\n\nThe effect of setting `Weight` to `0` is different when you associate health checks with weighted resource record sets. For more information, see [Options for Configuring Route 53 Active-Active and Active-Passive Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) in the *Amazon Route 53 Developer Guide* ." }, "AWS::Route53::RecordSet AliasTarget": { @@ -40418,7 +40594,7 @@ "ResourceRecords": "Information about the records that you want to create. Each record should be in the format appropriate for the record type specified by the `Type` property. 
For information about different record types and their record formats, see [Values That You Specify When You Create or Edit Amazon Route 53 Records](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values.html) in the *Amazon Route 53 Developer Guide* .", "SetIdentifier": "*Resource record sets that have a routing policy other than simple:* An identifier that differentiates among multiple resource record sets that have the same combination of name and type, such as multiple weighted resource record sets named acme.example.com that have a type of A. In a group of resource record sets that have the same name and type, the value of `SetIdentifier` must be unique for each resource record set.\n\nFor information about routing policies, see [Choosing a Routing Policy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html) in the *Amazon Route 53 Developer Guide* .", "TTL": "The resource record cache time to live (TTL), in seconds. Note the following:\n\n- If you're creating or updating an alias resource record set, omit `TTL` . Amazon Route 53 uses the value of `TTL` for the alias target.\n- If you're associating this resource record set with a health check (if you're adding a `HealthCheckId` element), we recommend that you specify a `TTL` of 60 seconds or less so clients respond quickly to changes in health status.\n- All of the resource record sets in a group of weighted resource record sets must have the same value for `TTL` .\n- If a group of weighted resource record sets includes one or more weighted alias resource record sets for which the alias target is an ELB load balancer, we recommend that you specify a `TTL` of 60 seconds for all of the non-alias weighted resource record sets that have the same name and type. Values other than 60 seconds (the TTL for load balancers) will change the effect of the values that you specify for `Weight` .", - "Type": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . 
\n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "Type": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. 
All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "Weight": "*Weighted resource record sets only:* Among resource record sets that have the same combination of DNS name and type, a value that determines the proportion of DNS queries that Amazon Route 53 responds to using the current resource record set. Route 53 calculates the sum of the weights for the resource record sets that have the same combination of DNS name and type. Route 53 then responds to queries based on the ratio of a resource's weight to the total. Note the following:\n\n- You must specify a value for the `Weight` element for every weighted resource record set.\n- You can only specify one `ResourceRecord` per weighted resource record set.\n- You can't create latency, failover, or geolocation resource record sets that have the same values for the `Name` and `Type` elements as weighted resource record sets.\n- You can create a maximum of 100 weighted resource record sets that have the same values for the `Name` and `Type` elements.\n- For weighted (but not weighted alias) resource record sets, if you set `Weight` to `0` for a resource record set, Route 53 never responds to queries with the applicable value for that resource record set. However, if you set `Weight` to `0` for all resource record sets that have the same combination of DNS name and type, traffic is routed to all resources with equal probability.\n\nThe effect of setting `Weight` to `0` is different when you associate health checks with weighted resource record sets. For more information, see [Options for Configuring Route 53 Active-Active and Active-Passive Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) in the *Amazon Route 53 Developer Guide* ." }, "AWS::Route53Profiles::Profile": { @@ -41320,6 +41496,7 @@ "EngagementMetrics": "Specifies the status of your VDM engagement metrics collection. Can be one of the following:\n\n- `ENABLED` \u2013 Amazon SES enables engagement metrics for the configuration set.\n- `DISABLED` \u2013 Amazon SES disables engagement metrics for the configuration set." }, "AWS::SES::ConfigurationSet DeliveryOptions": { + "MaxDeliverySeconds": "The maximum amount of time, in seconds, that Amazon SES API v2 will attempt delivery of email. If specified, the value must be greater than or equal to 300 seconds (5 minutes) and less than or equal to 50400 seconds (840 minutes).", "SendingPoolName": "The name of the dedicated IP pool to associate with the configuration set.", "TlsPolicy": "Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is `REQUIRE` , messages are only delivered if a TLS connection can be established.
If the value is `OPTIONAL` , messages can be delivered in plain text if a TLS connection can't be established.\n\nValid Values: `REQUIRE | OPTIONAL`" }, @@ -46826,6 +47003,11 @@ "AWS::Wisdom::AIPrompt TextFullAIPromptEditTemplateConfiguration": { "Text": "" }, + "AWS::Wisdom::AIPromptVersion": { + "AIPromptId": "", + "AssistantId": "", + "ModifiedTimeSeconds": "" + }, "AWS::Wisdom::Assistant": { "Description": "The description of the assistant.", "Name": "The name of the assistant.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 1e37b45b3..d704906d4 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -9380,13 +9380,9 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", - "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "The tag value can be up to 256 characters.", - "title": "Value", "type": "string" } }, @@ -30224,7 +30220,7 @@ "additionalProperties": false, "properties": { "EmbeddingModelArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.", + "markdownDescription": "The Amazon Resource Name (ARN) of the model or inference profile used to create vector embeddings for the knowledge base.", "title": "EmbeddingModelArn", "type": "string" } @@ -63089,7 +63085,7 @@ "additionalProperties": false, "properties": { "CloudWatchLogGroupArn": { - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task.\n\nFor more information, see [Monitoring DataSync with Amazon CloudWatch](https://docs.aws.amazon.com/datasync/latest/userguide/monitor-datasync.html) .", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task.\n\nFor Enhanced mode tasks, you don't need to specify anything. DataSync automatically sends logs to a CloudWatch log group named `/aws/datasync` .\n\nFor more information, see [Monitoring data transfers with CloudWatch Logs](https://docs.aws.amazon.com/datasync/latest/userguide/configure-logging.html) .", "title": "CloudWatchLogGroupArn", "type": "string" }, @@ -63110,7 +63106,7 @@ "items": { "$ref": "#/definitions/AWS::DataSync::Task.FilterRule" }, - "markdownDescription": "Specifies include filters define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see [Specifying what DataSync transfers by using filters](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", + "markdownDescription": "Specifies include filters that define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see [Specifying what DataSync transfers by using filters](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", "title": "Includes", "type": "array" }, @@ -66268,7 +66264,7 @@ "additionalProperties": false, "properties": { "AppBoundaryKey": { - "markdownDescription": "An AWS tag *key* that is used to identify the AWS resources that DevOps Guru analyzes. 
All AWS resources in your account and Region tagged with this *key* make up your DevOps Guru application and analysis boundary.\n\n> The string used for a *key* in a tag that you use to define your resource coverage must begin with the prefix `Devops-guru-` . The tag *key* might be `DevOps-Guru-deployment-application` or `devops-guru-rds-application` . When you create a *key* , the case of characters in the *key* can be whatever you choose. After you create a *key* , it is case-sensitive. For example, DevOps Guru works with a *key* named `devops-guru-rds` and a *key* named `DevOps-Guru-RDS` , and these act as two different *keys* . Possible *key* / *value* pairs in your application might be `Devops-Guru-production-application/RDS` or `Devops-Guru-production-application/containers` .", + "markdownDescription": "An AWS tag *key* that is used to identify the AWS resources that DevOps Guru analyzes. All AWS resources in your account and Region tagged with this *key* make up your DevOps Guru application and analysis boundary.\n\n> When you create a *key* , the case of characters in the *key* can be whatever you choose. After you create a *key* , it is case-sensitive. For example, DevOps Guru works with a *key* named `devops-guru-rds` and a *key* named `DevOps-Guru-RDS` , and these act as two different *keys* . Possible *key* / *value* pairs in your application might be `Devops-Guru-production-application/RDS` or `Devops-Guru-production-application/containers` .", "title": "AppBoundaryKey", "type": "string" }, @@ -92958,12 +92954,12 @@ "title": "MutualAuthentication" }, "Port": { - "markdownDescription": "The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer.", + "markdownDescription": "The port on which the load balancer is listening. You can't specify a port for a Gateway Load Balancer.", "title": "Port", "type": "number" }, "Protocol": { - "markdownDescription": "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.", + "markdownDescription": "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer.", "title": "Protocol", "type": "string" }, @@ -93279,7 +93275,7 @@ "type": "string" }, "Protocol": { - "markdownDescription": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.", + "markdownDescription": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You can't redirect HTTPS to HTTP.", "title": "Protocol", "type": "string" }, @@ -93846,7 +93842,7 @@ "type": "string" }, "Protocol": { - "markdownDescription": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.", + "markdownDescription": "The protocol. You can specify HTTP, HTTPS, or #{protocol}. 
You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You can't redirect HTTPS to HTTP.", "title": "Protocol", "type": "string" }, @@ -94002,7 +93998,7 @@ "type": "string" }, "IpAddressType": { - "markdownDescription": "Note: Internal load balancers must use the `ipv4` IP address type.\n\n[Application Load Balancers] The IP address type. The possible values are `ipv4` (for only IPv4 addresses), `dualstack` (for IPv4 and IPv6 addresses), and `dualstack-without-public-ipv4` (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).\n\nNote: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors.\n\n[Network Load Balancers] The IP address type. The possible values are `ipv4` (for only IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses). You can\u2019t specify `dualstack` for a load balancer with a UDP or TCP_UDP listener.\n\n[Gateway Load Balancers] The IP address type. The possible values are `ipv4` (for only IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses).", + "markdownDescription": "The IP address type. Internal load balancers must use `ipv4` .\n\n[Application Load Balancers] The possible values are `ipv4` (IPv4 addresses), `dualstack` (IPv4 and IPv6 addresses), and `dualstack-without-public-ipv4` (public IPv6 addresses and private IPv4 and IPv6 addresses).\n\nApplication Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors.\n\n[Network Load Balancers and Gateway Load Balancers] The possible values are `ipv4` (IPv4 addresses) and `dualstack` (IPv4 and IPv6 addresses).", "title": "IpAddressType", "type": "string" }, @@ -94020,7 +94016,7 @@ "type": "string" }, "Scheme": { - "markdownDescription": "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n\nThe nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.\n\nThe default is an Internet-facing load balancer.\n\nYou cannot specify a scheme for a Gateway Load Balancer.", + "markdownDescription": "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n\nThe nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. 
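The listener and redirect-action descriptions in the preceding hunks allow HTTP-to-HTTPS redirects but not HTTPS-to-HTTP. A minimal, hypothetical listener sketch showing the allowed direction follows; `MyLoadBalancer` is assumed to be an Application Load Balancer defined elsewhere in the same template:

```yaml
Resources:
  # Hypothetical HTTP listener whose default action redirects to HTTPS.
  HttpListener:
    Type: AWS::ElasticLoadBalancingV2::Listener
    Properties:
      LoadBalancerArn: !Ref MyLoadBalancer
      Port: 80
      Protocol: HTTP
      DefaultActions:
        - Type: redirect
          RedirectConfig:
            Protocol: HTTPS      # HTTP -> HTTPS is allowed; HTTPS -> HTTP is not
            Port: "443"
            StatusCode: HTTP_301
```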
Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.\n\nThe default is an Internet-facing load balancer.\n\nYou can't specify a scheme for a Gateway Load Balancer.", "title": "Scheme", "type": "string" }, @@ -94036,7 +94032,7 @@ "items": { "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::LoadBalancer.SubnetMapping" }, - "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", + "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You can't specify Elastic IP addresses for your subnets.", "title": "SubnetMappings", "type": "array" }, @@ -94044,7 +94040,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", + "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. 
To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", "title": "Subnets", "type": "array" }, @@ -94088,7 +94084,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). 
The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . 
The default for Application Load Balancers is `true` , and can't be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . 
The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.\n- `zonal_shift.config.enabled` - Indicates whether zonal shift is enabled. The possible values are `true` and `false` . The default is `false` .", "title": "Key", "type": "string" }, @@ -94165,7 +94161,7 @@ "additionalProperties": false, "properties": { "HealthCheckEnabled": { - "markdownDescription": "Indicates whether health checks are enabled. If the target type is `lambda` , health checks are disabled by default but can be enabled. If the target type is `instance` , `ip` , or `alb` , health checks are always enabled and cannot be disabled.", + "markdownDescription": "Indicates whether health checks are enabled. If the target type is `lambda` , health checks are disabled by default but can be enabled. If the target type is `instance` , `ip` , or `alb` , health checks are always enabled and can't be disabled.", "title": "HealthCheckEnabled", "type": "boolean" }, @@ -94200,7 +94196,7 @@ "type": "number" }, "IpAddressType": { - "markdownDescription": "The type of IP address used for this target group. The possible values are `ipv4` and `ipv6` . This is an optional parameter. If not specified, the IP address type defaults to `ipv4` .", + "markdownDescription": "The IP address type. The default value is `ipv4` .", "title": "IpAddressType", "type": "string" }, @@ -94335,7 +94331,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. 
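Pulling together the load balancer `Scheme`, `IpAddressType`, `Subnets`, and attribute descriptions from the preceding hunks, a minimal, hypothetical Application Load Balancer sketch could look like this; the subnet and security group IDs are placeholders:

```yaml
Resources:
  # Hypothetical internet-facing, dual-stack Application Load Balancer.
  MyLoadBalancer:
    Type: AWS::ElasticLoadBalancingV2::LoadBalancer
    Properties:
      Type: application
      Scheme: internet-facing      # can't be specified for a Gateway Load Balancer
      IpAddressType: dualstack
      Subnets:                     # ALBs need subnets in at least two Availability Zones
        - subnet-0aaa1111bbb22223
        - subnet-0ccc3333ddd44445
      SecurityGroups:
        - sg-0123456789abcdef0
      LoadBalancerAttributes:
        # A couple of the ALB attribute keys listed in the description above.
        - Key: idle_timeout.timeout_seconds
          Value: "120"
        - Key: routing.http2.enabled
          Value: "true"
```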
If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. 
After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. 
The value you set for both attributes must be the same.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). 
The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. 
The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.", "title": "Key", "type": "string" }, @@ -143119,7 +143115,7 @@ "properties": { "Variables": { "additionalProperties": true, - "markdownDescription": "Environment variable key-value pairs. For more information, see [Using Lambda environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html) .", + "markdownDescription": "Environment variable key-value pairs. For more information, see [Using Lambda environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html) .\n\nIf the value of the environment variable is a time or a duration, enclose the value in quotes.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -232479,7 +232475,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . 
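For the target group attribute keys described in the long hunk above, a minimal, hypothetical target group sketch follows; the VPC ID is a placeholder and the chosen keys are just a sample of those listed:

```yaml
Resources:
  # Hypothetical instance target group with a few tuned attributes.
  AppTargets:
    Type: AWS::ElasticLoadBalancingV2::TargetGroup
    Properties:
      VpcId: vpc-0123456789abcdef0
      Protocol: HTTP
      Port: 80
      TargetType: instance
      TargetGroupAttributes:
        - Key: deregistration_delay.timeout_seconds
          Value: "120"
        - Key: stickiness.enabled
          Value: "true"
        - Key: stickiness.type
          Value: lb_cookie          # valid for Application Load Balancers
        - Key: load_balancing.algorithm.type
          Value: least_outstanding_requests
```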
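The Lambda `Environment` hunk above adds a note about quoting time- or duration-like values. A minimal, hypothetical function sketch showing why the quotes matter follows; the role ARN and variable names are placeholders:

```yaml
Resources:
  # Hypothetical function; quoting "02:30" keeps YAML from reading it as
  # a sexagesimal number instead of a string.
  WorkerFunction:
    Type: AWS::Lambda::Function
    Properties:
      Handler: index.handler
      Runtime: python3.12
      Role: arn:aws:iam::123456789012:role/worker-role
      Code:
        ZipFile: |
          def handler(event, context):
              return "ok"
      Environment:
        Variables:
          MAINTENANCE_WINDOW: "02:30"
          RETRY_DELAY_SECONDS: "90"
```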
This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "title": "Type", "type": "string" }, @@ -232898,7 +232894,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The DNS record type. 
For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS` . 
When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA` | `TLSA` | `SSHFP` | `SVCB` | `HTTPS`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "title": "Type", "type": "string" },
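To illustrate the alias-record guidance retained in the Route 53 `Type` descriptions above (an alias at the zone apex must use the same type as its target, and `CNAME` is not allowed there), a minimal, hypothetical record set follows; `MyLoadBalancer` and the hosted zone name are assumed to exist in the same template and account:

```yaml
Resources:
  # Hypothetical apex alias A record pointing at an Application Load Balancer.
  ApexAlias:
    Type: AWS::Route53::RecordSet
    Properties:
      HostedZoneName: example.com.
      Name: example.com.
      Type: A                       # matches the record type of the alias target
      AliasTarget:
        DNSName: !GetAtt MyLoadBalancer.DNSName
        HostedZoneId: !GetAtt MyLoadBalancer.CanonicalHostedZoneID
```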