diff --git a/CHANGELOG.md b/CHANGELOG.md index 8dda618e73c..7b9ce12b38b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,24 @@ Unreleased Changes ------------------ +* Feature - Aws::CodePipeline - Updated the API, and documentation for AWS CodePipeline. + +* Feature - Aws::DatabaseMigrationService - Updated the API, and documentation for AWS Database Migration Service. + +* Feature - Aws::IoTSecureTunneling - Updated the API, and documentation for AWS IoT Secure Tunneling. + +* Feature - Aws::IoTSiteWise - Updated the API, and documentation for AWS IoT SiteWise. + +* Feature - Aws::QuickSight - Updated the API, and documentation for Amazon QuickSight. + +* Feature - Aws::SageMaker - Updated the API, and documentation for Amazon SageMaker Service. + +* Feature - Aws::ServiceCatalog - Updated the API, and documentation for AWS Service Catalog. + +* Feature - Aws::SNS - Updated the documentation for Amazon Simple Notification Service. + +* Feature - Aws::Synthetics - Updated the API, and documentation for Synthetics. + 2.11.627 (2020-11-13) ------------------ diff --git a/aws-sdk-core/apis/codepipeline/2015-07-09/api-2.json b/aws-sdk-core/apis/codepipeline/2015-07-09/api-2.json index 301a02c1c63..935f759bb3c 100644 --- a/aws-sdk-core/apis/codepipeline/2015-07-09/api-2.json +++ b/aws-sdk-core/apis/codepipeline/2015-07-09/api-2.json @@ -462,6 +462,7 @@ "output":{"shape":"RetryStageExecutionOutput"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"PipelineNotFoundException"}, {"shape":"StageNotFoundException"}, {"shape":"StageNotRetryableException"}, @@ -478,6 +479,7 @@ "output":{"shape":"StartPipelineExecutionOutput"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"PipelineNotFoundException"} ] }, @@ -491,6 +493,7 @@ "output":{"shape":"StopPipelineExecutionOutput"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"PipelineNotFoundException"}, {"shape":"PipelineExecutionNotStoppableException"}, {"shape":"DuplicatedStopRequestException"} @@ -705,6 +708,7 @@ "ActionExecution":{ "type":"structure", "members":{ + "actionExecutionId":{"shape":"ActionExecutionId"}, "status":{"shape":"ActionExecutionStatus"}, "summary":{"shape":"ExecutionSummary"}, "lastStatusChange":{"shape":"Timestamp"}, @@ -1063,6 +1067,13 @@ }, "exception":true }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, "ContinuationToken":{ "type":"string", "max":2048, @@ -1227,7 +1238,7 @@ }, "EncryptionKeyId":{ "type":"string", - "max":100, + "max":400, "min":1 }, "EncryptionKeyType":{ @@ -2189,6 +2200,7 @@ "type":"structure", "members":{ "stageName":{"shape":"StageName"}, + "inboundExecution":{"shape":"StageExecution"}, "inboundTransitionState":{"shape":"TransitionState"}, "actionStates":{"shape":"ActionStateList"}, "latestExecution":{"shape":"StageExecution"} diff --git a/aws-sdk-core/apis/codepipeline/2015-07-09/docs-2.json b/aws-sdk-core/apis/codepipeline/2015-07-09/docs-2.json index 7c8a8d260a8..b03b5c56138 100644 --- a/aws-sdk-core/apis/codepipeline/2015-07-09/docs-2.json +++ b/aws-sdk-core/apis/codepipeline/2015-07-09/docs-2.json @@ -92,8 +92,8 @@ "ActionCategory": { "base": null, "refs": { - "ActionTypeId$category": "

A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.

", - "CreateCustomActionTypeInput$category": "

The category of the custom action, such as a build action or a test action.

Although Source and Approval are listed as valid values, they are not currently functional. These values are reserved for future use.

", + "ActionTypeId$category": "

A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.

", + "CreateCustomActionTypeInput$category": "

The category of the custom action, such as a build action or a test action.

", "DeleteCustomActionTypeInput$category": "

The category of the custom action that you want to delete, such as source or deploy.

" } }, @@ -191,6 +191,7 @@ "base": null, "refs": { "ActionContext$actionExecutionId": "

The system-generated unique ID that corresponds to an action's execution.

", + "ActionExecution$actionExecutionId": "

The ID of the workflow action execution in the current stage. Use the GetPipelineState action to retrieve the details of the current action execution in the stage.

For older executions, this field might be empty. The action execution ID is available for executions run on or after March 2020.
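For illustration, a minimal Ruby sketch of reading the new action execution ID from GetPipelineState through the generated client, assuming the SDK's usual snake_case method and member names (the pipeline name is a placeholder):

```ruby
require 'aws-sdk-core'

codepipeline = Aws::CodePipeline::Client.new(region: 'us-east-1')
resp = codepipeline.get_pipeline_state(name: 'MyPipeline')

resp.stage_states.each do |stage|
  stage.action_states.each do |action|
    execution = action.latest_execution
    # action_execution_id can be nil for executions run before March 2020.
    puts "#{stage.stage_name}/#{action.action_name}: #{execution && execution.action_execution_id}"
  end
end
```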

", "ActionExecutionDetail$actionExecutionId": "

The action execution ID.

" } }, @@ -253,7 +254,7 @@ "ActionOwner": { "base": null, "refs": { - "ActionTypeId$owner": "

The creator of the action being called.

", + "ActionTypeId$owner": "

The creator of the action being called. There are three valid values for the Owner field in the action category section within your pipeline structure: AWS, ThirdParty, and Custom. For more information, see Valid Action Types and Providers in CodePipeline.

", "ListActionTypesInput$actionOwnerFilter": "

Filters the list of action types to those created by a specified entity.

" } }, @@ -513,6 +514,11 @@ "refs": { } }, + "ConflictException": { + "base": "

Your request cannot be handled because the pipeline is busy handling ongoing activities. Try again later.
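A minimal Ruby sketch of backing off when this error is returned, assuming the SDK's usual per-shape error class generation (the pipeline name is a placeholder):

```ruby
require 'aws-sdk-core'

codepipeline = Aws::CodePipeline::Client.new(region: 'us-east-1')
attempts = 0
begin
  codepipeline.start_pipeline_execution(name: 'MyPipeline')
rescue Aws::CodePipeline::Errors::ConflictException
  # The pipeline is busy handling ongoing activities; wait and retry a few times.
  attempts += 1
  raise if attempts > 3
  sleep 30
  retry
end
```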

", + "refs": { + } + }, "ContinuationToken": { "base": null, "refs": { @@ -1001,6 +1007,7 @@ "base": null, "refs": { "ConcurrentModificationException$message": null, + "ConflictException$message": null, "DuplicatedStopRequestException$message": null, "ErrorDetails$message": "

The text of the error message.

", "FailureDetails$message": "

The message about the failure.

", @@ -1193,7 +1200,7 @@ "ListActionExecutionsInput$pipelineName": "

The name of the pipeline for which you want to list action execution history.

", "ListPipelineExecutionsInput$pipelineName": "

The name of the pipeline for which you want to get execution summary information.

", "PipelineContext$pipelineName": "

The name of the pipeline. This is a user-specified value. Pipeline names must be unique across all pipeline names under an Amazon Web Services account.

", - "PipelineDeclaration$name": "

The name of the action to be performed.

", + "PipelineDeclaration$name": "

The name of the pipeline.

", "PipelineExecution$pipelineName": "

The name of the pipeline with the specified pipeline execution.

", "PipelineSummary$name": "

The name of the pipeline.

", "PutActionRevisionInput$pipelineName": "

The name of the pipeline that starts processing the revision to the source.

", @@ -1478,6 +1485,7 @@ "StageExecution": { "base": "

Represents information about the run of a stage.

", "refs": { + "StageState$inboundExecution": null, "StageState$latestExecution": "

Information about the latest execution in the stage, including its ID and status.

" } }, diff --git a/aws-sdk-core/apis/dms/2016-01-01/api-2.json b/aws-sdk-core/apis/dms/2016-01-01/api-2.json index 50b9382231d..ba11dbf6d4e 100644 --- a/aws-sdk-core/apis/dms/2016-01-01/api-2.json +++ b/aws-sdk-core/apis/dms/2016-01-01/api-2.json @@ -64,7 +64,8 @@ {"shape":"ResourceQuotaExceededFault"}, {"shape":"InvalidResourceStateFault"}, {"shape":"ResourceNotFoundFault"}, - {"shape":"AccessDeniedFault"} + {"shape":"AccessDeniedFault"}, + {"shape":"S3AccessDeniedFault"} ] }, "CreateEventSubscription":{ @@ -604,6 +605,20 @@ {"shape":"KMSKeyNotAccessibleFault"} ] }, + "MoveReplicationTask":{ + "name":"MoveReplicationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MoveReplicationTaskMessage"}, + "output":{"shape":"MoveReplicationTaskResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ] + }, "RebootReplicationInstance":{ "name":"RebootReplicationInstance", "http":{ @@ -2002,6 +2017,23 @@ "KmsKeyId":{"shape":"String"} } }, + "MoveReplicationTaskMessage":{ + "type":"structure", + "required":[ + "ReplicationTaskArn", + "TargetReplicationInstanceArn" + ], + "members":{ + "ReplicationTaskArn":{"shape":"String"}, + "TargetReplicationInstanceArn":{"shape":"String"} + } + }, + "MoveReplicationTaskResponse":{ + "type":"structure", + "members":{ + "ReplicationTask":{"shape":"ReplicationTask"} + } + }, "MySQLSettings":{ "type":"structure", "members":{ @@ -2379,7 +2411,8 @@ "RecoveryCheckpoint":{"shape":"String"}, "ReplicationTaskArn":{"shape":"String"}, "ReplicationTaskStats":{"shape":"ReplicationTaskStats"}, - "TaskData":{"shape":"String"} + "TaskData":{"shape":"String"}, + "TargetReplicationInstanceArn":{"shape":"String"} } }, "ReplicationTaskAssessmentResult":{ diff --git a/aws-sdk-core/apis/dms/2016-01-01/docs-2.json b/aws-sdk-core/apis/dms/2016-01-01/docs-2.json index bfc8133cfb4..1222a2891bc 100644 --- a/aws-sdk-core/apis/dms/2016-01-01/docs-2.json +++ b/aws-sdk-core/apis/dms/2016-01-01/docs-2.json @@ -46,6 +46,7 @@ "ModifyReplicationInstance": "

Modifies the replication instance to apply new settings. You can change one or more parameters by specifying these parameters and the new values in the request.

Some settings are applied during the maintenance window.

", "ModifyReplicationSubnetGroup": "

Modifies the settings for the specified replication subnet group.

", "ModifyReplicationTask": "

Modifies the specified replication task.

You can't modify the task endpoints. The task must be stopped before you can modify it.

For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.

", + "MoveReplicationTask": "

Moves a replication task from its current replication instance to a different target replication instance using the specified parameters. The target replication instance must be created with the same or later AWS DMS version as the current replication instance.
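A minimal Ruby sketch of calling the new operation through the generated client, assuming the SDK's usual snake_case method and parameter names (ARNs are placeholders):

```ruby
require 'aws-sdk-core'

dms = Aws::DatabaseMigrationService::Client.new(region: 'us-east-1')
resp = dms.move_replication_task(
  replication_task_arn: 'arn:aws:dms:us-east-1:123456789012:task:EXAMPLETASK',
  target_replication_instance_arn: 'arn:aws:dms:us-east-1:123456789012:rep:EXAMPLEINSTANCE'
)
# The response echoes the task, including the new target_replication_instance_arn member.
puts resp.replication_task.status
```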

", "RebootReplicationInstance": "

Reboots a replication instance. Rebooting results in a momentary outage, until the replication instance becomes available again.

", "RefreshSchemas": "

Populates the schema for the specified endpoint. This is an asynchronous operation and can take several minutes. You can check the status of this operation by calling the DescribeRefreshSchemasStatus operation.

", "ReloadTables": "

Reloads the target database table with the source data.

", @@ -1091,6 +1092,16 @@ "ModifyEndpointMessage$MongoDbSettings": "

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" } }, + "MoveReplicationTaskMessage": { + "base": "

", + "refs": { + } + }, + "MoveReplicationTaskResponse": { + "base": "

", + "refs": { + } + }, "MySQLSettings": { "base": "

Provides information that defines a MySQL endpoint.

", "refs": { @@ -1320,6 +1331,7 @@ "CreateReplicationTaskResponse$ReplicationTask": "

The replication task that was created.

", "DeleteReplicationTaskResponse$ReplicationTask": "

The deleted replication task.

", "ModifyReplicationTaskResponse$ReplicationTask": "

The replication task that was modified.

", + "MoveReplicationTaskResponse$ReplicationTask": "

The replication task that was moved.

", "ReplicationTaskList$member": null, "StartReplicationTaskAssessmentResponse$ReplicationTask": "

The assessed replication task.

", "StartReplicationTaskResponse$ReplicationTask": "

The replication task started.

", @@ -1741,6 +1753,8 @@ "MongoDbSettings$DocsToInvestigate": "

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to \"one\".

Must be a positive value greater than 0. Default value is 1000.

", "MongoDbSettings$AuthSource": "

The MongoDB database name. This setting isn't used when AuthType is set to \"no\".

The default is \"admin\".

", "MongoDbSettings$KmsKeyId": "

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

", + "MoveReplicationTaskMessage$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the task that you want to move.

", + "MoveReplicationTaskMessage$TargetReplicationInstanceArn": "

The ARN of the replication instance where you want to move the task.

", "MySQLSettings$AfterConnectScript": "

Specifies a script to run immediately after AWS DMS connects to the endpoint. The migration task continues running regardless if the SQL statement succeeds or fails.

", "MySQLSettings$DatabaseName": "

Database name for the endpoint.

", "MySQLSettings$ServerName": "

Fully qualified domain name of the endpoint.

", @@ -1813,19 +1827,20 @@ "ReplicationSubnetGroup$VpcId": "

The ID of the VPC.

", "ReplicationSubnetGroup$SubnetGroupStatus": "

The status of the subnet group.

", "ReplicationTask$ReplicationTaskIdentifier": "

The user-assigned replication task identifier or name.

Constraints:

", - "ReplicationTask$SourceEndpointArn": "

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

", - "ReplicationTask$TargetEndpointArn": "

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

", - "ReplicationTask$ReplicationInstanceArn": "

The Amazon Resource Name (ARN) of the replication instance.

", + "ReplicationTask$SourceEndpointArn": "

The Amazon Resource Name (ARN) that uniquely identifies the endpoint.

", + "ReplicationTask$TargetEndpointArn": "

The ARN that uniquely identifies the endpoint.

", + "ReplicationTask$ReplicationInstanceArn": "

The ARN of the replication instance.

", "ReplicationTask$TableMappings": "

Table mappings specified in the task.

", "ReplicationTask$ReplicationTaskSettings": "

The settings for the replication task.

", - "ReplicationTask$Status": "

The status of the replication task.

", + "ReplicationTask$Status": "

The status of the replication task. This response parameter can return one of the following values:

", "ReplicationTask$LastFailureMessage": "

The last error (failure) message generated for the replication task.

", - "ReplicationTask$StopReason": "

The reason the replication task was stopped. This response parameter can return one of the following values:

", + "ReplicationTask$StopReason": "

The reason the replication task was stopped. This response parameter can return one of the following values:

", "ReplicationTask$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want the CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

", "ReplicationTask$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 “

", "ReplicationTask$RecoveryCheckpoint": "

Indicates the last checkpoint that occurred during a change data capture (CDC) operation. You can provide this value to the CdcStartPosition parameter to start a CDC operation that begins at that checkpoint.

", "ReplicationTask$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", "ReplicationTask$TaskData": "

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

", + "ReplicationTask$TargetReplicationInstanceArn": "

The ARN of the replication instance to which this task is moved in response to running the MoveReplicationTask operation. Otherwise, this response parameter isn't a member of the ReplicationTask object.

", "ReplicationTaskAssessmentResult$ReplicationTaskIdentifier": "

The replication task identifier of the task on which the task assessment was run.

", "ReplicationTaskAssessmentResult$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", "ReplicationTaskAssessmentResult$AssessmentStatus": "

The status of the task assessment.

", diff --git a/aws-sdk-core/apis/iotsecuretunneling/2018-10-05/api-2.json b/aws-sdk-core/apis/iotsecuretunneling/2018-10-05/api-2.json index c627acd4111..a385ff1c0ce 100644 --- a/aws-sdk-core/apis/iotsecuretunneling/2018-10-05/api-2.json +++ b/aws-sdk-core/apis/iotsecuretunneling/2018-10-05/api-2.json @@ -156,10 +156,7 @@ }, "DestinationConfig":{ "type":"structure", - "required":[ - "thingName", - "services" - ], + "required":["services"], "members":{ "thingName":{"shape":"ThingName"}, "services":{"shape":"ServiceList"} @@ -240,14 +237,13 @@ }, "Service":{ "type":"string", - "max":8, + "max":128, "min":1, "pattern":"[a-zA-Z0-9:_-]+" }, "ServiceList":{ "type":"list", "member":{"shape":"Service"}, - "max":1, "min":1 }, "Tag":{ diff --git a/aws-sdk-core/apis/iotsecuretunneling/2018-10-05/docs-2.json b/aws-sdk-core/apis/iotsecuretunneling/2018-10-05/docs-2.json index 64a2f282ff2..f89905bf778 100644 --- a/aws-sdk-core/apis/iotsecuretunneling/2018-10-05/docs-2.json +++ b/aws-sdk-core/apis/iotsecuretunneling/2018-10-05/docs-2.json @@ -1,12 +1,12 @@ { "version": "2.0", - "service": "AWS IoT Secure Tunneling

AWS IoT Secure Tunnling enables you to create remote connections to devices deployed in the field.

For more information about how AWS IoT Secure Tunneling works, see the User Guide.

", + "service": "AWS IoT Secure Tunneling

AWS IoT Secure Tunneling enables you to create remote connections to devices deployed in the field.

For more information about how AWS IoT Secure Tunneling works, see AWS IoT Secure Tunneling.

", "operations": { "CloseTunnel": "

Closes a tunnel identified by the unique tunnel id. When a CloseTunnel request is received, we close the WebSocket connections between the client and proxy server so no data can be transmitted.

", "DescribeTunnel": "

Gets information about a tunnel identified by the unique tunnel id.

", "ListTagsForResource": "

Lists the tags for the specified resource.

", "ListTunnels": "

Lists all tunnels for an AWS account. Tunnels are listed by creation time in descending order, so newer tunnels are listed before older tunnels.

", - "OpenTunnel": "

Creates a new tunnel, and returns two client access tokens for clients to use to connect to the AWS IoT Secure Tunneling proxy server. .

", + "OpenTunnel": "

Creates a new tunnel, and returns two client access tokens for clients to use to connect to the AWS IoT Secure Tunneling proxy server.

", "TagResource": "

Adds a tag to a resource.

", "UntagResource": "

Removes a tag from a resource.

" }, @@ -159,7 +159,7 @@ "ServiceList": { "base": null, "refs": { - "DestinationConfig$services": "

A list of service names that identity the target application. Currently, you can only specify a single name. The AWS IoT client running on the destination device reads this value and uses it to look up a port or an IP address and a port. The AWS IoT client instantiates the local proxy which uses this information to connect to the destination application.

" + "DestinationConfig$services": "

A list of service names that identify the target application. The AWS IoT client running on the destination device reads this value and uses it to look up a port or an IP address and a port. The AWS IoT client instantiates the local proxy, which uses this information to connect to the destination application.
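A minimal Ruby sketch of opening a tunnel with the relaxed destination configuration (thingName now optional, multiple service names allowed), assuming the SDK's usual snake_case method and parameter names (the thing name and service names are placeholders):

```ruby
require 'aws-sdk-core'

tunneling = Aws::IoTSecureTunneling::Client.new(region: 'us-east-1')
resp = tunneling.open_tunnel(
  description: 'Remote access to a field device',
  destination_config: {
    thing_name: 'example-thing',      # optional as of this model update
    services: ['SSH', 'RemoteDebug']  # more than one service name is now accepted
  }
)
puts resp.source_access_token
puts resp.destination_access_token
```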

" } }, "Tag": { diff --git a/aws-sdk-core/apis/iotsitewise/2019-12-02/api-2.json b/aws-sdk-core/apis/iotsitewise/2019-12-02/api-2.json index 36f1b99b3e7..a5abf8eb8ee 100644 --- a/aws-sdk-core/apis/iotsitewise/2019-12-02/api-2.json +++ b/aws-sdk-core/apis/iotsitewise/2019-12-02/api-2.json @@ -195,6 +195,22 @@ ], "endpoint":{"hostPrefix":"monitor."} }, + "CreatePresignedPortalUrl":{ + "name":"CreatePresignedPortalUrl", + "http":{ + "method":"GET", + "requestUri":"/portals/{portalId}/presigned-url", + "responseCode":200 + }, + "input":{"shape":"CreatePresignedPortalUrlRequest"}, + "output":{"shape":"CreatePresignedPortalUrlResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "endpoint":{"hostPrefix":"monitor."} + }, "CreateProject":{ "name":"CreateProject", "http":{ @@ -1639,6 +1655,29 @@ "ssoApplicationId":{"shape":"SSOApplicationId"} } }, + "CreatePresignedPortalUrlRequest":{ + "type":"structure", + "required":["portalId"], + "members":{ + "portalId":{ + "shape":"ID", + "location":"uri", + "locationName":"portalId" + }, + "sessionDurationSeconds":{ + "shape":"SessionDurationSeconds", + "location":"querystring", + "locationName":"sessionDurationSeconds" + } + } + }, + "CreatePresignedPortalUrlResponse":{ + "type":"structure", + "required":["presignedPortalUrl"], + "members":{ + "presignedPortalUrl":{"shape":"Url"} + } + }, "CreateProjectRequest":{ "type":"structure", "required":[ @@ -2227,8 +2266,7 @@ "Expression":{ "type":"string", "max":1024, - "min":1, - "pattern":"^[a-z0-9._+\\-*%/^, ()]+$" + "min":1 }, "ExpressionVariable":{ "type":"structure", @@ -3220,6 +3258,11 @@ "exception":true, "fault":true }, + "SessionDurationSeconds":{ + "type":"integer", + "max":43200, + "min":900 + }, "TagKey":{ "type":"string", "max":128, diff --git a/aws-sdk-core/apis/iotsitewise/2019-12-02/docs-2.json b/aws-sdk-core/apis/iotsitewise/2019-12-02/docs-2.json index 79b7386f7af..49f03a1ccc0 100644 --- a/aws-sdk-core/apis/iotsitewise/2019-12-02/docs-2.json +++ b/aws-sdk-core/apis/iotsitewise/2019-12-02/docs-2.json @@ -12,6 +12,7 @@ "CreateDashboard": "

Creates a dashboard in an AWS IoT SiteWise Monitor project.

", "CreateGateway": "

Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to AWS IoT SiteWise. For more information, see Ingesting data using a gateway in the AWS IoT SiteWise User Guide.

", "CreatePortal": "

Creates a portal, which can contain projects and dashboards. AWS IoT SiteWise Monitor uses AWS SSO or IAM to authenticate portal users and manage user permissions.

Before you can sign in to a new portal, you must add at least one identity to that portal. For more information, see Adding or removing portal administrators in the AWS IoT SiteWise User Guide.

", + "CreatePresignedPortalUrl": "

Creates a pre-signed URL to a portal. Use this operation to create URLs to portals that use AWS Identity and Access Management (IAM) to authenticate users. An IAM user with access to a portal can call this API to get a URL to that portal. The URL contains a session token that lets the IAM user access the portal.
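A minimal Ruby sketch of calling the new operation through the generated client, assuming the SDK's usual snake_case method and parameter names (the portal ID is a placeholder):

```ruby
require 'aws-sdk-core'

sitewise = Aws::IoTSiteWise::Client.new(region: 'us-east-1')
resp = sitewise.create_presigned_portal_url(
  portal_id: 'a1b2c3d4-5678-90ab-cdef-11111EXAMPLE', # placeholder portal ID
  session_duration_seconds: 3600                     # 900-43200 seconds; defaults to 900
)
puts resp.presigned_portal_url
```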

", "CreateProject": "

Creates a project in the specified portal.

", "DeleteAccessPolicy": "

Deletes an access policy that grants the specified identity access to the specified AWS IoT SiteWise Monitor resource. You can use this operation to revoke access to an AWS IoT SiteWise Monitor resource.

", "DeleteAsset": "

Deletes an asset. This action can't be undone. For more information, see Deleting assets and models in the AWS IoT SiteWise User Guide.

You can't delete an asset that's associated to another asset. For more information, see DisassociateAssets.

", @@ -556,6 +557,16 @@ "refs": { } }, + "CreatePresignedPortalUrlRequest": { + "base": null, + "refs": { + } + }, + "CreatePresignedPortalUrlResponse": { + "base": null, + "refs": { + } + }, "CreateProjectRequest": { "base": null, "refs": { @@ -962,6 +973,7 @@ "CreateDashboardResponse$dashboardId": "

The ID of the dashboard.

", "CreateGatewayResponse$gatewayId": "

The ID of the gateway device. You can use this ID when you call other AWS IoT SiteWise APIs.

", "CreatePortalResponse$portalId": "

The ID of the created portal.

", + "CreatePresignedPortalUrlRequest$portalId": "

The ID of the portal to access.

", "CreateProjectRequest$portalId": "

The ID of the portal in which to create the project.

", "CreateProjectResponse$projectId": "

The ID of the project.

", "DashboardSummary$id": "

The ID of the dashboard.

", @@ -1609,6 +1621,12 @@ "refs": { } }, + "SessionDurationSeconds": { + "base": null, + "refs": { + "CreatePresignedPortalUrlRequest$sessionDurationSeconds": "

The duration (in seconds) for which the session at the URL is valid.

Default: 900 seconds (15 minutes)

" + } + }, "TagKey": { "base": null, "refs": { @@ -1840,6 +1858,7 @@ "base": null, "refs": { "CreatePortalResponse$portalStartUrl": "

The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.

", + "CreatePresignedPortalUrlResponse$presignedPortalUrl": "

The pre-signed URL to the portal. The URL contains the portal ID and a session token that lets you access the portal. The URL has the following format.

https://<portal-id>.app.iotsitewise.aws/auth?token=<encrypted-token>

", "DescribePortalResponse$portalStartUrl": "

The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.

", "ImageLocation$url": "

The URL where the image is available. The URL is valid for 15 minutes so that you can view and download the image

", "PortalSummary$startUrl": "

The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.

" diff --git a/aws-sdk-core/apis/quicksight/2018-04-01/api-2.json b/aws-sdk-core/apis/quicksight/2018-04-01/api-2.json index 3d94faeba9d..da087382ae0 100644 --- a/aws-sdk-core/apis/quicksight/2018-04-01/api-2.json +++ b/aws-sdk-core/apis/quicksight/2018-04-01/api-2.json @@ -4552,15 +4552,20 @@ "locationName":"session-lifetime" }, "UndoRedoDisabled":{ - "shape":"boolean", + "shape":"Boolean", "location":"querystring", "locationName":"undo-redo-disabled" }, "ResetDisabled":{ - "shape":"boolean", + "shape":"Boolean", "location":"querystring", "locationName":"reset-disabled" }, + "StatePersistenceEnabled":{ + "shape":"Boolean", + "location":"querystring", + "locationName":"state-persistence-enabled" + }, "UserArn":{ "shape":"Arn", "location":"querystring", diff --git a/aws-sdk-core/apis/quicksight/2018-04-01/docs-2.json b/aws-sdk-core/apis/quicksight/2018-04-01/docs-2.json index 610b2949bba..7435add014b 100644 --- a/aws-sdk-core/apis/quicksight/2018-04-01/docs-2.json +++ b/aws-sdk-core/apis/quicksight/2018-04-01/docs-2.json @@ -510,6 +510,9 @@ "base": null, "refs": { "DataSetSummary$ColumnLevelPermissionRulesApplied": "

Indicates if the dataset has column level permission configured.

", + "GetDashboardEmbedUrlRequest$UndoRedoDisabled": "

Remove the undo/redo button on the embedded dashboard. The default is FALSE, which enables the undo/redo button.

", + "GetDashboardEmbedUrlRequest$ResetDisabled": "

Remove the reset button on the embedded dashboard. The default is FALSE, which enables the reset button.

", + "GetDashboardEmbedUrlRequest$StatePersistenceEnabled": "

Adds persistence of state for the user session in an embedded dashboard. Persistence applies to the sheet and the parameter settings. These are control settings that the dashboard subscriber (QuickSight reader) chooses while viewing the dashboard. If this is set to TRUE, the settings are the same when the subscriber reopens the same dashboard URL. The state is stored in QuickSight, not in a browser cookie. If this is set to FALSE, the state of the user session is not persisted. The default is FALSE.
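A minimal Ruby sketch of requesting an embed URL with the new flag, assuming the SDK's usual snake_case method and parameter names; the account ID, dashboard ID, identity type, and embed_url response member come from the existing request/response shapes rather than this diff, and the IDs are placeholders:

```ruby
require 'aws-sdk-core'

quicksight = Aws::QuickSight::Client.new(region: 'us-east-1')
resp = quicksight.get_dashboard_embed_url(
  aws_account_id: '123456789012',                       # placeholder account ID
  dashboard_id: '11111111-2222-3333-4444-555555555555', # placeholder dashboard ID
  identity_type: 'IAM',
  state_persistence_enabled: true,                      # new flag in this update
  undo_redo_disabled: false,
  reset_disabled: false
)
puts resp.embed_url
```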

", "SslProperties$DisableSsl": "

A Boolean option to control whether SSL should be disabled.

", "UpdateUserRequest$UnapplyCustomPermissions": "

A flag that you use to indicate that you want to remove all custom permissions from this user. Using this parameter resets the user to the state it was in before a custom permissions profile was applied. This parameter defaults to NULL and it doesn't accept any other value.

", "UploadSettings$ContainsHeader": "

Whether the file has a header row, or the files each have a header row.

", @@ -2371,7 +2374,7 @@ } }, "OracleParameters": { - "base": null, + "base": "

Oracle parameters.

", "refs": { "DataSourceParameters$OracleParameters": "

Oracle parameters.

" } @@ -3945,8 +3948,6 @@ "BorderStyle$Show": "

The option to enable display of borders for visuals.

", "DeleteAnalysisRequest$ForceDeleteWithoutRecovery": "

This option defaults to the value NoForceDeleteWithoutRecovery. To immediately delete the analysis, add the ForceDeleteWithoutRecovery option. You can't restore an analysis after it's deleted.

", "DescribeAccountCustomizationRequest$Resolved": "

The Resolved flag works with the other parameters to determine which view of QuickSight customizations is returned. You can add this flag to your command to use the same view that QuickSight uses to identify which customizations to apply to the console. Omit this flag, or set it to no-resolved, to reveal customizations that are configured at different levels.

", - "GetDashboardEmbedUrlRequest$UndoRedoDisabled": "

Remove the undo/redo button on the embedded dashboard. The default is FALSE, which enables the undo/redo button.

", - "GetDashboardEmbedUrlRequest$ResetDisabled": "

Remove the reset button on the embedded dashboard. The default is FALSE, which enables the reset button.

", "GutterStyle$Show": "

This Boolean value controls whether to display a gutter space between sheet tiles.

", "MarginStyle$Show": "

This Boolean value controls whether to display sheet margins.

" } diff --git a/aws-sdk-core/apis/sagemaker/2017-07-24/api-2.json b/aws-sdk-core/apis/sagemaker/2017-07-24/api-2.json index 7f6ac0d1b07..a94a4f7ec94 100644 --- a/aws-sdk-core/apis/sagemaker/2017-07-24/api-2.json +++ b/aws-sdk-core/apis/sagemaker/2017-07-24/api-2.json @@ -2926,7 +2926,12 @@ "VpcId":{"shape":"VpcId"}, "Tags":{"shape":"TagList"}, "AppNetworkAccessType":{"shape":"AppNetworkAccessType"}, - "HomeEfsFileSystemKmsKeyId":{"shape":"KmsKeyId"} + "HomeEfsFileSystemKmsKeyId":{ + "shape":"KmsKeyId", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use KmsKeyId instead." + }, + "KmsKeyId":{"shape":"KmsKeyId"} } }, "CreateDomainResponse":{ @@ -4068,10 +4073,15 @@ "AuthMode":{"shape":"AuthMode"}, "DefaultUserSettings":{"shape":"UserSettings"}, "AppNetworkAccessType":{"shape":"AppNetworkAccessType"}, - "HomeEfsFileSystemKmsKeyId":{"shape":"KmsKeyId"}, + "HomeEfsFileSystemKmsKeyId":{ + "shape":"KmsKeyId", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use KmsKeyId instead." + }, "SubnetIds":{"shape":"Subnets"}, "Url":{"shape":"String1024"}, - "VpcId":{"shape":"VpcId"} + "VpcId":{"shape":"VpcId"}, + "KmsKeyId":{"shape":"KmsKeyId"} } }, "DescribeEndpointConfigInput":{ diff --git a/aws-sdk-core/apis/sagemaker/2017-07-24/docs-2.json b/aws-sdk-core/apis/sagemaker/2017-07-24/docs-2.json index 14790aadab6..5861a80fd4c 100644 --- a/aws-sdk-core/apis/sagemaker/2017-07-24/docs-2.json +++ b/aws-sdk-core/apis/sagemaker/2017-07-24/docs-2.json @@ -6,18 +6,18 @@ "AssociateTrialComponent": "

Associates a trial component with a trial. A trial component can be associated with multiple trials. To disassociate a trial component from a trial, call the DisassociateTrialComponent API.

", "CreateAlgorithm": "

Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace.

", "CreateApp": "

Creates a running App for the specified UserProfile. Supported Apps are JupyterServer and KernelGateway. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.

", - "CreateAppImageConfig": "

Creates a configuration for running an Amazon SageMaker image as a KernelGateway app.

", + "CreateAppImageConfig": "

Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System (EFS) storage volume on the image, and a list of the kernels in the image.
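A minimal Ruby sketch of creating such a configuration through the generated client, assuming the SDK's usual snake_case method and parameter names (the configuration and kernel names are placeholders):

```ruby
require 'aws-sdk-core'

sagemaker = Aws::SageMaker::Client.new(region: 'us-east-1')
sagemaker.create_app_image_config(
  app_image_config_name: 'example-kernelgateway-config',
  kernel_gateway_image_config: {
    kernel_specs: [{ name: 'python3' }],                         # kernels available in the image
    file_system_config: { default_uid: 1000, default_gid: 100 }  # EFS POSIX defaults
  }
)
```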

", "CreateAutoMLJob": "

Creates an Autopilot job.

Find the best performing model after you run an Autopilot job by calling . Deploy that model by following the steps described in Step 6.1: Deploy the Model to Amazon SageMaker Hosting Services.

For information about how to use Autopilot, see Automate Model Development with Amazon SageMaker Autopilot.

", "CreateCodeRepository": "

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

", "CreateCompilationJob": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", "CreateDomain": "

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

VPC configuration

All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to Studio. The following options are available:

For more information, see Connect SageMaker Studio Notebooks to Resources in a VPC.

", - "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

", + "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search for the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, and add the following policies to the role.

", "CreateEndpointConfig": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant, for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

", "CreateExperiment": "

Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.

The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.

To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API.

To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.

", "CreateFlowDefinition": "

Creates a flow definition.

", "CreateHumanTaskUi": "

Defines the settings you will use for the human review workflow user interface. Reviewers will see a three-panel interface with an instruction area, the item to review, and an input area.

", "CreateHyperParameterTuningJob": "

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by an objective metric that you choose.

", - "CreateImage": "

Creates a SageMaker Image. A SageMaker image represents a set of container images. Each of these container images is represented by a SageMaker ImageVersion.

", + "CreateImage": "

Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon Elastic Container Registry (ECR). For more information, see Bring your own SageMaker image.

", "CreateImageVersion": "

Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon Elastic Container Registry (ECR) container image specified by BaseImage.

", "CreateLabelingJob": "

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

", "CreateModel": "

Creates a model in Amazon SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions.

Use this API to create a model if you want to use Amazon SageMaker hosting services or run a batch transform job.

To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

To run a batch transform using your model, you start a job with the CreateTransformJob API. Amazon SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location.

In the CreateModel request, you must define a container with the PrimaryContainer parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other AWS resources, you grant necessary permissions via this role.

", @@ -302,7 +302,7 @@ } }, "AppImageConfigDetails": { - "base": "

The configuration for running an Amazon SageMaker image as a KernelGateway app.

", + "base": "

The configuration for running a SageMaker image as a KernelGateway app.

", "refs": { "AppImageConfigList$member": null } @@ -316,7 +316,7 @@ "AppImageConfigName": { "base": null, "refs": { - "AppImageConfigDetails$AppImageConfigName": "

The name of the AppImageConfig.

", + "AppImageConfigDetails$AppImageConfigName": "

The name of the AppImageConfig. Must be unique to your account.

", "CreateAppImageConfigRequest$AppImageConfigName": "

The name of the AppImageConfig. Must be unique to your account.

", "CustomImage$AppImageConfigName": "

The name of the AppImageConfig.

", "DeleteAppImageConfigRequest$AppImageConfigName": "

The name of the AppImageConfig to delete.

", @@ -1524,7 +1524,7 @@ } }, "CustomImage": { - "base": "

A custom image.

", + "base": "

A custom SageMaker image. For more information, see Bring your own SageMaker image.

", "refs": { "CustomImages$member": null } @@ -1532,7 +1532,7 @@ "CustomImages": { "base": null, "refs": { - "KernelGatewayAppSettings$CustomImages": "

A list of custom images that are configured to run as a KernelGateway app.

" + "KernelGatewayAppSettings$CustomImages": "

A list of custom SageMaker images that are configured to run as a KernelGateway app.

" } }, "DataCaptureConfig": { @@ -1557,7 +1557,7 @@ "DataInputConfig": { "base": null, "refs": { - "InputConfig$DataInputConfig": "

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:

" + "InputConfig$DataInputConfig": "

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:

" } }, "DataProcessing": { @@ -1612,13 +1612,13 @@ "DefaultGid": { "base": null, "refs": { - "FileSystemConfig$DefaultGid": "

The default POSIX group ID. If not specified, defaults to 100.

" + "FileSystemConfig$DefaultGid": "

The default POSIX group ID (GID). If not specified, defaults to 100.

" } }, "DefaultUid": { "base": null, "refs": { - "FileSystemConfig$DefaultUid": "

The default POSIX user ID. If not specified, defaults to 1000.

" + "FileSystemConfig$DefaultUid": "

The default POSIX user ID (UID). If not specified, defaults to 1000.

" } }, "DeleteAlgorithmInput": { @@ -2616,9 +2616,9 @@ } }, "FileSystemConfig": { - "base": "

The Amazon Elastic File System (EFS) storage configuration for an image.

", + "base": "

The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.

", "refs": { - "KernelGatewayImageConfig$FileSystemConfig": "

The file system configuration.

" + "KernelGatewayImageConfig$FileSystemConfig": "

The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.

" } }, "FileSystemDataSource": { @@ -3159,7 +3159,7 @@ "ImageDisplayName": { "base": null, "refs": { - "CreateImageRequest$DisplayName": "

The display name of the image. When the image is added to a domain, DisplayName must be unique to the domain.

", + "CreateImageRequest$DisplayName": "

The display name of the image. If not provided, ImageName is displayed.

", "DescribeImageResponse$DisplayName": "

The name of the image as displayed.

", "Image$DisplayName": "

The name of the image as displayed.

", "UpdateImageRequest$DisplayName": "

The new display name for the image.

" @@ -3379,7 +3379,7 @@ } }, "JupyterServerAppSettings": { - "base": "

Jupyter server's app settings.

", + "base": "

The JupyterServer app settings.

", "refs": { "UserSettings$JupyterServerAppSettings": "

The Jupyter server's app settings.

" } @@ -3397,22 +3397,22 @@ } }, "KernelGatewayImageConfig": { - "base": "

The configuration for an Amazon SageMaker KernelGateway app.

", + "base": "

The configuration for the file system and kernels in a SageMaker image running as a KernelGateway app.

", "refs": { - "AppImageConfigDetails$KernelGatewayImageConfig": "

The KernelGateway app.

", + "AppImageConfigDetails$KernelGatewayImageConfig": "

The configuration for the file system and kernels in the SageMaker image.

", "CreateAppImageConfigRequest$KernelGatewayImageConfig": "

The KernelGatewayImageConfig.

", - "DescribeAppImageConfigResponse$KernelGatewayImageConfig": "

The KernelGateway app.

", + "DescribeAppImageConfigResponse$KernelGatewayImageConfig": "

The configuration of a KernelGateway app.

", "UpdateAppImageConfigRequest$KernelGatewayImageConfig": "

The new KernelGateway app to run on the image.

" } }, "KernelName": { "base": null, "refs": { - "KernelSpec$Name": "

The name of the kernel. Must be unique to your account.

" + "KernelSpec$Name": "

The name of the kernel.

" } }, "KernelSpec": { - "base": "

Defines how a kernel is started and the arguments, environment variables, and metadata that are available to the kernel.

", + "base": "

The specification of a Jupyter kernel.

", "refs": { "KernelSpecs$member": null } @@ -3420,7 +3420,7 @@ "KernelSpecs": { "base": null, "refs": { - "KernelGatewayImageConfig$KernelSpecs": "

Defines how a kernel is started and the arguments, environment variables, and metadata that are available to the kernel.

" + "KernelGatewayImageConfig$KernelSpecs": "

The specification of the Jupyter kernels in the image.

" } }, "KmsKeyId": { @@ -3428,12 +3428,14 @@ "refs": { "AutoMLOutputDataConfig$KmsKeyId": "

The AWS KMS encryption key ID.

", "AutoMLSecurityConfig$VolumeKmsKeyId": "

The key used to encrypt stored data.

", - "CreateDomainRequest$HomeEfsFileSystemKmsKeyId": "

The AWS Key Management Service (KMS) encryption key ID. Encryption with a customer master key (CMK) is not supported.

", + "CreateDomainRequest$HomeEfsFileSystemKmsKeyId": "

This member is deprecated and replaced with KmsKeyId.

", + "CreateDomainRequest$KmsKeyId": "

SageMaker uses AWS KMS to encrypt the EFS volume attached to the domain with an AWS managed customer master key (CMK) by default. For more control, specify a customer managed CMK.
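A minimal Ruby sketch of passing the new kms_key_id member when creating a domain, assuming the SDK's usual snake_case method and parameter names; the other required members (domain name, auth mode, default user settings, subnets, VPC) come from the existing CreateDomain shape rather than this diff, and all identifiers are placeholders:

```ruby
require 'aws-sdk-core'

sagemaker = Aws::SageMaker::Client.new(region: 'us-east-1')
sagemaker.create_domain(
  domain_name: 'example-domain',
  auth_mode: 'IAM',
  default_user_settings: {
    execution_role: 'arn:aws:iam::123456789012:role/ExampleSageMakerRole'
  },
  subnet_ids: ['subnet-0123456789abcdef0'],
  vpc_id: 'vpc-0123456789abcdef0',
  # Prefer the new kms_key_id member; home_efs_file_system_kms_key_id is deprecated.
  kms_key_id: 'arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555'
)
```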

", "CreateEndpointConfigInput$KmsKeyId": "

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

The KmsKeyId can be any of the following formats:

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. For more information, refer to the AWS Key Management Service section Using Key Policies in AWS KMS

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

", "CreateNotebookInstanceInput$KmsKeyId": "

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide.

", "DataCaptureConfig$KmsKeyId": "

", "DataCaptureConfigSummary$KmsKeyId": "

", - "DescribeDomainResponse$HomeEfsFileSystemKmsKeyId": "

The AWS Key Management Service encryption key ID.

", + "DescribeDomainResponse$HomeEfsFileSystemKmsKeyId": "

This member is deprecated and replaced with KmsKeyId.

", + "DescribeDomainResponse$KmsKeyId": "

The AWS KMS customer managed CMK used to encrypt the EFS volume attached to the domain.

", "DescribeEndpointConfigOutput$KmsKeyId": "

The AWS KMS key ID that Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

", "DescribeNotebookInstanceOutput$KmsKeyId": "

The AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

", "FlowDefinitionOutputConfig$KmsKeyId": "

The AWS Key Management Service (KMS) key ID for server-side encryption.

", @@ -5463,7 +5465,7 @@ "refs": { "CreateAppRequest$ResourceSpec": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

", "DescribeAppResponse$ResourceSpec": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

", - "JupyterServerAppSettings$DefaultResourceSpec": "

The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

", + "JupyterServerAppSettings$DefaultResourceSpec": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.

", "KernelGatewayAppSettings$DefaultResourceSpec": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.

", "TensorBoardAppSettings$DefaultResourceSpec": "

The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" } @@ -7163,7 +7165,7 @@ "refs": { "CreateDomainRequest$DefaultUserSettings": "

The default user settings.

", "CreateUserProfileRequest$UserSettings": "

A collection of settings.

", - "DescribeDomainResponse$DefaultUserSettings": "

Settings which are applied to all UserProfile in this domain, if settings are not explicitly specified in a given UserProfile.

", + "DescribeDomainResponse$DefaultUserSettings": "

Settings which are applied to all UserProfiles in this domain, if settings are not explicitly specified in a given UserProfile.

", "DescribeUserProfileResponse$UserSettings": "

A collection of settings.

", "UpdateDomainRequest$DefaultUserSettings": "

A collection of settings.

", "UpdateUserProfileRequest$UserSettings": "

A collection of settings.

" diff --git a/aws-sdk-core/apis/servicecatalog/2015-12-10/api-2.json b/aws-sdk-core/apis/servicecatalog/2015-12-10/api-2.json index 842406c4a73..9ea094adfd2 100644 --- a/aws-sdk-core/apis/servicecatalog/2015-12-10/api-2.json +++ b/aws-sdk-core/apis/servicecatalog/2015-12-10/api-2.json @@ -700,6 +700,21 @@ {"shape":"ResourceNotFoundException"} ] }, + "ImportAsProvisionedProduct":{ + "name":"ImportAsProvisionedProduct", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportAsProvisionedProductInput"}, + "output":{"shape":"ImportAsProvisionedProductOutput"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidStateException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} + ] + }, "ListAcceptedPortfolioShares":{ "name":"ListAcceptedPortfolioShares", "http":{ @@ -2268,6 +2283,33 @@ "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" }, "IgnoreErrors":{"type":"boolean"}, + "ImportAsProvisionedProductInput":{ + "type":"structure", + "required":[ + "ProductId", + "ProvisioningArtifactId", + "ProvisionedProductName", + "PhysicalId", + "IdempotencyToken" + ], + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "ProductId":{"shape":"Id"}, + "ProvisioningArtifactId":{"shape":"Id"}, + "ProvisionedProductName":{"shape":"ProvisionedProductName"}, + "PhysicalId":{"shape":"PhysicalId"}, + "IdempotencyToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true + } + } + }, + "ImportAsProvisionedProductOutput":{ + "type":"structure", + "members":{ + "RecordDetail":{"shape":"RecordDetail"} + } + }, "InstructionType":{"type":"string"}, "InstructionValue":{"type":"string"}, "InvalidParametersException":{ @@ -3455,6 +3497,7 @@ } }, "ResourceType":{"type":"string"}, + "RetainPhysicalResources":{"type":"boolean"}, "RoleArn":{ "type":"string", "max":1224, @@ -3860,7 +3903,8 @@ "idempotencyToken":true }, "IgnoreErrors":{"shape":"IgnoreErrors"}, - "AcceptLanguage":{"shape":"AcceptLanguage"} + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "RetainPhysicalResources":{"shape":"RetainPhysicalResources"} } }, "TerminateProvisionedProductOutput":{ diff --git a/aws-sdk-core/apis/servicecatalog/2015-12-10/docs-2.json b/aws-sdk-core/apis/servicecatalog/2015-12-10/docs-2.json index 3698999465a..2268b3d78e2 100644 --- a/aws-sdk-core/apis/servicecatalog/2015-12-10/docs-2.json +++ b/aws-sdk-core/apis/servicecatalog/2015-12-10/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "AWS Service Catalog

AWS Service Catalog enables organizations to create and manage catalogs of IT services that are approved for use on AWS. To get the most out of this documentation, you should be familiar with the terminology discussed in AWS Service Catalog Concepts.

", + "service": "AWS Service Catalog

AWS Service Catalog enables organizations to create and manage catalogs of IT services that are approved for AWS. To get the most out of this documentation, you should be familiar with the terminology discussed in AWS Service Catalog Concepts.

", "operations": { "AcceptPortfolioShare": "

Accepts an offer to share the specified portfolio.

", "AssociateBudgetWithResource": "

Associates the specified budget with the specified resource.

", @@ -53,6 +53,7 @@ "ExecuteProvisionedProductServiceAction": "

Executes a self-service action against a provisioned product.

", "GetAWSOrganizationsAccessStatus": "

Get the Access Status for AWS Organization portfolio share feature. This API can only be called by the management account in the organization or by a delegated admin.

", "GetProvisionedProductOutputs": "

This API takes either a ProvisionedProductId or a ProvisionedProductName, along with a list of one or more output keys, and responds with the key/value pairs of those outputs.

", + "ImportAsProvisionedProduct": "

Requests the import of a resource as a Service Catalog provisioned product that is associated with a Service Catalog product and provisioning artifact. Once imported, all supported Service Catalog governance actions are available on the provisioned product.

Resource import only supports CloudFormation stack ARNs. CloudFormation StackSets and non-root nested stacks are not supported.

The CloudFormation stack must have one of the following statuses to be imported: CREATE_COMPLETE, UPDATE_COMPLETE, UPDATE_ROLLBACK_COMPLETE, IMPORT_COMPLETE, IMPORT_ROLLBACK_COMPLETE.

Import of the resource requires that the CloudFormation stack template matches the associated Service Catalog product provisioning artifact.
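As a hedged sketch of calling the new operation through this SDK's generated client; the stack ARN is a placeholder, and the product and artifact IDs reuse the example identifiers shown elsewhere in these docs:

```ruby
require 'aws-sdk-core'
require 'securerandom'

servicecatalog = Aws::ServiceCatalog::Client.new(region: 'us-east-1')

# Sketch: bring an existing CloudFormation stack under Service Catalog
# governance as a provisioned product.
resp = servicecatalog.import_as_provisioned_product(
  product_id: 'prod-abcdzk7xy33qa',              # example product ID
  provisioning_artifact_id: 'pa-4abcdjnxjj6ne',  # example artifact ID
  provisioned_product_name: 'imported-stack',    # must be unique in the account
  physical_id: 'arn:aws:cloudformation:us-east-1:123456789012:stack/example/11111111-2222-3333-4444-555555555555', # placeholder stack ARN
  idempotency_token: SecureRandom.uuid           # the SDK can also generate this when omitted
)
puts resp.record_detail.status
```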

", "ListAcceptedPortfolioShares": "

Lists all portfolios for which sharing was accepted by this account.

", "ListBudgetsForResource": "

Lists all the budgets associated to the specified resource.

", "ListConstraintsForPortfolio": "

Lists the constraints for the specified portfolio and product.

", @@ -131,6 +132,7 @@ "ExecuteProvisionedProductPlanInput$AcceptLanguage": "

The language code.

", "ExecuteProvisionedProductServiceActionInput$AcceptLanguage": "

The language code.

", "GetProvisionedProductOutputsInput$AcceptLanguage": "

The language code.

", + "ImportAsProvisionedProductInput$AcceptLanguage": "

The language code.

", "ListAcceptedPortfolioSharesInput$AcceptLanguage": "

The language code.

", "ListBudgetsForResourceInput$AcceptLanguage": "

The language code.

", "ListConstraintsForPortfolioInput$AcceptLanguage": "

The language code.

", @@ -1078,6 +1080,8 @@ "FailedServiceActionAssociation$ProductId": "

The product identifier. For example, prod-abcdzk7xy33qa.

", "FailedServiceActionAssociation$ProvisioningArtifactId": "

The identifier of the provisioning artifact. For example, pa-4abcdjnxjj6ne.

", "GetProvisionedProductOutputsInput$ProvisionedProductId": "

The identifier of the provisioned product that you want the outputs from.

", + "ImportAsProvisionedProductInput$ProductId": "

The product identifier.

", + "ImportAsProvisionedProductInput$ProvisioningArtifactId": "

The identifier of the provisioning artifact.

", "LaunchPath$Id": "

The identifier of the launch path.

", "LaunchPathSummary$Id": "

The identifier of the product path.

", "ListBudgetsForResourceInput$ResourceId": "

The resource identifier.

", @@ -1161,6 +1165,7 @@ "CreateServiceActionInput$IdempotencyToken": "

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

", "ExecuteProvisionedProductPlanInput$IdempotencyToken": "

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

", "ExecuteProvisionedProductServiceActionInput$ExecuteToken": "

An idempotency token that uniquely identifies the execute request.

", + "ImportAsProvisionedProductInput$IdempotencyToken": "

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

", "ProvisionProductInput$ProvisionToken": "

An idempotency token that uniquely identifies the provisioning request.

", "ProvisionedProductAttribute$IdempotencyToken": "

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

", "ProvisionedProductDetail$IdempotencyToken": "

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

", @@ -1176,6 +1181,16 @@ "TerminateProvisionedProductInput$IgnoreErrors": "

If set to true, AWS Service Catalog stops managing the specified provisioned product even if it cannot delete the underlying resources.

" } }, + "ImportAsProvisionedProductInput": { + "base": null, + "refs": { + } + }, + "ImportAsProvisionedProductOutput": { + "base": null, + "refs": { + } + }, "InstructionType": { "base": null, "refs": { @@ -1638,6 +1653,7 @@ "PhysicalId": { "base": null, "refs": { + "ImportAsProvisionedProductInput$PhysicalId": "

The unique identifier of the resource to be imported. Currently, only CloudFormation stack IDs are supported.

", "ProvisionedProductAttribute$PhysicalId": "

The assigned identifier for the resource, such as an EC2 instance ID or an S3 bucket name.

" } }, @@ -1957,6 +1973,7 @@ "CreateProvisionedProductPlanOutput$ProvisionedProductName": "

The user-friendly name of the provisioned product.

", "DescribeProvisionedProductInput$Name": "

The name of the provisioned product. You must provide the name or ID, but not both.

If you do not provide a name or ID, or you provide both name and ID, an InvalidParametersException will occur.

", "GetProvisionedProductOutputsInput$ProvisionedProductName": "

The name of the provisioned product that you want the outputs from.

", + "ImportAsProvisionedProductInput$ProvisionedProductName": "

The user-friendly name of the provisioned product. The value must be unique for the AWS account. The name cannot be updated after the product is provisioned.

", "ProvisionProductInput$ProvisionedProductName": "

A user-friendly name for the provisioned product. This value must be unique for the AWS account and cannot be updated after the product is provisioned.

", "ProvisionedProductPlanDetails$ProvisionProductName": "

The user-friendly name of the provisioned product.

", "ProvisionedProductPlanSummary$ProvisionProductName": "

The user-friendly name of the provisioned product.

", @@ -2018,7 +2035,7 @@ "ProvisionedProductProperties": { "base": null, "refs": { - "UpdateProvisionedProductPropertiesInput$ProvisionedProductProperties": "

A map that contains the provisioned product properties to be updated.

The LAUNCH_ROLE key accepts role ARNs. This key allows an administrator to call UpdateProvisionedProductProperties to update the launch role that is associated with a provisioned product. This role is used when an end user calls a provisioning operation such as UpdateProvisionedProduct, TerminateProvisionedProduct, or ExecuteProvisionedProductServiceAction. Only a role ARN or an empty string \"\" is valid. A user ARN is invalid. if an admin user passes an empty string \"\" as the value for the key LAUNCH_ROLE, the admin removes the launch role that is associated with the provisioned product. As a result, the end user operations use the credentials of the end user.

The OWNER key accepts user ARNs and role ARNs. The owner is the user that has permission to see, update, terminate, and execute service actions in the provisioned product.

The administrator can change the owner of a provisioned product to another IAM user within the same account. Both end user owners and administrators can see ownership history of the provisioned product using the ListRecordHistory API. The new owner can describe all past records for the provisioned product using the DescribeRecord API. The previous owner can no longer use DescribeRecord, but can still see the product's history from when he was an owner using ListRecordHistory.

If a provisioned product ownership is assigned to an end user, they can see and perform any action through the API or Service Catalog console such as update, terminate, and execute service actions. If an end user provisions a product and the owner is updated to someone else, they will no longer be able to see or perform any actions through API or the Service Catalog console on that provisioned product.

", + "UpdateProvisionedProductPropertiesInput$ProvisionedProductProperties": "

A map that contains the provisioned product properties to be updated.

The LAUNCH_ROLE key accepts role ARNs. This key allows an administrator to call UpdateProvisionedProductProperties to update the launch role that is associated with a provisioned product. This role is used when an end user calls a provisioning operation such as UpdateProvisionedProduct, TerminateProvisionedProduct, or ExecuteProvisionedProductServiceAction. Only a role ARN is valid. A user ARN is invalid.

The OWNER key accepts user ARNs and role ARNs. The owner is the user that has permission to see, update, terminate, and execute service actions in the provisioned product.

The administrator can change the owner of a provisioned product to another IAM user within the same account. Both end user owners and administrators can see ownership history of the provisioned product using the ListRecordHistory API. The new owner can describe all past records for the provisioned product using the DescribeRecord API. The previous owner can no longer use DescribeRecord, but can still see the product's history from when they were the owner by using ListRecordHistory.

If ownership of a provisioned product is assigned to an end user, they can see and perform any action through the API or the Service Catalog console, such as update, terminate, and execute service actions. If an end user provisions a product and the owner is updated to someone else, they will no longer be able to see or perform any actions through the API or the Service Catalog console on that provisioned product.
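For example, a sketch of setting the LAUNCH_ROLE property with this SDK; the provisioned product ID, role ARN, and token are placeholders:

```ruby
require 'aws-sdk-core'

servicecatalog = Aws::ServiceCatalog::Client.new(region: 'us-east-1')

# Sketch: point the provisioned product at a different launch role, which is
# then used for UpdateProvisionedProduct, TerminateProvisionedProduct, and
# ExecuteProvisionedProductServiceAction calls made by end users.
servicecatalog.update_provisioned_product_properties(
  provisioned_product_id: 'pp-abcd1234example',                          # placeholder
  provisioned_product_properties: {
    'LAUNCH_ROLE' => 'arn:aws:iam::123456789012:role/ExampleLaunchRole'  # placeholder role ARN
  },
  idempotency_token: 'example-token-1'   # auto-generated by the SDK when omitted
)
```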

", "UpdateProvisionedProductPropertiesOutput$ProvisionedProductProperties": "

A map that contains the properties updated.

" } }, @@ -2095,7 +2112,7 @@ "ProvisioningArtifactDetail": { "base": "

Information about a provisioning artifact (also known as a version) for a product.

", "refs": { - "CreateProductOutput$ProvisioningArtifactDetail": "

Information about the provisioning artifact.

", + "CreateProductOutput$ProvisioningArtifactDetail": "

Information about the provisioning artifact.

", "CreateProvisioningArtifactOutput$ProvisioningArtifactDetail": "

Information about the provisioning artifact.

", "DescribeProvisioningArtifactOutput$ProvisioningArtifactDetail": "

Information about the provisioning artifact.

", "ProvisioningArtifactDetails$member": null, @@ -2193,8 +2210,8 @@ "ProvisioningArtifactProperties": { "base": "

Information about a provisioning artifact (also known as a version) for a product.

", "refs": { - "CreateProductInput$ProvisioningArtifactParameters": "

The configuration of the provisioning artifact.

", - "CreateProvisioningArtifactInput$Parameters": "

The configuration for the provisioning artifact.

" + "CreateProductInput$ProvisioningArtifactParameters": "

The configuration of the provisioning artifact. The info field accepts ImportFromPhysicalID.

", + "CreateProvisioningArtifactInput$Parameters": "

The configuration for the provisioning artifact. The info field accepts ImportFromPhysicalID.

" } }, "ProvisioningArtifactPropertyName": { @@ -2271,6 +2288,7 @@ "DescribeRecordOutput$RecordDetail": "

Information about the product.

", "ExecuteProvisionedProductPlanOutput$RecordDetail": "

Information about the result of provisioning the product.

", "ExecuteProvisionedProductServiceActionOutput$RecordDetail": "

An object containing detailed information about the result of provisioning the product.

", + "ImportAsProvisionedProductOutput$RecordDetail": null, "ProvisionProductOutput$RecordDetail": "

Information about the result of provisioning the product.

", "RecordDetails$member": null, "TerminateProvisionedProductOutput$RecordDetail": "

Information about the result of this request.

", @@ -2483,6 +2501,12 @@ "ListResourcesForTagOptionInput$ResourceType": "

The resource type.

" } }, + "RetainPhysicalResources": { + "base": null, + "refs": { + "TerminateProvisionedProductInput$RetainPhysicalResources": "

When this boolean parameter is set to true, the TerminateProvisionedProduct API deletes the Service Catalog provisioned product. However, it does not remove the CloudFormation stack, stack set, or the underlying resources of the deleted provisioned product. The default value is false.
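A minimal sketch of the new flag with this SDK, assuming a placeholder provisioned product name:

```ruby
require 'aws-sdk-core'

servicecatalog = Aws::ServiceCatalog::Client.new(region: 'us-east-1')

# Sketch: stop Service Catalog from managing the provisioned product but keep
# the underlying CloudFormation stack and its resources in place.
servicecatalog.terminate_provisioned_product(
  provisioned_product_name: 'imported-stack',  # placeholder
  retain_physical_resources: true              # defaults to false
)
```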

" + } + }, "RoleArn": { "base": null, "refs": { diff --git a/aws-sdk-core/apis/sns/2010-03-31/docs-2.json b/aws-sdk-core/apis/sns/2010-03-31/docs-2.json index 40a14bbb52e..879a6d70e3f 100644 --- a/aws-sdk-core/apis/sns/2010-03-31/docs-2.json +++ b/aws-sdk-core/apis/sns/2010-03-31/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "Amazon Simple Notification Service

Amazon Simple Notification Service (Amazon SNS) is a web service that enables you to build distributed web-enabled applications. Applications can use Amazon SNS to easily push real-time notification messages to interested subscribers over multiple delivery protocols. For more information about this product see https://aws.amazon.com/sns. For detailed information about Amazon SNS features and their associated API calls, see the Amazon SNS Developer Guide.

We also provide SDKs that enable you to access Amazon SNS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as: cryptographically signing your service requests, retrying requests, and handling error responses. For a list of available SDKs, go to Tools for Amazon Web Services.

", + "service": "Amazon Simple Notification Service

Amazon Simple Notification Service (Amazon SNS) is a web service that enables you to build distributed web-enabled applications. Applications can use Amazon SNS to easily push real-time notification messages to interested subscribers over multiple delivery protocols. For more information about this product, see https://aws.amazon.com/sns. For detailed information about Amazon SNS features and their associated API calls, see the Amazon SNS Developer Guide.

For information on the permissions you need to use this API, see Identity and access management in Amazon SNS in the Amazon SNS Developer Guide.

We also provide SDKs that enable you to access Amazon SNS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as cryptographically signing your service requests, retrying requests, and handling error responses. For a list of available SDKs, go to Tools for Amazon Web Services.

", "operations": { "AddPermission": "

Adds a statement to a topic's access control policy, granting access for the specified AWS accounts to the specified actions.

", "CheckIfPhoneNumberIsOptedOut": "

Accepts a phone number and indicates whether the phone holder has opted out of receiving SMS messages from your account. You cannot send SMS messages to a number that is opted out.

To resume sending messages, you can opt in the number by using the OptInPhoneNumber action.

", @@ -31,7 +31,7 @@ "SetSMSAttributes": "

Use this request to set the default settings for sending SMS messages and receiving daily SMS usage reports.

You can override some of these settings for a single message when you use the Publish action with the MessageAttributes.entry.N parameter. For more information, see Publishing to a mobile phone in the Amazon SNS Developer Guide.

", "SetSubscriptionAttributes": "

Allows a subscription owner to set an attribute of the subscription to a new value.

", "SetTopicAttributes": "

Allows a topic owner to set an attribute of the topic to a new value.

", - "Subscribe": "

Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same AWS account, the endpoint owner must the ConfirmSubscription action to confirm the subscription.

You call the ConfirmSubscription action with the token from the subscription response. Confirmation tokens are valid for three days.

This action is throttled at 100 transactions per second (TPS).

", + "Subscribe": "

Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same AWS account, the endpoint owner must run the ConfirmSubscription action to confirm the subscription.

You call the ConfirmSubscription action with the token from the subscription response. Confirmation tokens are valid for three days.

This action is throttled at 100 transactions per second (TPS).
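As an illustration of the subscribe/confirm flow described above, a sketch with this SDK; the topic ARN, endpoint URL, and token are placeholders, and the token in practice arrives in the confirmation message delivered to the endpoint:

```ruby
require 'aws-sdk-core'

sns = Aws::SNS::Client.new(region: 'us-east-1')

# Sketch: subscribe an HTTPS endpoint to a topic.
sns.subscribe(
  topic_arn: 'arn:aws:sns:us-east-1:123456789012:example-topic', # placeholder
  protocol: 'https',
  endpoint: 'https://example.com/sns-handler'                    # placeholder
)

# Later, from the endpoint that received the confirmation message:
sns.confirm_subscription(
  topic_arn: 'arn:aws:sns:us-east-1:123456789012:example-topic',
  token: 'token-from-confirmation-message'  # placeholder; tokens are valid for three days
)
```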

", "TagResource": "

Add tags to the specified Amazon SNS topic. For an overview, see Amazon SNS Tags in the Amazon SNS Developer Guide.

When you use topic tags, keep the following guidelines in mind:

", "Unsubscribe": "

Deletes a subscription. If the subscription requires authentication for deletion, only the owner of the subscription or the topic's owner can unsubscribe, and an AWS signature is required. If the Unsubscribe call does not require authentication and the requester is not the subscription owner, a final cancellation message is delivered to the endpoint, so that the endpoint owner can easily resubscribe to the topic if the Unsubscribe request was unintended.

This action is throttled at 100 transactions per second (TPS).

", "UntagResource": "

Remove tags from the specified Amazon SNS topic. For an overview, see Amazon SNS Tags in the Amazon SNS Developer Guide.

" @@ -652,7 +652,7 @@ "attributeName": { "base": null, "refs": { - "SetSubscriptionAttributesInput$AttributeName": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

", + "SetSubscriptionAttributesInput$AttributeName": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that this action uses:

", "SetTopicAttributesInput$AttributeName": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

The following attribute applies only to server-side-encryption:

The following attribute applies only to FIFO topics:

", "SubscriptionAttributesMap$key": null, "TopicAttributesMap$key": null diff --git a/aws-sdk-core/apis/synthetics/2017-10-11/api-2.json b/aws-sdk-core/apis/synthetics/2017-10-11/api-2.json index d285fc3d533..66d16d3089b 100644 --- a/aws-sdk-core/apis/synthetics/2017-10-11/api-2.json +++ b/aws-sdk-core/apis/synthetics/2017-10-11/api-2.json @@ -281,7 +281,8 @@ "members":{ "TimeoutInSeconds":{"shape":"MaxFifteenMinutesInSeconds"}, "MemoryInMB":{"shape":"MaxSize3008"}, - "ActiveTracing":{"shape":"NullableBoolean"} + "ActiveTracing":{"shape":"NullableBoolean"}, + "EnvironmentVariables":{"shape":"EnvironmentVariablesMap"} } }, "CanaryRunConfigOutput":{ @@ -472,6 +473,16 @@ "NextToken":{"shape":"Token"} } }, + "EnvironmentVariableName":{ + "type":"string", + "pattern":"[a-zA-Z]([a-zA-Z0-9_])+" + }, + "EnvironmentVariableValue":{"type":"string"}, + "EnvironmentVariablesMap":{ + "type":"map", + "key":{"shape":"EnvironmentVariableName"}, + "value":{"shape":"EnvironmentVariableValue"} + }, "ErrorMessage":{"type":"string"}, "FunctionArn":{ "type":"string", diff --git a/aws-sdk-core/apis/synthetics/2017-10-11/docs-2.json b/aws-sdk-core/apis/synthetics/2017-10-11/docs-2.json index 601617d6c86..62de3d992c4 100644 --- a/aws-sdk-core/apis/synthetics/2017-10-11/docs-2.json +++ b/aws-sdk-core/apis/synthetics/2017-10-11/docs-2.json @@ -227,6 +227,24 @@ "refs": { } }, + "EnvironmentVariableName": { + "base": null, + "refs": { + "EnvironmentVariablesMap$key": null + } + }, + "EnvironmentVariableValue": { + "base": null, + "refs": { + "EnvironmentVariablesMap$value": null + } + }, + "EnvironmentVariablesMap": { + "base": null, + "refs": { + "CanaryRunConfigInput$EnvironmentVariables": "

Specifies the keys and values to use for any environment variables used in the canary script. Use the following format:

{ \"key1\" : \"value1\", \"key2\" : \"value2\", ...}

Keys must start with a letter and be at least two characters. The total size of your environment variables cannot exceed 4 KB. You can't specify any Lambda reserved environment variables as the keys for your environment variables. For more information about reserved keys, see Runtime environment variables.
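For illustration, a sketch of passing environment variables through the canary run configuration with this SDK; the canary name and variable values are placeholders:

```ruby
require 'aws-sdk-core'

synthetics = Aws::Synthetics::Client.new(region: 'us-east-1')

# Sketch: expose STAGE and BASE_URL to the canary script. Keys must start with
# a letter, and the whole map must stay under the 4 KB limit noted above.
synthetics.update_canary(
  name: 'example-canary',  # placeholder
  run_config: {
    environment_variables: {
      'STAGE'    => 'prod',
      'BASE_URL' => 'https://example.com'
    }
  }
)
```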

" + } + }, "ErrorMessage": { "base": null, "refs": { @@ -392,7 +410,7 @@ "base": null, "refs": { "Canary$ArtifactS3Location": "

The location in Amazon S3 where Synthetics stores artifacts from the runs of this canary. Artifacts include the log file, screenshots, and HAR files.

", - "Canary$RuntimeVersion": "

Specifies the runtime version to use for the canary. Currently, the only valid values are syn-nodejs-2.0, syn-nodejs-2.0-beta, and syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

", + "Canary$RuntimeVersion": "

Specifies the runtime version to use for the canary. For more information about runtime versions, see Canary Runtime Versions.

", "CanaryCodeInput$S3Bucket": "

If your canary script is located in S3, specify the full bucket name here. The bucket must already exist. Specify the full bucket name, including s3:// as the start of the bucket name.

", "CanaryCodeInput$S3Key": "

The S3 key of your script. For more information, see Working with Amazon S3 Objects.

", "CanaryCodeInput$S3Version": "

The S3 version ID of your script.

", @@ -405,10 +423,10 @@ "CanaryScheduleOutput$Expression": "

A rate expression that defines how often the canary is to run. The syntax is rate(number unit). unit can be minute, minutes, or hour.

For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour.

Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

", "CanaryStatus$StateReason": "

If the canary has insufficient permissions to run, this field provides more details.

", "CreateCanaryRequest$ArtifactS3Location": "

The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files.

", - "CreateCanaryRequest$RuntimeVersion": "

Specifies the runtime version to use for the canary. Currently, the only valid values are syn-nodejs-2.0, syn-nodejs-2.0-beta, and syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

", - "RuntimeVersion$VersionName": "

The name of the runtime version. Currently, the only valid values are syn-nodejs-2.0, syn-nodejs-2.0-beta, and syn-1.0.

", + "CreateCanaryRequest$RuntimeVersion": "

Specifies the runtime version to use for the canary. For a list of valid runtime versions and more information about runtime versions, see Canary Runtime Versions.

", + "RuntimeVersion$VersionName": "

The name of the runtime version. For a list of valid runtime versions, see Canary Runtime Versions.

", "RuntimeVersion$Description": "

A description of the runtime version, created by Amazon.

", - "UpdateCanaryRequest$RuntimeVersion": "

Specifies the runtime version to use for the canary. Currently, the only valid values are syn-nodejs-2.0, syn-nodejs-2.0-beta, and syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

" + "UpdateCanaryRequest$RuntimeVersion": "

Specifies the runtime version to use for the canary. For a list of valid runtime versions and for more information about runtime versions, see Canary Runtime Versions.

" } }, "SubnetId": {