diff --git a/sample/sagemaker/2017-07-24/service-2.json b/sample/sagemaker/2017-07-24/service-2.json index df871a7..4461e9a 100644 --- a/sample/sagemaker/2017-07-24/service-2.json +++ b/sample/sagemaker/2017-07-24/service-2.json @@ -4974,7 +4974,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:algorithm/[\\S]{1,2048}" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:algorithm/[\\S]{1,2048}" }, "AlgorithmImage":{ "type":"string", @@ -8850,6 +8850,7 @@ "ml.p5e.48xlarge", "ml.p5en.48xlarge", "ml.p6-b200.48xlarge", + "ml.trn2.3xlarge", "ml.trn2.48xlarge", "ml.c6i.large", "ml.c6i.xlarge", @@ -9470,7 +9471,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:code-repository/[\\S]{1,2048}" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:code-repository/[\\S]{1,2048}" }, "CodeRepositoryContains":{ "type":"string", @@ -13039,8 +13040,7 @@ "required":[ "TrainingJobName", "RoleArn", - "OutputDataConfig", - "StoppingCondition" + "OutputDataConfig" ], "members":{ "TrainingJobName":{ @@ -19342,8 +19342,6 @@ "ModelArtifacts", "TrainingJobStatus", "SecondaryStatus", - "AlgorithmSpecification", - "ResourceConfig", "StoppingCondition", "CreationTime" ], @@ -31204,7 +31202,7 @@ "type":"string", "max":258, "min":20, - "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:ml-reservation/.*" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:[a-z0-9\\-]{1,14}/.*" }, "MlTools":{ "type":"string", @@ -32322,7 +32320,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package/[\\S]{1,2048}" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package/[\\S]{1,2048}" }, "ModelPackageArnList":{ "type":"list", @@ -32332,7 +32330,6 @@ }, "ModelPackageContainerDefinition":{ "type":"structure", - 
"required":["Image"], "members":{ "ContainerHostname":{ "shape":"ContainerHostname", @@ -32436,7 +32433,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package-group/[\\S]{1,2048}" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package-group/[\\S]{1,2048}" }, "ModelPackageGroupSortBy":{ "type":"string", @@ -37099,7 +37096,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:project/[\\S]{1,2048}" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:project/[\\S]{1,2048}" }, "ProjectEntityName":{ "type":"string", diff --git a/src/sagemaker_core/main/config_schema.py b/src/sagemaker_core/main/config_schema.py index d9397de..c3d7992 100644 --- a/src/sagemaker_core/main/config_schema.py +++ b/src/sagemaker_core/main/config_schema.py @@ -801,14 +801,14 @@ "model_artifacts": { "s3_model_artifacts": {"type": "string"} }, - "resource_config": { - "volume_kms_key_id": {"type": "string"} - }, "role_arn": {"type": "string"}, "output_data_config": { "s3_output_path": {"type": "string"}, "kms_key_id": {"type": "string"}, }, + "resource_config": { + "volume_kms_key_id": {"type": "string"} + }, "vpc_config": { "security_group_ids": { "type": "array", diff --git a/src/sagemaker_core/main/resources.py b/src/sagemaker_core/main/resources.py index 6dd7c35..76e57bf 100644 --- a/src/sagemaker_core/main/resources.py +++ b/src/sagemaker_core/main/resources.py @@ -28339,8 +28339,6 @@ class TrainingJob(Base): model_artifacts: Information about the Amazon S3 location that is configured for storing model artifacts. training_job_status: The status of the training job. SageMaker provides the following training job statuses: InProgress - The training is in progress. Completed - The training job has completed. Failed - The training job has failed. 
To see the reason for the failure, see the FailureReason field in the response to a DescribeTrainingJobResponse call. Stopping - The training job is stopping. Stopped - The training job has stopped. For more detailed information, see SecondaryStatus. secondary_status: Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition. SageMaker provides primary statuses and secondary statuses that apply to each of them: InProgress Starting - Starting the training job. Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes. Training - Training is in progress. Interrupted - The job stopped because the managed spot training instances were interrupted. Uploading - Training is complete and the model artifacts are being uploaded to the S3 location. Completed Completed - The training job has completed. Failed Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse. Stopped MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime. MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time. Stopped - The training job has stopped. Stopping Stopping - Stopping the training job. Valid values for SecondaryStatus are subject to change. We no longer support the following secondary statuses: LaunchingMLInstances PreparingTraining DownloadingTrainingImage - algorithm_specification: Information about the algorithm used for training, and algorithm metadata. - resource_config: Resources, including ML compute instances and ML storage volumes, that are configured for model training. stopping_condition: Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. 
When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs. To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. creation_time: A timestamp that indicates when the training job was created. tuning_job_arn: The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the training job was launched by a hyperparameter tuning job. @@ -28348,9 +28346,11 @@ class TrainingJob(Base): auto_ml_job_arn: The Amazon Resource Name (ARN) of an AutoML job. failure_reason: If the training job failed, the reason it failed. hyper_parameters: Algorithm-specific parameters. + algorithm_specification: Information about the algorithm used for training, and algorithm metadata. role_arn: The Amazon Web Services Identity and Access Management (IAM) role configured for the training job. input_data_config: An array of Channel objects that describes each data input channel. output_data_config: The S3 path where model artifacts that you configured when creating the job are stored. SageMaker creates subfolders for model artifacts. + resource_config: Resources, including ML compute instances and ML storage volumes, that are configured for model training. warm_pool_status: The status of the warm pool associated with the training job. vpc_config: A VpcConfig object that specifies the VPC that this training job has access to. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud. training_start_time: Indicates the time when the training job starts on training instances. You are billed for the time interval between this time and the value of TrainingEndTime. The start time in CloudWatch Logs might be later than this time. 
The difference is due to the time it takes to download the training data and to the size of the training container. @@ -28479,12 +28479,12 @@ def create( training_job_name: str, role_arn: str, output_data_config: shapes.OutputDataConfig, - stopping_condition: shapes.StoppingCondition, hyper_parameters: Optional[Dict[str, str]] = Unassigned(), algorithm_specification: Optional[shapes.AlgorithmSpecification] = Unassigned(), input_data_config: Optional[List[shapes.Channel]] = Unassigned(), resource_config: Optional[shapes.ResourceConfig] = Unassigned(), vpc_config: Optional[shapes.VpcConfig] = Unassigned(), + stopping_condition: Optional[shapes.StoppingCondition] = Unassigned(), tags: Optional[List[shapes.Tag]] = Unassigned(), enable_network_isolation: Optional[bool] = Unassigned(), enable_inter_container_traffic_encryption: Optional[bool] = Unassigned(), @@ -28513,12 +28513,12 @@ def create( training_job_name: The name of the training job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account. role_arn: The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf. During model training, SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see SageMaker Roles. To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. output_data_config: Specifies the path to the S3 location where you want to store model artifacts. SageMaker creates subfolders for the artifacts. - stopping_condition: Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. 
When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs. To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. hyper_parameters: Algorithm-specific parameters that influence the quality of the model. You set hyperparameters before you start the learning process. For a list of hyperparameters for each training algorithm provided by SageMaker, see Algorithms. You can specify a maximum of 100 hyperparameters. Each hyperparameter is a key-value pair. Each key and value is limited to 256 characters, as specified by the Length Constraint. Do not include any security-sensitive information including account access IDs, secrets, or tokens in any hyperparameter fields. As part of the shared responsibility model, you are responsible for any potential exposure, unauthorized access, or compromise of your sensitive data if caused by any security-sensitive information included in the request hyperparameter variable or plain text fields. algorithm_specification: The registry path of the Docker image that contains the training algorithm and algorithm-specific metadata, including the input mode. For more information about algorithms provided by SageMaker, see Algorithms. For information about providing your own algorithms, see Using Your Own Algorithms with Amazon SageMaker. input_data_config: An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location. Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3, EFS, or FSx location where the input data is stored. 
It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format. Depending on the input mode that the algorithm supports, SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams. For example, if you specify an EFS location, input data files are available as input streams. They do not need to be downloaded. Your input must be in the same Amazon Web Services region as your training job. resource_config: The resources, including the ML compute instances and ML storage volumes, to use for model training. ML storage volumes store model artifacts and incremental states. Training algorithms might also use ML storage volumes for scratch space. If you want SageMaker to use the ML storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1. vpc_config: A VpcConfig object that specifies the VPC that you want your training job to connect to. Control access to and from your training container by configuring the VPC. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud. + stopping_condition: Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs. To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. tags: An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. 
For more information, see Tagging Amazon Web Services Resources. Do not include any security-sensitive information including account access IDs, secrets, or tokens in any tags. As part of the shared responsibility model, you are responsible for any potential exposure, unauthorized access, or compromise of your sensitive data if caused by any security-sensitive information included in the request tag variable or plain text fields. enable_network_isolation: Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access. enable_inter_container_traffic_encryption: To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training. For more information, see Protect Communications Between ML Compute Instances in a Distributed Training Job. diff --git a/src/sagemaker_core/main/shapes.py b/src/sagemaker_core/main/shapes.py index 10713f4..37265f6 100644 --- a/src/sagemaker_core/main/shapes.py +++ b/src/sagemaker_core/main/shapes.py @@ -657,8 +657,8 @@ class ModelPackageContainerDefinition(Base): model_data_e_tag: The ETag associated with Model Data URL. """ - image: str container_hostname: Optional[str] = Unassigned() + image: Optional[str] = Unassigned() image_digest: Optional[str] = Unassigned() model_data_url: Optional[str] = Unassigned() model_data_source: Optional[ModelDataSource] = Unassigned()