From 85e39d6cd8ef7c91b32f5b393c168955a0b64584 Mon Sep 17 00:00:00 2001 From: AWS SDK for Ruby Date: Mon, 23 May 2022 18:07:05 +0000 Subject: [PATCH] Updated API models and rebuilt service gems. --- apis/elasticache/2015-02-02/api-2.json | 3 +- apis/elasticache/2015-02-02/docs-2.json | 19 +- apis/forecast/2018-06-26/api-2.json | 291 +++++- apis/forecast/2018-06-26/docs-2.json | 277 +++++- apis/forecast/2018-06-26/paginators-1.json | 24 + apis/personalize/2018-05-22/api-2.json | 4 +- apis/personalize/2018-05-22/docs-2.json | 13 +- gems/aws-partitions/CHANGELOG.md | 5 + gems/aws-partitions/VERSION | 2 +- gems/aws-partitions/partitions.json | 31 + gems/aws-sdk-elasticache/CHANGELOG.md | 5 + gems/aws-sdk-elasticache/VERSION | 2 +- .../lib/aws-sdk-elasticache.rb | 2 +- .../lib/aws-sdk-elasticache/client.rb | 60 +- .../lib/aws-sdk-elasticache/client_api.rb | 1 + .../lib/aws-sdk-elasticache/types.rb | 123 +-- gems/aws-sdk-forecastservice/CHANGELOG.md | 5 + gems/aws-sdk-forecastservice/VERSION | 2 +- .../lib/aws-sdk-forecastservice.rb | 2 +- .../lib/aws-sdk-forecastservice/client.rb | 577 ++++++++++-- .../lib/aws-sdk-forecastservice/client_api.rb | 228 +++++ .../lib/aws-sdk-forecastservice/types.rb | 838 +++++++++++++++++- gems/aws-sdk-personalize/CHANGELOG.md | 5 + gems/aws-sdk-personalize/VERSION | 2 +- .../lib/aws-sdk-personalize.rb | 2 +- .../lib/aws-sdk-personalize/client.rb | 13 +- .../lib/aws-sdk-personalize/client_api.rb | 2 + .../lib/aws-sdk-personalize/types.rb | 29 +- 28 files changed, 2298 insertions(+), 269 deletions(-) diff --git a/apis/elasticache/2015-02-02/api-2.json b/apis/elasticache/2015-02-02/api-2.json index 92c5db20658..ec6e6a328df 100644 --- a/apis/elasticache/2015-02-02/api-2.json +++ b/apis/elasticache/2015-02-02/api-2.json @@ -1957,7 +1957,8 @@ "OutpostMode":{"shape":"OutpostMode"}, "PreferredOutpostArn":{"shape":"String"}, "PreferredOutpostArns":{"shape":"PreferredOutpostArnList"}, - 
"LogDeliveryConfigurations":{"shape":"LogDeliveryConfigurationRequestList"} + "LogDeliveryConfigurations":{"shape":"LogDeliveryConfigurationRequestList"}, + "TransitEncryptionEnabled":{"shape":"BooleanOptional"} } }, "CreateCacheClusterResult":{ diff --git a/apis/elasticache/2015-02-02/docs-2.json b/apis/elasticache/2015-02-02/docs-2.json index caa793ce7e8..8cad9c8aec7 100644 --- a/apis/elasticache/2015-02-02/docs-2.json +++ b/apis/elasticache/2015-02-02/docs-2.json @@ -226,6 +226,7 @@ "CacheCluster$TransitEncryptionEnabled": "

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

", "CacheCluster$AtRestEncryptionEnabled": "

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

", "CreateCacheClusterMessage$AutoMinorVersionUpgrade": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

", + "CreateCacheClusterMessage$TransitEncryptionEnabled": "

A flag that enables in-transit encryption when set to true. You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a cache cluster in an Amazon VPC using Memcached version 1.6.12 or later.

", "CreateReplicationGroupMessage$AutomaticFailoverEnabled": "

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.

Default: false

", "CreateReplicationGroupMessage$MultiAZEnabled": "

A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ.

", "CreateReplicationGroupMessage$AutoMinorVersionUpgrade": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

", @@ -315,7 +316,7 @@ } }, "CacheNode": { - "base": "

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "base": "

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "refs": { "CacheNodeList$member": null } @@ -2002,7 +2003,7 @@ "BatchStopUpdateActionMessage$ServiceUpdateName": "

The unique ID of the service update

", "CacheCluster$CacheClusterId": "

The user-supplied identifier of the cluster. This identifier is a unique key that identifies a cluster.

", "CacheCluster$ClientDownloadLandingPage": "

The URL of the web page where you can download the latest ElastiCache client library.

", - "CacheCluster$CacheNodeType": "

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "CacheCluster$CacheNodeType": "

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "CacheCluster$Engine": "

The name of the cache engine (memcached or redis) to be used for this cluster.

", "CacheCluster$EngineVersion": "

The version of the cache engine that is used in this cluster.

", "CacheCluster$CacheClusterStatus": "

The current state of this cluster, one of the following values: available, creating, deleted, deleting, incompatible-network, modifying, rebooting cluster nodes, restore-failed, or snapshotting.

", @@ -2069,7 +2070,7 @@ "CreateCacheClusterMessage$CacheClusterId": "

The node group (shard) identifier. This parameter is stored as a lowercase string.

Constraints:

", "CreateCacheClusterMessage$ReplicationGroupId": "

The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.

If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.

This parameter is only valid if the Engine parameter is redis.

", "CreateCacheClusterMessage$PreferredAvailabilityZone": "

The EC2 Availability Zone in which the cluster is created.

All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones.

Default: System chosen Availability Zone.

", - "CreateCacheClusterMessage$CacheNodeType": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "CreateCacheClusterMessage$CacheNodeType": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "CreateCacheClusterMessage$Engine": "

The name of the cache engine to be used for this cluster.

Valid values for this parameter are: memcached | redis

", "CreateCacheClusterMessage$EngineVersion": "

The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

", "CreateCacheClusterMessage$CacheParameterGroupName": "

The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster.

", @@ -2094,7 +2095,7 @@ "CreateReplicationGroupMessage$ReplicationGroupDescription": "

A user-created description for the replication group.

", "CreateReplicationGroupMessage$GlobalReplicationGroupId": "

The name of the Global datastore

", "CreateReplicationGroupMessage$PrimaryClusterId": "

The identifier of the cluster that serves as the primary for this replication group. This cluster must already exist and have a status of available.

This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified.

", - "CreateReplicationGroupMessage$CacheNodeType": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "CreateReplicationGroupMessage$CacheNodeType": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "CreateReplicationGroupMessage$Engine": "

The name of the cache engine to be used for the clusters in this replication group. Must be Redis.

", "CreateReplicationGroupMessage$EngineVersion": "

The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

", "CreateReplicationGroupMessage$CacheParameterGroupName": "

The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.

If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.

", @@ -2149,13 +2150,13 @@ "DescribeReplicationGroupsMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeReservedCacheNodesMessage$ReservedCacheNodeId": "

The reserved cache node identifier filter value. Use this parameter to show only the reservation that matches the specified reservation ID.

", "DescribeReservedCacheNodesMessage$ReservedCacheNodesOfferingId": "

The offering identifier filter value. Use this parameter to show only purchased reservations matching the specified offering identifier.

", - "DescribeReservedCacheNodesMessage$CacheNodeType": "

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "DescribeReservedCacheNodesMessage$CacheNodeType": "

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "DescribeReservedCacheNodesMessage$Duration": "

The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration.

Valid Values: 1 | 3 | 31536000 | 94608000

", "DescribeReservedCacheNodesMessage$ProductDescription": "

The product description filter value. Use this parameter to show only those reservations matching the specified product description.

", "DescribeReservedCacheNodesMessage$OfferingType": "

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"|\"All Upfront\"|\"Partial Upfront\"| \"No Upfront\"

", "DescribeReservedCacheNodesMessage$Marker": "

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeReservedCacheNodesOfferingsMessage$ReservedCacheNodesOfferingId": "

The offering identifier filter value. Use this parameter to show only the available offering that matches the specified reservation identifier.

Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

", - "DescribeReservedCacheNodesOfferingsMessage$CacheNodeType": "

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "DescribeReservedCacheNodesOfferingsMessage$CacheNodeType": "

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "DescribeReservedCacheNodesOfferingsMessage$Duration": "

Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration.

Valid Values: 1 | 3 | 31536000 | 94608000

", "DescribeReservedCacheNodesOfferingsMessage$ProductDescription": "

The product description filter value. Use this parameter to show only the available offerings matching the specified product description.

", "DescribeReservedCacheNodesOfferingsMessage$OfferingType": "

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid Values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\" |\"All Upfront\"|\"Partial Upfront\"| \"No Upfront\"

", @@ -2309,14 +2310,14 @@ "ReplicationGroupPendingModifiedValues$PrimaryClusterId": "

The primary cluster ID that is applied immediately (if --apply-immediately was specified), or during the next maintenance window.

", "ReservedCacheNode$ReservedCacheNodeId": "

The unique identifier for the reservation.

", "ReservedCacheNode$ReservedCacheNodesOfferingId": "

The offering identifier.

", - "ReservedCacheNode$CacheNodeType": "

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "ReservedCacheNode$CacheNodeType": "

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "ReservedCacheNode$ProductDescription": "

The description of the reserved cache node.

", "ReservedCacheNode$OfferingType": "

The offering type of this reserved cache node.

", "ReservedCacheNode$State": "

The state of the reserved cache node.

", "ReservedCacheNode$ReservationARN": "

The Amazon Resource Name (ARN) of the reserved cache node.

Example: arn:aws:elasticache:us-east-1:123456789012:reserved-instance:ri-2017-03-27-08-33-25-582

", "ReservedCacheNodeMessage$Marker": "

Provides an identifier to allow retrieval of paginated results.

", "ReservedCacheNodesOffering$ReservedCacheNodesOfferingId": "

A unique identifier for the reserved cache node offering.

", - "ReservedCacheNodesOffering$CacheNodeType": "

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "ReservedCacheNodesOffering$CacheNodeType": "

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "ReservedCacheNodesOffering$ProductDescription": "

The cache engine used by the offering.

", "ReservedCacheNodesOffering$OfferingType": "

The offering type.

", "ReservedCacheNodesOfferingMessage$Marker": "

Provides an identifier to allow retrieval of paginated results.

", @@ -2339,7 +2340,7 @@ "Snapshot$CacheClusterId": "

The user-supplied identifier of the source cluster.

", "Snapshot$SnapshotStatus": "

The status of the snapshot. Valid values: creating | available | restoring | copying | deleting.

", "Snapshot$SnapshotSource": "

Indicates whether the snapshot is from an automatic backup (automated) or was created manually (manual).

", - "Snapshot$CacheNodeType": "

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", + "Snapshot$CacheNodeType": "

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

", "Snapshot$Engine": "

The name of the cache engine (memcached or redis) used by the source cluster.

", "Snapshot$EngineVersion": "

The version of the cache engine version that is used by the source cluster.

", "Snapshot$PreferredAvailabilityZone": "

The name of the Availability Zone in which the source cluster is located.

", diff --git a/apis/forecast/2018-06-26/api-2.json b/apis/forecast/2018-06-26/api-2.json index 38185c2f2ee..ba91e35caf2 100644 --- a/apis/forecast/2018-06-26/api-2.json +++ b/apis/forecast/2018-06-26/api-2.json @@ -139,6 +139,22 @@ {"shape":"LimitExceededException"} ] }, + "CreateMonitor":{ + "name":"CreateMonitor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMonitorRequest"}, + "output":{"shape":"CreateMonitorResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ] + }, "CreatePredictor":{ "name":"CreatePredictor", "http":{ @@ -269,6 +285,20 @@ ], "idempotent":true }, + "DeleteMonitor":{ + "name":"DeleteMonitor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMonitorRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "idempotent":true + }, "DeletePredictor":{ "name":"DeletePredictor", "http":{ @@ -423,6 +453,20 @@ ], "idempotent":true }, + "DescribeMonitor":{ + "name":"DescribeMonitor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMonitorRequest"}, + "output":{"shape":"DescribeMonitorResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, "DescribePredictor":{ "name":"DescribePredictor", "http":{ @@ -562,6 +606,35 @@ ], "idempotent":true }, + "ListMonitorEvaluations":{ + "name":"ListMonitorEvaluations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMonitorEvaluationsRequest"}, + "output":{"shape":"ListMonitorEvaluationsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, + 
"ListMonitors":{ + "name":"ListMonitors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMonitorsRequest"}, + "output":{"shape":"ListMonitorsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "idempotent":true + }, "ListPredictorBacktestExportJobs":{ "name":"ListPredictorBacktestExportJobs", "http":{ @@ -603,6 +676,21 @@ {"shape":"InvalidInputException"} ] }, + "ResumeResource":{ + "name":"ResumeResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResumeResourceRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "idempotent":true + }, "StopResource":{ "name":"StopResource", "http":{ @@ -718,6 +806,23 @@ "AccuracyOptimized" ] }, + "Baseline":{ + "type":"structure", + "members":{ + "PredictorBaseline":{"shape":"PredictorBaseline"} + } + }, + "BaselineMetric":{ + "type":"structure", + "members":{ + "Name":{"shape":"Name"}, + "Value":{"shape":"Double"} + } + }, + "BaselineMetrics":{ + "type":"list", + "member":{"shape":"BaselineMetric"} + }, "Boolean":{"type":"boolean"}, "CategoricalParameterRange":{ "type":"structure", @@ -775,7 +880,8 @@ "ReferencePredictorArn":{"shape":"Arn"}, "OptimizationMetric":{"shape":"OptimizationMetric"}, "ExplainPredictor":{"shape":"Boolean"}, - "Tags":{"shape":"Tags"} + "Tags":{"shape":"Tags"}, + "MonitorConfig":{"shape":"MonitorConfig"} } }, "CreateAutoPredictorResponse":{ @@ -935,6 +1041,24 @@ "ForecastArn":{"shape":"Arn"} } }, + "CreateMonitorRequest":{ + "type":"structure", + "required":[ + "MonitorName", + "ResourceArn" + ], + "members":{ + "MonitorName":{"shape":"Name"}, + "ResourceArn":{"shape":"Arn"}, + "Tags":{"shape":"Tags"} + } + }, + "CreateMonitorResponse":{ + "type":"structure", + "members":{ + "MonitorArn":{"shape":"Arn"} + } + }, "CreatePredictorBacktestExportJobRequest":{ 
"type":"structure", "required":[ @@ -1111,6 +1235,13 @@ "ForecastArn":{"shape":"Arn"} } }, + "DeleteMonitorRequest":{ + "type":"structure", + "required":["MonitorArn"], + "members":{ + "MonitorArn":{"shape":"Arn"} + } + }, "DeletePredictorBacktestExportJobRequest":{ "type":"structure", "required":["PredictorBacktestExportJobArn"], @@ -1158,7 +1289,8 @@ "CreationTime":{"shape":"Timestamp"}, "LastModificationTime":{"shape":"Timestamp"}, "OptimizationMetric":{"shape":"OptimizationMetric"}, - "ExplainabilityInfo":{"shape":"ExplainabilityInfo"} + "ExplainabilityInfo":{"shape":"ExplainabilityInfo"}, + "MonitorInfo":{"shape":"MonitorInfo"} } }, "DescribeDatasetGroupRequest":{ @@ -1317,6 +1449,29 @@ "LastModificationTime":{"shape":"Timestamp"} } }, + "DescribeMonitorRequest":{ + "type":"structure", + "required":["MonitorArn"], + "members":{ + "MonitorArn":{"shape":"Arn"} + } + }, + "DescribeMonitorResponse":{ + "type":"structure", + "members":{ + "MonitorName":{"shape":"Name"}, + "MonitorArn":{"shape":"Arn"}, + "ResourceArn":{"shape":"Arn"}, + "Status":{"shape":"Status"}, + "LastEvaluationTime":{"shape":"Timestamp"}, + "LastEvaluationState":{"shape":"EvaluationState"}, + "Baseline":{"shape":"Baseline"}, + "Message":{"shape":"Message"}, + "CreationTime":{"shape":"Timestamp"}, + "LastModificationTime":{"shape":"Timestamp"}, + "EstimatedEvaluationTimeRemainingInMinutes":{"shape":"Long"} + } + }, "DescribePredictorBacktestExportJobRequest":{ "type":"structure", "required":["PredictorBacktestExportJobArn"], @@ -1373,6 +1528,10 @@ "OptimizationMetric":{"shape":"OptimizationMetric"} } }, + "Detail":{ + "type":"string", + "max":256 + }, "Domain":{ "type":"string", "enum":[ @@ -1426,6 +1585,10 @@ "TestWindows":{"shape":"TestWindows"} } }, + "EvaluationState":{ + "type":"string", + "max":256 + }, "EvaluationType":{ "type":"string", "enum":[ @@ -1804,6 +1967,38 @@ "NextToken":{"shape":"NextToken"} } }, + "ListMonitorEvaluationsRequest":{ + "type":"structure", + 
"required":["MonitorArn"], + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "MonitorArn":{"shape":"Arn"}, + "Filters":{"shape":"Filters"} + } + }, + "ListMonitorEvaluationsResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "PredictorMonitorEvaluations":{"shape":"PredictorMonitorEvaluations"} + } + }, + "ListMonitorsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "Filters":{"shape":"Filters"} + } + }, + "ListMonitorsResponse":{ + "type":"structure", + "members":{ + "Monitors":{"shape":"Monitors"}, + "NextToken":{"shape":"NextToken"} + } + }, "ListPredictorBacktestExportJobsRequest":{ "type":"structure", "members":{ @@ -1859,6 +2054,21 @@ "min":1 }, "Message":{"type":"string"}, + "MetricName":{ + "type":"string", + "max":256 + }, + "MetricResult":{ + "type":"structure", + "members":{ + "MetricName":{"shape":"MetricName"}, + "MetricValue":{"shape":"Double"} + } + }, + "MetricResults":{ + "type":"list", + "member":{"shape":"MetricResult"} + }, "Metrics":{ "type":"structure", "members":{ @@ -1872,6 +2082,43 @@ "AverageWeightedQuantileLoss":{"shape":"Double"} } }, + "MonitorConfig":{ + "type":"structure", + "required":["MonitorName"], + "members":{ + "MonitorName":{"shape":"Name"} + } + }, + "MonitorDataSource":{ + "type":"structure", + "members":{ + "DatasetImportJobArn":{"shape":"Arn"}, + "ForecastArn":{"shape":"Arn"}, + "PredictorArn":{"shape":"Arn"} + } + }, + "MonitorInfo":{ + "type":"structure", + "members":{ + "MonitorArn":{"shape":"Arn"}, + "Status":{"shape":"Status"} + } + }, + "MonitorSummary":{ + "type":"structure", + "members":{ + "MonitorArn":{"shape":"Arn"}, + "MonitorName":{"shape":"Name"}, + "ResourceArn":{"shape":"Arn"}, + "Status":{"shape":"Status"}, + "CreationTime":{"shape":"Timestamp"}, + "LastModificationTime":{"shape":"Timestamp"} + } + }, + "Monitors":{ + "type":"list", + 
"member":{"shape":"MonitorSummary"} + }, "Name":{ "type":"string", "max":63, @@ -1928,10 +2175,23 @@ "type":"list", "member":{"shape":"PredictorBacktestExportJobSummary"} }, + "PredictorBaseline":{ + "type":"structure", + "members":{ + "BaselineMetrics":{"shape":"BaselineMetrics"} + } + }, "PredictorEvaluationResults":{ "type":"list", "member":{"shape":"EvaluationResult"} }, + "PredictorEvent":{ + "type":"structure", + "members":{ + "Detail":{"shape":"Detail"}, + "Datetime":{"shape":"Timestamp"} + } + }, "PredictorExecution":{ "type":"structure", "members":{ @@ -1951,6 +2211,26 @@ "max":5, "min":1 }, + "PredictorMonitorEvaluation":{ + "type":"structure", + "members":{ + "ResourceArn":{"shape":"Arn"}, + "MonitorArn":{"shape":"Arn"}, + "EvaluationTime":{"shape":"Timestamp"}, + "EvaluationState":{"shape":"EvaluationState"}, + "WindowStartDatetime":{"shape":"Timestamp"}, + "WindowEndDatetime":{"shape":"Timestamp"}, + "PredictorEvent":{"shape":"PredictorEvent"}, + "MonitorDataSource":{"shape":"MonitorDataSource"}, + "MetricResults":{"shape":"MetricResults"}, + "NumItemsEvaluated":{"shape":"Long"}, + "Message":{"shape":"Message"} + } + }, + "PredictorMonitorEvaluations":{ + "type":"list", + "member":{"shape":"PredictorMonitorEvaluation"} + }, "PredictorSummary":{ "type":"structure", "members":{ @@ -1997,6 +2277,13 @@ }, "exception":true }, + "ResumeResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{"shape":"Arn"} + } + }, "S3Config":{ "type":"structure", "required":[ diff --git a/apis/forecast/2018-06-26/docs-2.json b/apis/forecast/2018-06-26/docs-2.json index 706f9e88777..0ab7e84fa7a 100644 --- a/apis/forecast/2018-06-26/docs-2.json +++ b/apis/forecast/2018-06-26/docs-2.json @@ -2,51 +2,57 @@ "version": "2.0", "service": "

Provides APIs for creating and managing Amazon Forecast resources.

", "operations": { - "CreateAutoPredictor": "

Creates an Amazon Forecast predictor.

Amazon Forecast creates predictors with AutoPredictor, which involves applying the optimal combination of algorithms to each time series in your datasets. You can use CreateAutoPredictor to create new predictors or upgrade/retrain existing predictors.

Creating new predictors

The following parameters are required when creating a new predictor:

When creating a new predictor, do not specify a value for ReferencePredictorArn.

Upgrading and retraining predictors

The following parameters are required when retraining or upgrading a predictor:

When upgrading or retraining a predictor, only specify values for the ReferencePredictorArn and PredictorName.

", - "CreateDataset": "

Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:

After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see howitworks-datasets-groups.

To get a list of all your datasets, use the ListDatasets operation.

For example Forecast datasets, see the Amazon Forecast Sample GitHub repository.

The Status of a dataset must be ACTIVE before you can import training data. Use the DescribeDataset operation to get the status.

", - "CreateDatasetGroup": "

Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the UpdateDatasetGroup operation.

After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see howitworks-datasets-groups.

To get a list of all your datasets groups, use the ListDatasetGroups operation.

The Status of a dataset group must be ACTIVE before you can use the dataset group to create a predictor. To get the status, use the DescribeDatasetGroup operation.

", - "CreateDatasetImportJob": "

Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.

You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see aws-forecast-iam-roles.

The training data must be in CSV format. The delimiter must be a comma (,).

You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.

Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.

To get a list of all your dataset import jobs, filtered by specified criteria, use the ListDatasetImportJobs operation.

", + "CreateAutoPredictor": "

Creates an Amazon Forecast predictor.

Amazon Forecast creates predictors with AutoPredictor, which involves applying the optimal combination of algorithms to each time series in your datasets. You can use CreateAutoPredictor to create new predictors or upgrade/retrain existing predictors.

Creating new predictors

The following parameters are required when creating a new predictor:

When creating a new predictor, do not specify a value for ReferencePredictorArn.

Upgrading and retraining predictors

The following parameters are required when retraining or upgrading a predictor:

When upgrading or retraining a predictor, only specify values for the ReferencePredictorArn and PredictorName.

", + "CreateDataset": "

Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:

After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see Importing datasets.

To get a list of all your datasets, use the ListDatasets operation.

For example Forecast datasets, see the Amazon Forecast Sample GitHub repository.

The Status of a dataset must be ACTIVE before you can import training data. Use the DescribeDataset operation to get the status.

", + "CreateDatasetGroup": "

Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the UpdateDatasetGroup operation.

After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see Dataset groups.

To get a list of all your datasets groups, use the ListDatasetGroups operation.

The Status of a dataset group must be ACTIVE before you can use the dataset group to create a predictor. To get the status, use the DescribeDatasetGroup operation.

", + "CreateDatasetImportJob": "

Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.

You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see Set up permissions.

The training data must be in CSV format. The delimiter must be a comma (,).

You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.

Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.

To get a list of all your dataset import jobs, filtered by specified criteria, use the ListDatasetImportJobs operation.

", "CreateExplainability": "

Explainability is only available for Forecasts and Predictors generated from an AutoPredictor (CreateAutoPredictor)

Creates an Amazon Forecast Explainability.

Explainability helps you better understand how the attributes in your datasets impact forecast. Amazon Forecast uses a metric called Impact scores to quantify the relative impact of each attribute and determine whether they increase or decrease forecast values.

To enable Forecast Explainability, your predictor must include at least one of the following: related time series, item metadata, or additional datasets like Holidays and the Weather Index.

CreateExplainability accepts either a Predictor ARN or Forecast ARN. To receive aggregated Impact scores for all time series and time points in your datasets, provide a Predictor ARN. To receive Impact scores for specific time series and time points, provide a Forecast ARN.

CreateExplainability with a Predictor ARN

You can only have one Explainability resource per predictor. If you already enabled ExplainPredictor in CreateAutoPredictor, that predictor already has an Explainability resource.

The following parameters are required when providing a Predictor ARN:

Do not specify a value for the following parameters:

CreateExplainability with a Forecast ARN

You can specify a maximum of 50 time series and 500 time points.

The following parameters are required when providing a Forecast ARN:

If you set TimeSeriesGranularity to “SPECIFIC”, you must also provide the following:

If you set TimePointGranularity to “SPECIFIC”, you must also provide the following:

", "CreateExplainabilityExport": "

Exports an Explainability resource created by the CreateExplainability operation. Exported files are exported to an Amazon Simple Storage Service (Amazon S3) bucket.

You must specify a DataDestination object that includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.

The Status of the export job must be ACTIVE before you can access the export in your Amazon S3 bucket. To get the status, use the DescribeExplainabilityExport operation.

", "CreateForecast": "

Creates a forecast for each item in the TARGET_TIME_SERIES dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the CreateForecastExportJob operation.

The range of the forecast is determined by the ForecastHorizon value, which you specify in the CreatePredictor request. When you query a forecast, you can request a specific date range within the forecast.

To get a list of all your forecasts, use the ListForecasts operation.

The forecasts generated by Amazon Forecast are in the same time zone as the dataset that was used to create the predictor.

For more information, see howitworks-forecast.

The Status of the forecast must be ACTIVE before you can query or export the forecast. Use the DescribeForecast operation to get the status.

", "CreateForecastExportJob": "

Exports a forecast created by the CreateForecast operation to your Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will match the following conventions:

<ForecastExportJobName>_<ExportTimestamp>_<PartNumber>

where the <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).

You must specify a DataDestination object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.

For more information, see howitworks-forecast.

To get a list of all your forecast export jobs, use the ListForecastExportJobs operation.

The Status of the forecast export job must be ACTIVE before you can access the forecast in your Amazon S3 bucket. To get the status, use the DescribeForecastExportJob operation.

", + "CreateMonitor": "

Creates a predictor monitor resource for an existing auto predictor. Predictor monitoring allows you to see how your predictor's performance changes over time. For more information, see Predictor Monitoring.

", "CreatePredictor": "

This operation creates a legacy predictor that does not include all the predictor functionalities provided by Amazon Forecast. To create a predictor that is compatible with all aspects of Forecast, use CreateAutoPredictor.

Creates an Amazon Forecast predictor.

In the request, provide a dataset group and either specify an algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If you specify an algorithm, you also can override algorithm-specific hyperparameters.

Amazon Forecast uses the algorithm to train a predictor using the latest version of the datasets in the specified dataset group. You can then generate a forecast using the CreateForecast operation.

To see the evaluation metrics, use the GetAccuracyMetrics operation.

You can specify a featurization configuration to fill and aggregate the data fields in the TARGET_TIME_SERIES dataset to improve model training. For more information, see FeaturizationConfig.

For RELATED_TIME_SERIES datasets, CreatePredictor verifies that the DataFrequency specified when the dataset was created matches the ForecastFrequency. TARGET_TIME_SERIES datasets don't have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see howitworks-datasets-groups.

By default, predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90) quantiles. You can choose custom forecast types to train and evaluate your predictor by setting the ForecastTypes.

AutoML

If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the objective function, set PerformAutoML to true. The objective function is defined as the mean of the weighted losses over the forecast types. By default, these are the p10, p50, and p90 quantile losses. For more information, see EvaluationResult.

When AutoML is enabled, the following properties are disallowed:

To get a list of all of your predictors, use the ListPredictors operation.

Before you can use the predictor to create a forecast, the Status of the predictor must be ACTIVE, signifying that training has completed. To get the status, use the DescribePredictor operation.

", "CreatePredictorBacktestExportJob": "

Exports backtest forecasts and accuracy metrics generated by the CreateAutoPredictor or CreatePredictor operations. Two folders containing CSV files are exported to your specified S3 bucket.

The export file names will match the following conventions:

<ExportJobName>_<ExportTimestamp>_<PartNumber>.csv

The <ExportTimestamp> component is in Java SimpleDate format (yyyy-MM-ddTHH-mm-ssZ).

You must specify a DataDestination object that includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.

The Status of the export job must be ACTIVE before you can access the export in your Amazon S3 bucket. To get the status, use the DescribePredictorBacktestExportJob operation.

", - "DeleteDataset": "

Deletes an Amazon Forecast dataset that was created using the CreateDataset operation. You can only delete datasets that have a status of ACTIVE or CREATE_FAILED. To get the status use the DescribeDataset operation.

Forecast does not automatically update any dataset groups that contain the deleted dataset. In order to update the dataset group, use the operation, omitting the deleted dataset's ARN.

", - "DeleteDatasetGroup": "

Deletes a dataset group created using the CreateDatasetGroup operation. You can only delete dataset groups that have a status of ACTIVE, CREATE_FAILED, or UPDATE_FAILED. To get the status, use the DescribeDatasetGroup operation.

This operation deletes only the dataset group, not the datasets in the group.

", - "DeleteDatasetImportJob": "

Deletes a dataset import job created using the CreateDatasetImportJob operation. You can delete only dataset import jobs that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribeDatasetImportJob operation.

", + "DeleteDataset": "

Deletes an Amazon Forecast dataset that was created using the CreateDataset operation. You can only delete datasets that have a status of ACTIVE or CREATE_FAILED. To get the status use the DescribeDataset operation.

Forecast does not automatically update any dataset groups that contain the deleted dataset. In order to update the dataset group, use the UpdateDatasetGroup operation, omitting the deleted dataset's ARN.

", + "DeleteDatasetGroup": "

Deletes a dataset group created using the CreateDatasetGroup operation. You can only delete dataset groups that have a status of ACTIVE, CREATE_FAILED, or UPDATE_FAILED. To get the status, use the DescribeDatasetGroup operation.

This operation deletes only the dataset group, not the datasets in the group.

", + "DeleteDatasetImportJob": "

Deletes a dataset import job created using the CreateDatasetImportJob operation. You can delete only dataset import jobs that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribeDatasetImportJob operation.

", "DeleteExplainability": "

Deletes an Explainability resource.

You can delete only Explainability resources that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribeExplainability operation.

", "DeleteExplainabilityExport": "

Deletes an Explainability export.

", "DeleteForecast": "

Deletes a forecast created using the CreateForecast operation. You can delete only forecasts that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribeForecast operation.

You can't delete a forecast while it is being exported. After a forecast is deleted, you can no longer query the forecast.

", "DeleteForecastExportJob": "

Deletes a forecast export job created using the CreateForecastExportJob operation. You can delete only export jobs that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribeForecastExportJob operation.

", + "DeleteMonitor": "

Deletes a monitor resource. You can only delete a monitor resource with a status of ACTIVE, ACTIVE_STOPPED, CREATE_FAILED, or CREATE_STOPPED.

", "DeletePredictor": "

Deletes a predictor created using the CreateAutoPredictor or CreatePredictor operations. You can delete only predictors that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribePredictor operation.

", "DeletePredictorBacktestExportJob": "

Deletes a predictor backtest export job.

", "DeleteResourceTree": "

Deletes an entire resource tree. This operation will delete the parent resource and its child resources.

Child resources are resources that were created from another resource. For example, when a forecast is generated from a predictor, the forecast is the child resource and the predictor is the parent resource.

Amazon Forecast resources possess the following parent-child resource hierarchies:

DeleteResourceTree will only delete Amazon Forecast resources, and will not delete datasets or exported files stored in Amazon S3.

", "DescribeAutoPredictor": "

Describes a predictor created using the CreateAutoPredictor operation.

", - "DescribeDataset": "

Describes an Amazon Forecast dataset created using the CreateDataset operation.

In addition to listing the parameters specified in the CreateDataset request, this operation includes the following dataset properties:

", - "DescribeDatasetGroup": "

Describes a dataset group created using the CreateDatasetGroup operation.

In addition to listing the parameters provided in the CreateDatasetGroup request, this operation includes the following properties:

", - "DescribeDatasetImportJob": "

Describes a dataset import job created using the CreateDatasetImportJob operation.

In addition to listing the parameters provided in the CreateDatasetImportJob request, this operation includes the following properties:

", + "DescribeDataset": "

Describes an Amazon Forecast dataset created using the CreateDataset operation.

In addition to listing the parameters specified in the CreateDataset request, this operation includes the following dataset properties:

", + "DescribeDatasetGroup": "

Describes a dataset group created using the CreateDatasetGroup operation.

In addition to listing the parameters provided in the CreateDatasetGroup request, this operation includes the following properties:

", + "DescribeDatasetImportJob": "

Describes a dataset import job created using the CreateDatasetImportJob operation.

In addition to listing the parameters provided in the CreateDatasetImportJob request, this operation includes the following properties:

", "DescribeExplainability": "

Describes an Explainability resource created using the CreateExplainability operation.

", "DescribeExplainabilityExport": "

Describes an Explainability export created using the CreateExplainabilityExport operation.

", "DescribeForecast": "

Describes a forecast created using the CreateForecast operation.

In addition to listing the properties provided in the CreateForecast request, this operation lists the following properties:

", "DescribeForecastExportJob": "

Describes a forecast export job created using the CreateForecastExportJob operation.

In addition to listing the properties provided by the user in the CreateForecastExportJob request, this operation lists the following properties:

", + "DescribeMonitor": "

Describes a monitor resource. In addition to listing the properties provided in the CreateMonitor request, this operation lists the following properties:

", "DescribePredictor": "

This operation is only valid for legacy predictors created with CreatePredictor. If you are not using a legacy predictor, use DescribeAutoPredictor.

Describes a predictor created using the CreatePredictor operation.

In addition to listing the properties provided in the CreatePredictor request, this operation lists the following properties:

", "DescribePredictorBacktestExportJob": "

Describes a predictor backtest export job created using the CreatePredictorBacktestExportJob operation.

In addition to listing the properties provided by the user in the CreatePredictorBacktestExportJob request, this operation lists the following properties:

", "GetAccuracyMetrics": "

Provides metrics on the accuracy of the models that were trained by the CreatePredictor operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast. For more information, see Predictor Metrics.

This operation generates metrics for each backtest window that was evaluated. The number of backtest windows (NumberOfBacktestWindows) is specified using the EvaluationParameters object, which is optionally included in the CreatePredictor request. If NumberOfBacktestWindows isn't specified, the number defaults to one.

The parameters of the filling method determine which items contribute to the metrics. If you want all items to contribute, specify zero. If you want only those items that have complete data in the range being evaluated to contribute, specify nan. For more information, see FeaturizationMethod.

Before you can get accuracy metrics, the Status of the predictor must be ACTIVE, signifying that training has completed. To get the status, use the DescribePredictor operation.

", - "ListDatasetGroups": "

Returns a list of dataset groups created using the CreateDatasetGroup operation. For each dataset group, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the dataset group ARN with the DescribeDatasetGroup operation.

", - "ListDatasetImportJobs": "

Returns a list of dataset import jobs created using the CreateDatasetImportJob operation. For each import job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the DescribeDatasetImportJob operation. You can filter the list by providing an array of Filter objects.

", - "ListDatasets": "

Returns a list of datasets created using the CreateDataset operation. For each dataset, a summary of its properties, including its Amazon Resource Name (ARN), is returned. To retrieve the complete set of properties, use the ARN with the DescribeDataset operation.

", + "ListDatasetGroups": "

Returns a list of dataset groups created using the CreateDatasetGroup operation. For each dataset group, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the dataset group ARN with the DescribeDatasetGroup operation.

", + "ListDatasetImportJobs": "

Returns a list of dataset import jobs created using the CreateDatasetImportJob operation. For each import job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the DescribeDatasetImportJob operation. You can filter the list by providing an array of Filter objects.

", + "ListDatasets": "

Returns a list of datasets created using the CreateDataset operation. For each dataset, a summary of its properties, including its Amazon Resource Name (ARN), is returned. To retrieve the complete set of properties, use the ARN with the DescribeDataset operation.

", "ListExplainabilities": "

Returns a list of Explainability resources created using the CreateExplainability operation. This operation returns a summary for each Explainability. You can filter the list using an array of Filter objects.

To retrieve the complete set of properties for a particular Explainability resource, use the ARN with the DescribeExplainability operation.

", "ListExplainabilityExports": "

Returns a list of Explainability exports created using the CreateExplainabilityExport operation. This operation returns a summary for each Explainability export. You can filter the list using an array of Filter objects.

To retrieve the complete set of properties for a particular Explainability export, use the ARN with the DescribeExplainability operation.

", "ListForecastExportJobs": "

Returns a list of forecast export jobs created using the CreateForecastExportJob operation. For each forecast export job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, use the ARN with the DescribeForecastExportJob operation. You can filter the list using an array of Filter objects.

", "ListForecasts": "

Returns a list of forecasts created using the CreateForecast operation. For each forecast, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, specify the ARN with the DescribeForecast operation. You can filter the list using an array of Filter objects.

", + "ListMonitorEvaluations": "

Returns a list of the monitoring evaluation results and predictor events collected by the monitor resource during different windows of time.

For information about monitoring and retrieving monitoring results, see Viewing Monitoring Results.

", + "ListMonitors": "

Returns a list of monitors created with the CreateMonitor operation and CreateAutoPredictor operation. For each monitor resource, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve a complete set of properties of a monitor resource by specifying the monitor's ARN in the DescribeMonitor operation.

", "ListPredictorBacktestExportJobs": "

Returns a list of predictor backtest export jobs created using the CreatePredictorBacktestExportJob operation. This operation returns a summary for each backtest export job. You can filter the list using an array of Filter objects.

To retrieve the complete set of properties for a particular backtest export job, use the ARN with the DescribePredictorBacktestExportJob operation.

", "ListPredictors": "

Returns a list of predictors created using the CreateAutoPredictor or CreatePredictor operations. For each predictor, this operation returns a summary of its properties, including its Amazon Resource Name (ARN).

You can retrieve the complete set of properties by using the ARN with the DescribeAutoPredictor and DescribePredictor operations. You can filter the list using an array of Filter objects.

", "ListTagsForResource": "

Lists the tags for an Amazon Forecast resource.

", + "ResumeResource": "

Resumes a stopped monitor resource.

", "StopResource": "

Stops a resource.

The resource undergoes the following states: CREATE_STOPPING and CREATE_STOPPED. You cannot resume a resource once it has been stopped.

This operation can be applied to the following resources (and their corresponding child resources):

", "TagResource": "

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are also deleted.

", "UntagResource": "

Deletes the specified tags from a resource.

", - "UpdateDatasetGroup": "

Replaces the datasets in a dataset group with the specified datasets.

The Status of the dataset group must be ACTIVE before you can use the dataset group to create a predictor. Use the DescribeDatasetGroup operation to get the status.

" + "UpdateDatasetGroup": "

Replaces the datasets in a dataset group with the specified datasets.

The Status of the dataset group must be ACTIVE before you can use the dataset group to create a predictor. Use the DescribeDatasetGroup operation to get the status.

" }, "shapes": { "AdditionalDataset": { @@ -79,6 +85,8 @@ "CreateForecastExportJobResponse$ForecastExportJobArn": "

The Amazon Resource Name (ARN) of the export job.

", "CreateForecastRequest$PredictorArn": "

The Amazon Resource Name (ARN) of the predictor to use to generate the forecast.

", "CreateForecastResponse$ForecastArn": "

The Amazon Resource Name (ARN) of the forecast.

", + "CreateMonitorRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the predictor to monitor.

", + "CreateMonitorResponse$MonitorArn": "

The Amazon Resource Name (ARN) of the monitor resource.

", "CreatePredictorBacktestExportJobRequest$PredictorArn": "

The Amazon Resource Name (ARN) of the predictor that you want to export.

", "CreatePredictorBacktestExportJobResponse$PredictorBacktestExportJobArn": "

The Amazon Resource Name (ARN) of the predictor backtest export job that you want to export.

", "CreatePredictorRequest$AlgorithmArn": "

The Amazon Resource Name (ARN) of the algorithm to use for model training. Required if PerformAutoML is not set to true.

Supported algorithms:

", @@ -94,6 +102,7 @@ "DeleteExplainabilityRequest$ExplainabilityArn": "

The Amazon Resource Name (ARN) of the Explainability resource to delete.

", "DeleteForecastExportJobRequest$ForecastExportJobArn": "

The Amazon Resource Name (ARN) of the forecast export job to delete.

", "DeleteForecastRequest$ForecastArn": "

The Amazon Resource Name (ARN) of the forecast to delete.

", + "DeleteMonitorRequest$MonitorArn": "

The Amazon Resource Name (ARN) of the monitor resource to delete.

", "DeletePredictorBacktestExportJobRequest$PredictorBacktestExportJobArn": "

The Amazon Resource Name (ARN) of the predictor backtest export job to delete.

", "DeletePredictorRequest$PredictorArn": "

The Amazon Resource Name (ARN) of the predictor to delete.

", "DeleteResourceTreeRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the parent resource to delete. All child resources of the parent resource will also be deleted.

", @@ -119,6 +128,9 @@ "DescribeForecastResponse$ForecastArn": "

The forecast ARN as specified in the request.

", "DescribeForecastResponse$PredictorArn": "

The ARN of the predictor used to generate the forecast.

", "DescribeForecastResponse$DatasetGroupArn": "

The ARN of the dataset group that provided the data used to train the predictor.

", + "DescribeMonitorRequest$MonitorArn": "

The Amazon Resource Name (ARN) of the monitor resource to describe.

", + "DescribeMonitorResponse$MonitorArn": "

The Amazon Resource Name (ARN) of the monitor resource described.

", + "DescribeMonitorResponse$ResourceArn": "

The Amazon Resource Name (ARN) of the auto predictor being monitored.

", "DescribePredictorBacktestExportJobRequest$PredictorBacktestExportJobArn": "

The Amazon Resource Name (ARN) of the predictor backtest export job.

", "DescribePredictorBacktestExportJobResponse$PredictorBacktestExportJobArn": "

The Amazon Resource Name (ARN) of the predictor backtest export job.

", "DescribePredictorBacktestExportJobResponse$PredictorArn": "

The Amazon Resource Name (ARN) of the predictor.

", @@ -135,12 +147,22 @@ "ForecastSummary$ForecastArn": "

The ARN of the forecast.

", "GetAccuracyMetricsRequest$PredictorArn": "

The Amazon Resource Name (ARN) of the predictor to get metrics for.

", "InputDataConfig$DatasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group.

", + "ListMonitorEvaluationsRequest$MonitorArn": "

The Amazon Resource Name (ARN) of the monitor resource to get results from.

", "ListTagsForResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags.

", + "MonitorDataSource$DatasetImportJobArn": "

The Amazon Resource Name (ARN) of the dataset import job used to import the data that initiated the monitor evaluation.

", + "MonitorDataSource$ForecastArn": "

The Amazon Resource Name (ARN) of the forecast the monitor used during the evaluation.

", + "MonitorDataSource$PredictorArn": "

The Amazon Resource Name (ARN) of the predictor resource you are monitoring.

", + "MonitorInfo$MonitorArn": "

The Amazon Resource Name (ARN) of the monitor resource.

", + "MonitorSummary$MonitorArn": "

The Amazon Resource Name (ARN) of the monitor resource.

", + "MonitorSummary$ResourceArn": "

The Amazon Resource Name (ARN) of the predictor being monitored.

", "PredictorBacktestExportJobSummary$PredictorBacktestExportJobArn": "

The Amazon Resource Name (ARN) of the predictor backtest export job.

", "PredictorExecution$AlgorithmArn": "

The ARN of the algorithm used to test the predictor.

", + "PredictorMonitorEvaluation$ResourceArn": null, + "PredictorMonitorEvaluation$MonitorArn": null, "PredictorSummary$PredictorArn": "

The ARN of the predictor.

", "PredictorSummary$DatasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group that contains the data used to train the predictor.

", "ReferencePredictorSummary$Arn": "

The ARN of the reference predictor.

", + "ResumeResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the monitor resource to resume.

", "S3Config$RoleArn": "

The ARN of the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket or files. If you provide a value for the KMSKeyArn key, the role must allow access to the key.

Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an InvalidInputException error.

", "StopResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) that identifies the resource to stop. The supported ARNs are DatasetImportJobArn, PredictorArn, PredictorBacktestExportJobArn, ForecastArn, ForecastExportJobArn, ExplainabilityArn, and ExplainabilityExportArn.

", "TagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags.

", @@ -174,7 +196,7 @@ "AttributeType": { "base": null, "refs": { - "SchemaAttribute$AttributeType": "

The data type of the field.

" + "SchemaAttribute$AttributeType": "

The data type of the field.

For a related time series dataset, other than date, item_id, and forecast dimensions attributes, all attributes should be of numerical type (integer/float).

" } }, "AutoMLOverrideStrategy": { @@ -185,11 +207,29 @@ "GetAccuracyMetricsResponse$AutoMLOverrideStrategy": "

The LatencyOptimized AutoML override strategy is only available in private beta. Contact AWS Support or your account manager to learn more about access privileges.

The AutoML strategy used to train the predictor. Unless LatencyOptimized is specified, the AutoML strategy optimizes predictor accuracy.

This parameter is only valid for predictors trained using AutoML.

" } }, + "Baseline": { + "base": "

Metrics you can use as a baseline for comparison purposes. Use these metrics when you interpret monitoring results for an auto predictor.

", + "refs": { + "DescribeMonitorResponse$Baseline": "

Metrics you can use as a baseline for comparison purposes. Use these values when you interpret monitoring results for an auto predictor.

" + } + }, + "BaselineMetric": { + "base": "

An individual metric that you can use for comparison as you evaluate your monitoring results.

", + "refs": { + "BaselineMetrics$member": null + } + }, + "BaselineMetrics": { + "base": null, + "refs": { + "PredictorBaseline$BaselineMetrics": "

The initial accuracy metrics for the predictor. Use these metrics as a baseline for comparison purposes as you use your predictor and the metrics change.

" + } + }, "Boolean": { "base": null, "refs": { "CreateAutoPredictorRequest$ExplainPredictor": "

Create an Explainability resource for the predictor.

", - "CreateExplainabilityRequest$EnableVisualization": "

Create an Expainability visualization that is viewable within the AWS console.

", + "CreateExplainabilityRequest$EnableVisualization": "

Create an Explainability visualization that is viewable within the AWS console.

", "CreatePredictorRequest$PerformAutoML": "

Whether to perform AutoML. When Amazon Forecast performs AutoML, it evaluates the algorithms it provides and chooses the best algorithm and configuration for your training dataset.

The default value is false. In this case, you are required to specify an algorithm.

Set PerformAutoML to true to have Amazon Forecast perform AutoML. This is a good option if you aren't sure which algorithm is suitable for your training data. In this case, PerformHPO must be false.

", "CreatePredictorRequest$PerformHPO": "

Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter values for your training data. The process of performing HPO is known as running a hyperparameter tuning job.

The default value is false. In this case, Amazon Forecast uses default hyperparameter values from the chosen algorithm.

To override the default values, set PerformHPO to true and, optionally, supply the HyperParameterTuningJobConfig object. The tuning job specifies a metric to optimize, which hyperparameters participate in tuning, and the valid range for each tunable hyperparameter. In this case, you are required to specify an algorithm and PerformAutoML must be false.

The following algorithms support HPO:

", "DescribeExplainabilityResponse$EnableVisualization": "

Whether the visualization was enabled for the Explainability resource.

", @@ -216,7 +256,7 @@ "Configuration": { "base": null, "refs": { - "AdditionalDataset$Configuration": "

Weather Index

To enable the Weather Index, do not specify a value for Configuration.

Holidays

To enable Holidays, set CountryCode to one of the following two-letter country codes:

" + "AdditionalDataset$Configuration": "

Weather Index

To enable the Weather Index, do not specify a value for Configuration.

Holidays

To enable Holidays, set CountryCode to one of the following two-letter country codes:

" } }, "ContinuousParameterRange": { @@ -311,6 +351,16 @@ "refs": { } }, + "CreateMonitorRequest": { + "base": null, + "refs": { + } + }, + "CreateMonitorResponse": { + "base": null, + "refs": { + } + }, "CreatePredictorBacktestExportJobRequest": { "base": null, "refs": { @@ -355,7 +405,7 @@ "DataSource": { "base": "

The source of your data, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the data and, optionally, an AWS Key Management Service (KMS) key.

", "refs": { - "CreateDatasetImportJobRequest$DataSource": "

The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.

If encryption is used, DataSource must include an AWS Key Management Service (KMS) key and the IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in the EncryptionConfig parameter of the CreateDataset operation.

", + "CreateDatasetImportJobRequest$DataSource": "

The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.

If encryption is used, DataSource must include an AWS Key Management Service (KMS) key and the IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in the EncryptionConfig parameter of the CreateDataset operation.

", "CreateExplainabilityRequest$DataSource": null, "DatasetImportJobSummary$DataSource": "

The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.

If encryption is used, DataSource includes an AWS Key Management Service (KMS) key.

", "DescribeDatasetImportJobResponse$DataSource": "

The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data.

If encryption is used, DataSource includes an AWS Key Management Service (KMS) key.

", @@ -363,7 +413,7 @@ } }, "DatasetGroupSummary": { - "base": "

Provides a summary of the dataset group properties used in the ListDatasetGroups operation. To get the complete set of properties, call the DescribeDatasetGroup operation, and provide the DatasetGroupArn.

", + "base": "

Provides a summary of the dataset group properties used in the ListDatasetGroups operation. To get the complete set of properties, call the DescribeDatasetGroup operation, and provide the DatasetGroupArn.

", "refs": { "DatasetGroups$member": null } @@ -375,7 +425,7 @@ } }, "DatasetImportJobSummary": { - "base": "

Provides a summary of the dataset import job properties used in the ListDatasetImportJobs operation. To get the complete set of properties, call the DescribeDatasetImportJob operation, and provide the DatasetImportJobArn.

", + "base": "

Provides a summary of the dataset import job properties used in the ListDatasetImportJobs operation. To get the complete set of properties, call the DescribeDatasetImportJob operation, and provide the DatasetImportJobArn.

", "refs": { "DatasetImportJobs$member": null } @@ -387,7 +437,7 @@ } }, "DatasetSummary": { - "base": "

Provides a summary of the dataset properties used in the ListDatasets operation. To get the complete set of properties, call the DescribeDataset operation, and provide the DatasetArn.

", + "base": "

Provides a summary of the dataset properties used in the ListDatasets operation. To get the complete set of properties, call the DescribeDataset operation, and provide the DatasetArn.

", "refs": { "Datasets$member": null } @@ -441,6 +491,11 @@ "refs": { } }, + "DeleteMonitorRequest": { + "base": null, + "refs": { + } + }, "DeletePredictorBacktestExportJobRequest": { "base": null, "refs": { @@ -536,6 +591,16 @@ "refs": { } }, + "DescribeMonitorRequest": { + "base": null, + "refs": { + } + }, + "DescribeMonitorResponse": { + "base": null, + "refs": { + } + }, "DescribePredictorBacktestExportJobRequest": { "base": null, "refs": { @@ -556,11 +621,17 @@ "refs": { } }, + "Detail": { + "base": null, + "refs": { + "PredictorEvent$Detail": "

The type of event. For example, Retrain. A retraining event denotes the timepoint when a predictor was retrained. Any monitor results from before the Datetime are from the previous predictor. Any new metrics are for the newly retrained predictor.

" + } + }, "Domain": { "base": null, "refs": { - "CreateDatasetGroupRequest$Domain": "

The domain associated with the dataset group. When you add a dataset to a dataset group, this value and the value specified for the Domain parameter of the CreateDataset operation must match.

The Domain and DatasetType that you choose determine the fields that must be present in training data that you import to a dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires that item_id, timestamp, and demand fields are present in your data. For more information, see howitworks-datasets-groups.

", - "CreateDatasetRequest$Domain": "

The domain associated with the dataset. When you add a dataset to a dataset group, this value and the value specified for the Domain parameter of the CreateDatasetGroup operation must match.

The Domain and DatasetType that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires item_id, timestamp, and demand fields to be present in your data. For more information, see howitworks-datasets-groups.

", + "CreateDatasetGroupRequest$Domain": "

The domain associated with the dataset group. When you add a dataset to a dataset group, this value and the value specified for the Domain parameter of the CreateDataset operation must match.

The Domain and DatasetType that you choose determine the fields that must be present in training data that you import to a dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires that item_id, timestamp, and demand fields are present in your data. For more information, see Dataset groups.

", + "CreateDatasetRequest$Domain": "

The domain associated with the dataset. When you add a dataset to a dataset group, this value and the value specified for the Domain parameter of the CreateDatasetGroup operation must match.

The Domain and DatasetType that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires item_id, timestamp, and demand fields to be present in your data. For more information, see Importing datasets.

", "DatasetSummary$Domain": "

The domain associated with the dataset.

", "DescribeDatasetGroupResponse$Domain": "

The domain associated with the dataset group.

", "DescribeDatasetResponse$Domain": "

The domain associated with the dataset.

" @@ -569,6 +640,7 @@ "Double": { "base": null, "refs": { + "BaselineMetric$Value": "

The value for the metric.

", "ContinuousParameterRange$MaxValue": "

The maximum tunable value of the hyperparameter.

", "ContinuousParameterRange$MinValue": "

The minimum tunable value of the hyperparameter.

", "DescribeDatasetImportJobResponse$DataSize": "

The size of the dataset in gigabytes (GB) after the import job has finished.

", @@ -576,6 +648,7 @@ "ErrorMetric$RMSE": "

The root-mean-square error (RMSE).

", "ErrorMetric$MASE": "

The Mean Absolute Scaled Error (MASE)

", "ErrorMetric$MAPE": "

The Mean Absolute Percentage Error (MAPE)

", + "MetricResult$MetricValue": "

The value for the metric.

", "Metrics$RMSE": "

The root-mean-square error (RMSE).

", "Metrics$AverageWeightedQuantileLoss": "

The average value of all weighted quantile losses.

", "Statistics$Avg": "

For a numeric field, the average value in the field.

", @@ -639,6 +712,13 @@ "PredictorEvaluationResults$member": null } }, + "EvaluationState": { + "base": null, + "refs": { + "DescribeMonitorResponse$LastEvaluationState": "

The state of the monitor's latest evaluation.

", + "PredictorMonitorEvaluation$EvaluationState": "

The status of the monitor evaluation. The state can be SUCCESS or FAILURE.

" + } + }, "EvaluationType": { "base": null, "refs": { @@ -752,6 +832,8 @@ "ListExplainabilityExportsRequest$Filters": "

An array of filters. For each filter, provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude resources that match the statement from the list. The match statement consists of a key and a value.

Filter properties

", "ListForecastExportJobsRequest$Filters": "

An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the forecast export jobs that match the statement from the list, respectively. The match statement consists of a key and a value.

Filter properties

For example, to list all jobs that export a forecast named electricityforecast, specify the following filter:

\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"ForecastArn\", \"Value\": \"arn:aws:forecast:us-west-2:<acct-id>:forecast/electricityforecast\" } ]

", "ListForecastsRequest$Filters": "

An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the forecasts that match the statement from the list, respectively. The match statement consists of a key and a value.

Filter properties

For example, to list all forecasts whose status is not ACTIVE, you would specify:

\"Filters\": [ { \"Condition\": \"IS_NOT\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]

", + "ListMonitorEvaluationsRequest$Filters": "

An array of filters. For each filter, provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the resources that match the statement from the list. The match statement consists of a key and a value.

Filter properties

For example, to list only successful monitor evaluations, you would specify:

\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"EvaluationState\", \"Value\": \"SUCCESS\" } ]

", + "ListMonitorsRequest$Filters": "

An array of filters. For each filter, provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the resources that match the statement from the list. The match statement consists of a key and a value.

Filter properties

For example, to list all monitors whose status is ACTIVE, you would specify:

\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]

", "ListPredictorBacktestExportJobsRequest$Filters": "

An array of filters. For each filter, provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the predictor backtest export jobs that match the statement from the list. The match statement consists of a key and a value.

Filter properties

", "ListPredictorsRequest$Filters": "

An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the predictors that match the statement from the list, respectively. The match statement consists of a key and a value.

Filter properties

For example, to list all predictors whose status is ACTIVE, you would specify:

\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]

" } @@ -793,7 +875,7 @@ "base": null, "refs": { "CreateAutoPredictorRequest$ForecastTypes": "

The forecast types used to train a predictor. You can specify up to five forecast types. Forecast types can be quantiles from 0.01 to 0.99, by increments of 0.01 or higher. You can also specify the mean forecast with mean.

", - "CreateForecastRequest$ForecastTypes": "

The quantiles at which probabilistic forecasts are generated. You can currently specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99 (increments of .01 only) and mean. The mean forecast is different from the median (0.50) when the distribution is not symmetric (for example, Beta and Negative Binomial). The default value is [\"0.1\", \"0.5\", \"0.9\"].

", + "CreateForecastRequest$ForecastTypes": "

The quantiles at which probabilistic forecasts are generated. You can currently specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99 (increments of .01 only) and mean. The mean forecast is different from the median (0.50) when the distribution is not symmetric (for example, Beta and Negative Binomial).

The default quantiles are the quantiles you specified during predictor creation. If you didn't specify quantiles, the default values are [\"0.1\", \"0.5\", \"0.9\"].

", "CreatePredictorRequest$ForecastTypes": "

Specifies the forecast types used to train a predictor. You can specify up to five forecast types. Forecast types can be quantiles from 0.01 to 0.99, by increments of 0.01 or higher. You can also specify the mean forecast with mean.

The default value is [\"0.10\", \"0.50\", \"0.9\"].

", "DescribeAutoPredictorResponse$ForecastTypes": "

The forecast types used during predictor training. Default value is [\"0.1\",\"0.5\",\"0.9\"].

", "DescribeForecastResponse$ForecastTypes": "

The quantiles at which probabilistic forecasts were generated.

", @@ -850,7 +932,7 @@ "Integer": { "base": null, "refs": { - "CreateAutoPredictorRequest$ForecastHorizon": "

The number of time-steps that the model predicts. The forecast horizon is also called the prediction length.

", + "CreateAutoPredictorRequest$ForecastHorizon": "

The number of time-steps that the model predicts. The forecast horizon is also called the prediction length.

The maximum forecast horizon is the lesser of 500 time-steps or 1/4 of the TARGET_TIME_SERIES dataset length. If you are retraining an existing AutoPredictor, then the maximum forecast horizon is the lesser of 500 time-steps or 1/3 of the TARGET_TIME_SERIES dataset length.

If you are upgrading to an AutoPredictor or retraining an existing AutoPredictor, you cannot update the forecast horizon parameter. You can meet this requirement by providing longer time-series in the dataset.

", "CreatePredictorRequest$ForecastHorizon": "

Specifies the number of time-steps that the model is trained to predict. The forecast horizon is also called the prediction length.

For example, if you configure a dataset for daily data collection (using the DataFrequency parameter of the CreateDataset operation) and set the forecast horizon to 10, the model returns predictions for 10 days.

The maximum forecast horizon is the lesser of 500 time-steps or 1/3 of the TARGET_TIME_SERIES dataset length.

", "DescribeAutoPredictorResponse$ForecastHorizon": "

The number of time-steps that the model predicts. The forecast horizon is also called the prediction length.

", "DescribePredictorResponse$ForecastHorizon": "

The number of time-steps of the forecast. The forecast horizon is also called the prediction length.

", @@ -969,6 +1051,26 @@ "refs": { } }, + "ListMonitorEvaluationsRequest": { + "base": null, + "refs": { + } + }, + "ListMonitorEvaluationsResponse": { + "base": null, + "refs": { + } + }, + "ListMonitorsRequest": { + "base": null, + "refs": { + } + }, + "ListMonitorsResponse": { + "base": null, + "refs": { + } + }, "ListPredictorBacktestExportJobsRequest": { "base": null, "refs": { @@ -1015,7 +1117,9 @@ "DescribeDatasetImportJobResponse$EstimatedTimeRemainingInMinutes": "

The estimated time remaining in minutes for the dataset import job to complete.

", "DescribeExplainabilityResponse$EstimatedTimeRemainingInMinutes": "

The estimated time remaining in minutes for the CreateExplainability job to complete.

", "DescribeForecastResponse$EstimatedTimeRemainingInMinutes": "

The estimated time remaining in minutes for the forecast job to complete.

", + "DescribeMonitorResponse$EstimatedEvaluationTimeRemainingInMinutes": "

The estimated number of minutes remaining before the monitor resource finishes its current evaluation.

", "DescribePredictorResponse$EstimatedTimeRemainingInMinutes": "

The estimated time remaining in minutes for the predictor training job to complete.

", + "PredictorMonitorEvaluation$NumItemsEvaluated": "

The number of items considered during the evaluation.

", "Statistics$CountLong": "

The number of values in the field. CountLong is used instead of Count if the value is greater than 2,147,483,647.

", "Statistics$CountDistinctLong": "

The number of distinct values in the field. CountDistinctLong is used instead of CountDistinct if the value is greater than 2,147,483,647.

", "Statistics$CountNullLong": "

The number of null values in the field. CountNullLong is used instead of CountNull if the value is greater than 2,147,483,647.

", @@ -1032,6 +1136,8 @@ "ListExplainabilityExportsRequest$MaxResults": "

The number of items to return in the response.

", "ListForecastExportJobsRequest$MaxResults": "

The number of items to return in the response.

", "ListForecastsRequest$MaxResults": "

The number of items to return in the response.

", + "ListMonitorEvaluationsRequest$MaxResults": "

The maximum number of monitoring results to return.

", + "ListMonitorsRequest$MaxResults": "

The maximum number of monitors to include in the response.

", "ListPredictorBacktestExportJobsRequest$MaxResults": "

The number of items to return in the response.

", "ListPredictorsRequest$MaxResults": "

The number of items to return in the response.

" } @@ -1044,9 +1150,29 @@ "DescribeExplainabilityExportResponse$Message": "

Information about any errors that occurred during the export.

", "DescribeExplainabilityResponse$Message": "

If an error occurred, a message about the error.

", "DescribeForecastExportJobResponse$Message": "

If an error occurred, an informational message about the error.

", + "DescribeMonitorResponse$Message": "

An error message, if any, for the monitor.

", "DescribePredictorBacktestExportJobResponse$Message": "

Information about any errors that may have occurred during the backtest export.

", "DescribePredictorResponse$Message": "

If an error occurred, an informational message about the error.

", - "ExplainabilitySummary$Message": "

Information about any errors that may have occurred during the Explainability creation process.

" + "ExplainabilitySummary$Message": "

Information about any errors that may have occurred during the Explainability creation process.

", + "PredictorMonitorEvaluation$Message": "

Information about any errors that may have occurred during the monitor evaluation.

" + } + }, + "MetricName": { + "base": null, + "refs": { + "MetricResult$MetricName": "

The name of the metric.

" + } + }, + "MetricResult": { + "base": "

An individual metric Forecast calculated when monitoring predictor usage. You can compare the value for this metric to the metric's value in the Baseline to see how your predictor's performance is changing.

For more information about metrics generated by Forecast, see Evaluating Predictor Accuracy.

", + "refs": { + "MetricResults$member": null + } + }, + "MetricResults": { + "base": null, + "refs": { + "PredictorMonitorEvaluation$MetricResults": "

A list of metrics Forecast calculated when monitoring a predictor. You can compare the value for each metric in the list to the metric's value in the Baseline to see how your predictor's performance is changing.

" } }, "Metrics": { @@ -1055,11 +1181,42 @@ "WindowSummary$Metrics": "

Provides metrics used to evaluate the performance of a predictor.

" } }, + "MonitorConfig": { + "base": "

The configuration details for the predictor monitor.

", + "refs": { + "CreateAutoPredictorRequest$MonitorConfig": "

The configuration details for predictor monitoring. Provide a name for the monitor resource to enable predictor monitoring.

Predictor monitoring allows you to see how your predictor's performance changes over time. For more information, see Predictor Monitoring.

" + } + }, + "MonitorDataSource": { + "base": "

The source of the data the monitor used during the evaluation.

", + "refs": { + "PredictorMonitorEvaluation$MonitorDataSource": "

The source of the data the monitor resource used during the evaluation.

" + } + }, + "MonitorInfo": { + "base": "

Provides information about the monitor resource.

", + "refs": { + "DescribeAutoPredictorResponse$MonitorInfo": "

An object with the Amazon Resource Name (ARN) and status of the monitor resource.

" + } + }, + "MonitorSummary": { + "base": "

Provides a summary of the monitor properties used in the ListMonitors operation. To get a complete set of properties, call the DescribeMonitor operation, and provide the listed MonitorArn.

", + "refs": { + "Monitors$member": null + } + }, + "Monitors": { + "base": null, + "refs": { + "ListMonitorsResponse$Monitors": "

An array of objects that summarize each monitor's properties.

" + } + }, "Name": { "base": null, "refs": { "AdditionalDataset$Name": "

The name of the additional dataset. Valid names: \"holiday\" and \"weather\".

", "AttributeConfig$AttributeName": "

The name of the attribute as specified in the schema. Amazon Forecast supports the target field of the target time series and the related time series datasets. For example, for the RETAIL domain, the target is demand.

", + "BaselineMetric$Name": "

The name of the metric.

", "CategoricalParameterRange$Name": "

The name of the categorical hyperparameter to tune.

", "Configuration$key": null, "ContinuousParameterRange$Name": "

The name of the hyperparameter to tune.

", @@ -1071,6 +1228,7 @@ "CreateExplainabilityRequest$ExplainabilityName": "

A unique name for the Explainability.

", "CreateForecastExportJobRequest$ForecastExportJobName": "

The name for the forecast export job.

", "CreateForecastRequest$ForecastName": "

A name for the forecast.

", + "CreateMonitorRequest$MonitorName": "

The name of the monitor resource.

", "CreatePredictorBacktestExportJobRequest$PredictorBacktestExportJobName": "

The name for the backtest export job.

", "CreatePredictorRequest$PredictorName": "

A name for the predictor.

", "DatasetGroupSummary$DatasetGroupName": "

The name of the dataset group.

", @@ -1084,6 +1242,7 @@ "DescribeExplainabilityResponse$ExplainabilityName": "

The name of the Explainability.

", "DescribeForecastExportJobResponse$ForecastExportJobName": "

The name of the forecast export job.

", "DescribeForecastResponse$ForecastName": "

The name of the forecast.

", + "DescribeMonitorResponse$MonitorName": "

The name of the monitor.

", "DescribePredictorBacktestExportJobResponse$PredictorBacktestExportJobName": "

The name of the predictor backtest export job.

", "DescribePredictorResponse$PredictorArn": "

The ARN of the predictor.

", "DescribePredictorResponse$PredictorName": "

The name of the predictor.

", @@ -1094,6 +1253,8 @@ "ForecastExportJobSummary$ForecastExportJobName": "

The name of the forecast export job.

", "ForecastSummary$ForecastName": "

The name of the forecast.

", "IntegerParameterRange$Name": "

The name of the hyperparameter to tune.

", + "MonitorConfig$MonitorName": "

The name of the monitor resource.

", + "MonitorSummary$MonitorName": "

The name of the monitor resource.

", "PredictorBacktestExportJobSummary$PredictorBacktestExportJobName": "

The name of the predictor backtest export job.

", "PredictorSummary$PredictorName": "

The name of the predictor.

", "SchemaAttribute$AttributeName": "

The name of the dataset field.

", @@ -1118,6 +1279,10 @@ "ListForecastExportJobsResponse$NextToken": "

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

", "ListForecastsRequest$NextToken": "

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

", "ListForecastsResponse$NextToken": "

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

", + "ListMonitorEvaluationsRequest$NextToken": "

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

", + "ListMonitorEvaluationsResponse$NextToken": "

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

", + "ListMonitorsRequest$NextToken": "

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

", + "ListMonitorsResponse$NextToken": "

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

", "ListPredictorBacktestExportJobsRequest$NextToken": "

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

", "ListPredictorBacktestExportJobsResponse$NextToken": "

Returns this token if the response is truncated. To retrieve the next set of results, use the token in the next request.

", "ListPredictorsRequest$NextToken": "

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

", @@ -1166,12 +1331,24 @@ "ListPredictorBacktestExportJobsResponse$PredictorBacktestExportJobs": "

An array of objects that summarize the properties of each predictor backtest export job.

" } }, + "PredictorBaseline": { + "base": "

Metrics you can use as a baseline for comparison purposes. Use these metrics when you interpret monitoring results for an auto predictor.

", + "refs": { + "Baseline$PredictorBaseline": "

The initial accuracy metrics for the predictor you are monitoring. Use these metrics as a baseline for comparison purposes as you use your predictor and the metrics change.

" + } + }, "PredictorEvaluationResults": { "base": null, "refs": { "GetAccuracyMetricsResponse$PredictorEvaluationResults": "

An array of results from evaluating the predictor.

" } }, + "PredictorEvent": { + "base": "

Provides details about a predictor event, such as a retraining.

", + "refs": { + "PredictorMonitorEvaluation$PredictorEvent": "

Provides details about a predictor event, such as a retraining.

" + } + }, "PredictorExecution": { "base": "

The algorithm used to perform a backtest and the status of those tests.

", "refs": { @@ -1190,6 +1367,18 @@ "PredictorExecutionDetails$PredictorExecutions": "

An array of the backtests performed to evaluate the accuracy of the predictor against a particular algorithm. The NumberOfBacktestWindows from the object determines the number of windows in the array.

" } }, + "PredictorMonitorEvaluation": { + "base": "

Describes the results of a monitor evaluation.

", + "refs": { + "PredictorMonitorEvaluations$member": null + } + }, + "PredictorMonitorEvaluations": { + "base": null, + "refs": { + "ListMonitorEvaluationsResponse$PredictorMonitorEvaluations": "

The monitoring results and predictor events collected by the monitor resource during different windows of time.

For information about monitoring see Viewing Monitoring Results. For more information about retrieving monitoring results see Viewing Monitoring Results.

" + } + }, "PredictorSummary": { "base": "

Provides a summary of the predictor properties that are used in the ListPredictors operation. To get the complete set of properties, call the DescribePredictor operation, and provide the listed PredictorArn.

", "refs": { @@ -1224,6 +1413,11 @@ "refs": { } }, + "ResumeResourceRequest": { + "base": null, + "refs": { + } + }, "S3Config": { "base": "

The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the file(s). Optionally, includes an AWS Key Management Service (KMS) key. This object is part of the DataSource object that is submitted in the CreateDatasetImportJob request, and part of the DataDestination object.

", "refs": { @@ -1247,14 +1441,14 @@ "Schema": { "base": "

Defines the fields of a dataset.

", "refs": { - "CreateDatasetRequest$Schema": "

The schema for the dataset. The schema attributes and their order must match the fields in your data. The dataset Domain and DatasetType that you choose determine the minimum required fields in your training data. For information about the required fields for a specific dataset domain and type, see howitworks-domains-ds-types.

", + "CreateDatasetRequest$Schema": "

The schema for the dataset. The schema attributes and their order must match the fields in your data. The dataset Domain and DatasetType that you choose determine the minimum required fields in your training data. For information about the required fields for a specific dataset domain and type, see Dataset Domains and Dataset Types.

", "CreateExplainabilityRequest$Schema": null, "DescribeDatasetResponse$Schema": "

An array of SchemaAttribute objects that specify the dataset fields. Each SchemaAttribute specifies the name and data type of a field.

", "DescribeExplainabilityResponse$Schema": null } }, "SchemaAttribute": { - "base": "

An attribute of a schema, which defines a dataset field. A schema attribute is required for every field in a dataset. The Schema object contains an array of SchemaAttribute objects.

", + "base": "

An attribute of a schema, which defines a dataset field. A schema attribute is required for every field in a dataset. The Schema object contains an array of SchemaAttribute objects.

", "refs": { "SchemaAttributes$member": null } @@ -1272,7 +1466,7 @@ } }, "Statistics": { - "base": "

Provides statistics for each data field imported into to an Amazon Forecast dataset with the CreateDatasetImportJob operation.

", + "base": "

Provides statistics for each data field imported into an Amazon Forecast dataset with the CreateDatasetImportJob operation.

", "refs": { "FieldStatistics$value": null } @@ -1282,12 +1476,13 @@ "refs": { "DatasetImportJobSummary$Status": "

The status of the dataset import job. States include:

", "DescribeAutoPredictorResponse$Status": "

The status of the predictor. States include:

", - "DescribeDatasetGroupResponse$Status": "

The status of the dataset group. States include:

The UPDATE states apply when you call the UpdateDatasetGroup operation.

The Status of the dataset group must be ACTIVE before you can use the dataset group to create a predictor.

", + "DescribeDatasetGroupResponse$Status": "

The status of the dataset group. States include:

The UPDATE states apply when you call the UpdateDatasetGroup operation.

The Status of the dataset group must be ACTIVE before you can use the dataset group to create a predictor.

", "DescribeDatasetImportJobResponse$Status": "

The status of the dataset import job. States include:

", - "DescribeDatasetResponse$Status": "

The status of the dataset. States include:

The UPDATE states apply while data is imported to the dataset from a call to the CreateDatasetImportJob operation and reflect the status of the dataset import job. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS.

The Status of the dataset must be ACTIVE before you can import training data.

", + "DescribeDatasetResponse$Status": "

The status of the dataset. States include:

The UPDATE states apply while data is imported to the dataset from a call to the CreateDatasetImportJob operation and reflect the status of the dataset import job. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS.

The Status of the dataset must be ACTIVE before you can import training data.

", "DescribeExplainabilityExportResponse$Status": "

The status of the Explainability export. States include:

", "DescribeExplainabilityResponse$Status": "

The status of the Explainability resource. States include:

", "DescribeForecastExportJobResponse$Status": "

The status of the forecast export job. States include:

The Status of the forecast export job must be ACTIVE before you can access the forecast in your S3 bucket.

", + "DescribeMonitorResponse$Status": "

The status of the monitor resource.

", "DescribePredictorBacktestExportJobResponse$Status": "

The status of the predictor backtest export job. States include:

", "DescribePredictorResponse$Status": "

The status of the predictor. States include:

The Status of the predictor must be ACTIVE before you can use the predictor to create a forecast.

", "ExplainabilityExportSummary$Status": "

The status of the Explainability export. States include:

", @@ -1295,6 +1490,8 @@ "ExplainabilitySummary$Status": "

The status of the Explainability. States include:

", "ForecastExportJobSummary$Status": "

The status of the forecast export job. States include:

The Status of the forecast export job must be ACTIVE before you can access the forecast in your S3 bucket.

", "ForecastSummary$Status": "

The status of the forecast. States include:

The Status of the forecast must be ACTIVE before you can query or export the forecast.

", + "MonitorInfo$Status": "

The status of the monitor. States include:

", + "MonitorSummary$Status": "

The status of the monitor. States include:

", "PredictorBacktestExportJobSummary$Status": "

The status of the predictor backtest export job. States include:

", "PredictorSummary$Status": "

The status of the predictor. States include:

The Status of the predictor must be ACTIVE before you can use the predictor to create a forecast.

", "TestWindowSummary$Status": "

The status of the test. Possible status values are:

" @@ -1375,6 +1572,7 @@ "CreateExplainabilityRequest$Tags": "

Optional metadata to help you categorize and organize your resources. Each tag consists of a key and an optional value, both of which you define. Tag keys and values are case sensitive.

The following restrictions apply to tags:

", "CreateForecastExportJobRequest$Tags": "

The optional metadata that you apply to the forecast export job to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

", "CreateForecastRequest$Tags": "

The optional metadata that you apply to the forecast to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

", + "CreateMonitorRequest$Tags": "

A list of tags to apply to the monitor resource.

", "CreatePredictorBacktestExportJobRequest$Tags": "

Optional metadata to help you categorize and organize your backtests. Each tag consists of a key and an optional value, both of which you define. Tag keys and values are case sensitive.

The following restrictions apply to tags:

", "CreatePredictorRequest$Tags": "

The optional metadata that you apply to the predictor to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

", "ListTagsForResourceResponse$Tags": "

The tags for the resource.

", @@ -1422,19 +1620,19 @@ "base": null, "refs": { "DatasetGroupSummary$CreationTime": "

When the dataset group was created.

", - "DatasetGroupSummary$LastModificationTime": "

When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime is the current time of the ListDatasetGroups call.

", + "DatasetGroupSummary$LastModificationTime": "

When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime is the current time of the ListDatasetGroups call.

", "DatasetImportJobSummary$CreationTime": "

When the dataset import job was created.

", "DatasetImportJobSummary$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", "DatasetSummary$CreationTime": "

When the dataset was created.

", - "DatasetSummary$LastModificationTime": "

When you create a dataset, LastModificationTime is the same as CreationTime. While data is being imported to the dataset, LastModificationTime is the current time of the ListDatasets call. After a CreateDatasetImportJob operation has finished, LastModificationTime is when the import job completed or failed.

", + "DatasetSummary$LastModificationTime": "

When you create a dataset, LastModificationTime is the same as CreationTime. While data is being imported to the dataset, LastModificationTime is the current time of the ListDatasets call. After a CreateDatasetImportJob operation has finished, LastModificationTime is when the import job completed or failed.

", "DescribeAutoPredictorResponse$CreationTime": "

The timestamp of the CreateAutoPredictor request.

", "DescribeAutoPredictorResponse$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", "DescribeDatasetGroupResponse$CreationTime": "

When the dataset group was created.

", - "DescribeDatasetGroupResponse$LastModificationTime": "

When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime is the current time of the DescribeDatasetGroup call.

", + "DescribeDatasetGroupResponse$LastModificationTime": "

When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime is the current time of the DescribeDatasetGroup call.

", "DescribeDatasetImportJobResponse$CreationTime": "

When the dataset import job was created.

", "DescribeDatasetImportJobResponse$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", "DescribeDatasetResponse$CreationTime": "

When the dataset was created.

", - "DescribeDatasetResponse$LastModificationTime": "

When you create a dataset, LastModificationTime is the same as CreationTime. While data is being imported to the dataset, LastModificationTime is the current time of the DescribeDataset call. After a CreateDatasetImportJob operation has finished, LastModificationTime is when the import job completed or failed.

", + "DescribeDatasetResponse$LastModificationTime": "

When you create a dataset, LastModificationTime is the same as CreationTime. While data is being imported to the dataset, LastModificationTime is the current time of the DescribeDataset call. After a CreateDatasetImportJob operation has finished, LastModificationTime is when the import job completed or failed.

", "DescribeExplainabilityExportResponse$CreationTime": "

When the Explainability export was created.

", "DescribeExplainabilityExportResponse$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", "DescribeExplainabilityResponse$CreationTime": "

When the Explainability resource was created.

", @@ -1443,6 +1641,9 @@ "DescribeForecastExportJobResponse$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", "DescribeForecastResponse$CreationTime": "

When the forecast creation task was created.

", "DescribeForecastResponse$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", + "DescribeMonitorResponse$LastEvaluationTime": "

The timestamp of the latest evaluation completed by the monitor.

", + "DescribeMonitorResponse$CreationTime": "

The timestamp for when the monitor resource was created.

", + "DescribeMonitorResponse$LastModificationTime": "

The timestamp of the latest modification to the monitor.

", "DescribePredictorBacktestExportJobResponse$CreationTime": "

When the predictor backtest export job was created.

", "DescribePredictorBacktestExportJobResponse$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", "DescribePredictorResponse$CreationTime": "

When the model training task was created.

", @@ -1455,8 +1656,14 @@ "ForecastExportJobSummary$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", "ForecastSummary$CreationTime": "

When the forecast creation task was created.

", "ForecastSummary$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", + "MonitorSummary$CreationTime": "

When the monitor resource was created.

", + "MonitorSummary$LastModificationTime": "

The last time the monitor resource was modified. The timestamp depends on the status of the job:

", "PredictorBacktestExportJobSummary$CreationTime": "

When the predictor backtest export job was created.

", "PredictorBacktestExportJobSummary$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", + "PredictorEvent$Datetime": "

The timestamp for when the event occurred.

", + "PredictorMonitorEvaluation$EvaluationTime": "

The timestamp that indicates when the monitor evaluation was started.

", + "PredictorMonitorEvaluation$WindowStartDatetime": "

The timestamp that indicates the start of the window that is used for monitor evaluation.

", + "PredictorMonitorEvaluation$WindowEndDatetime": "

The timestamp that indicates the end of the window that is used for monitor evaluation.

", "PredictorSummary$CreationTime": "

When the model training task was created.

", "PredictorSummary$LastModificationTime": "

The last time the resource was modified. The timestamp depends on the status of the job:

", "TestWindowSummary$TestWindowStart": "

The time at which the test began.

", diff --git a/apis/forecast/2018-06-26/paginators-1.json b/apis/forecast/2018-06-26/paginators-1.json index da6de7d8f44..6543bf3f279 100644 --- a/apis/forecast/2018-06-26/paginators-1.json +++ b/apis/forecast/2018-06-26/paginators-1.json @@ -18,6 +18,18 @@ "output_token": "NextToken", "result_key": "Datasets" }, + "ListExplainabilities": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Explainabilities" + }, + "ListExplainabilityExports": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ExplainabilityExports" + }, "ListForecastExportJobs": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -30,6 +42,18 @@ "output_token": "NextToken", "result_key": "Forecasts" }, + "ListMonitorEvaluations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PredictorMonitorEvaluations" + }, + "ListMonitors": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Monitors" + }, "ListPredictorBacktestExportJobs": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/apis/personalize/2018-05-22/api-2.json b/apis/personalize/2018-05-22/api-2.json index e90fda0bec0..765053c4817 100644 --- a/apis/personalize/2018-05-22/api-2.json +++ b/apis/personalize/2018-05-22/api-2.json @@ -180,6 +180,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, {"shape":"TooManyTagsException"} ], "idempotent":true @@ -2450,7 +2451,8 @@ "lastUpdatedDateTime":{"shape":"Date"}, "status":{"shape":"Status"}, "failureReason":{"shape":"FailureReason"}, - "latestRecommenderUpdate":{"shape":"RecommenderUpdateSummary"} + "latestRecommenderUpdate":{"shape":"RecommenderUpdateSummary"}, + "modelMetrics":{"shape":"Metrics"} } }, "RecommenderConfig":{ diff --git 
a/apis/personalize/2018-05-22/docs-2.json b/apis/personalize/2018-05-22/docs-2.json index fc7233daf96..a1e21a3a89c 100644 --- a/apis/personalize/2018-05-22/docs-2.json +++ b/apis/personalize/2018-05-22/docs-2.json @@ -11,7 +11,7 @@ "CreateDatasetImportJob": "

Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an IAM service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it internally. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 Resources.

The dataset import job replaces any existing data in the dataset that you imported in bulk.

Status

A dataset import job can be in one of the following states:

To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset.

Related APIs

", "CreateEventTracker": "

Creates an event tracker that you use when adding event data to a specified dataset group using the PutEvents API.

Only one event tracker can be associated with a dataset group. You will get an error if you call CreateEventTracker using the same dataset group as an existing event tracker.

When you create an event tracker, the response includes a tracking ID, which you pass as a parameter when you use the PutEvents operation. Amazon Personalize then appends the event data to the Interactions dataset of the dataset group you specify in your event tracker.

The event tracker can be in one of the following states:

To get the status of the event tracker, call DescribeEventTracker.

The event tracker must be in the ACTIVE state before using the tracking ID.

Related APIs

", "CreateFilter": "

Creates a recommendation filter. For more information, see Filtering recommendations and user segments.

", - "CreateRecommender": "

Creates a recommender with the recipe (a Domain dataset group use case) you specify. You create recommenders for a Domain dataset group and specify the recommender's Amazon Resource Name (ARN) when you make a GetRecommendations request.

Minimum recommendation requests per second

When you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second (minRecommendationRequestsPerSecond) specifies the baseline recommendation request throughput provisioned by Amazon Personalize. The default minRecommendationRequestsPerSecond is 1. A recommendation request is a single GetRecommendations operation. Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive your requests per hour and the price of your recommender usage.

If your requests per second increases beyond minRecommendationRequestsPerSecond, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minRecommendationRequestsPerSecond. There's a short time delay while the capacity is increased that might cause loss of requests.

Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond) or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window. We recommend starting with the default minRecommendationRequestsPerSecond, track your usage using Amazon CloudWatch metrics, and then increase the minRecommendationRequestsPerSecond as necessary.

Status

A recommender can be in one of the following states:

To get the recommender status, call DescribeRecommender.

Wait until the status of the recommender is ACTIVE before asking the recommender for recommendations.

Related APIs

", + "CreateRecommender": "

Creates a recommender with the recipe (a Domain dataset group use case) you specify. You create recommenders for a Domain dataset group and specify the recommender's Amazon Resource Name (ARN) when you make a GetRecommendations request.

Minimum recommendation requests per second

When you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second (minRecommendationRequestsPerSecond) specifies the baseline recommendation request throughput provisioned by Amazon Personalize. The default minRecommendationRequestsPerSecond is 1. A recommendation request is a single GetRecommendations operation. Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive your requests per hour and the price of your recommender usage.

If your requests per second increases beyond minRecommendationRequestsPerSecond, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minRecommendationRequestsPerSecond. There's a short time delay while the capacity is increased that might cause loss of requests.

Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond) or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window. We recommend starting with the default minRecommendationRequestsPerSecond, track your usage using Amazon CloudWatch metrics, and then increase the minRecommendationRequestsPerSecond as necessary.

Status

A recommender can be in one of the following states:

To get the recommender status, call DescribeRecommender.

Wait until the status of the recommender is ACTIVE before asking the recommender for recommendations.

Related APIs

", "CreateSchema": "

Creates an Amazon Personalize schema from the specified schema string. The schema you create must be in Avro JSON format.

Amazon Personalize recognizes three schema variants. Each schema is associated with a dataset type and has a set of required field and keywords. If you are creating a schema for a dataset in a Domain dataset group, you provide the domain of the Domain dataset group. You specify a schema when you call CreateDataset.

Related APIs

", "CreateSolution": "

Creates the configuration for training a model. A trained model is known as a solution. After the configuration is created, you train the model (create a solution) by calling the CreateSolutionVersion operation. Every time you call CreateSolutionVersion, a new version of the solution is created.

After creating a solution version, you check its accuracy by calling GetSolutionMetrics. When you are satisfied with the version, you deploy it using CreateCampaign. The campaign provides recommendations to a client through the GetRecommendations API.

To train a model, Amazon Personalize requires training data and a recipe. The training data comes from the dataset group that you provide in the request. A recipe specifies the training algorithm and a feature transformation. You can specify one of the predefined recipes provided by Amazon Personalize. Alternatively, you can specify performAutoML and Amazon Personalize will analyze your data and select the optimum USER_PERSONALIZATION recipe for you.

Amazon Personalize doesn't support configuring the hpoObjective for solution hyperparameter optimization at this time.

Status

A solution can be in one of the following states:

To get the status of the solution, call DescribeSolution. Wait until the status shows as ACTIVE before calling CreateSolutionVersion.

Related APIs

", "CreateSolutionVersion": "

Trains or retrains an active solution in a Custom dataset group. A solution is created using the CreateSolution operation and must be in the ACTIVE state before calling CreateSolutionVersion. A new version of the solution is created every time you call this operation.

Status

A solution version can be in one of the following states:

To get the status of the version, call DescribeSolutionVersion. Wait until the status shows as ACTIVE before calling CreateCampaign.

If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

Related APIs

", @@ -35,7 +35,7 @@ "DescribeFeatureTransformation": "

Describes the given feature transformation.

", "DescribeFilter": "

Describes a filter's properties.

", "DescribeRecipe": "

Describes a recipe.

A recipe contains three items:

Amazon Personalize provides a set of predefined recipes. You specify a recipe when you create a solution with the CreateSolution API. CreateSolution trains a model by using the algorithm in the specified recipe and a training dataset. The solution, when deployed as a campaign, can provide recommendations using the GetRecommendations API.

", - "DescribeRecommender": "

Describes the given recommender, including its status.

A recommender can be in one of the following states:

When the status is CREATE FAILED, the response includes the failureReason key, which describes why.

For more information on recommenders, see CreateRecommender.

", + "DescribeRecommender": "

Describes the given recommender, including its status.

A recommender can be in one of the following states:

When the status is CREATE FAILED, the response includes the failureReason key, which describes why.

The modelMetrics key is null when the recommender is being created or deleted.

For more information on recommenders, see CreateRecommender.

", "DescribeSchema": "

Describes a schema. For more information on schemas, see CreateSchema.

", "DescribeSolution": "

Describes a solution. For more information on solutions, see CreateSolution.

", "DescribeSolutionVersion": "

Describes a specific version of a solution. For more information on solutions, see CreateSolution

", @@ -1410,7 +1410,8 @@ "Metrics": { "base": null, "refs": { - "GetSolutionMetricsResponse$metrics": "

The metrics for the solution version.

" + "GetSolutionMetricsResponse$metrics": "

The metrics for the solution version. For more information, see Evaluating a solution version with metrics.

", + "Recommender$modelMetrics": "

Provides evaluation metrics that help you determine the performance of a recommender. For more information, see Evaluating a recommender.

" } }, "Name": { @@ -1753,9 +1754,9 @@ "FilterSummary$status": "

The status of the filter.

", "Recipe$status": "

The status of the recipe.

", "RecipeSummary$status": "

The status of the recipe.

", - "Recommender$status": "

The status of the recommender.

A recommender can be in one of the following states:

", - "RecommenderSummary$status": "

The status of the recommender. A recommender can be in one of the following states:

", - "RecommenderUpdateSummary$status": "

The status of the recommender update.

A recommender can be in one of the following states:

", + "Recommender$status": "

The status of the recommender.

A recommender can be in one of the following states:

", + "RecommenderSummary$status": "

The status of the recommender. A recommender can be in one of the following states:

", + "RecommenderUpdateSummary$status": "

The status of the recommender update.

A recommender can be in one of the following states:

", "Solution$status": "

The status of the solution.

A solution can be in one of the following states:

", "SolutionSummary$status": "

The status of the solution.

A solution can be in one of the following states:

", "SolutionVersion$status": "

The status of the solution version.

A solution version can be in one of the following states:

", diff --git a/gems/aws-partitions/CHANGELOG.md b/gems/aws-partitions/CHANGELOG.md index 4484ac6e0c9..a9213f93b92 100644 --- a/gems/aws-partitions/CHANGELOG.md +++ b/gems/aws-partitions/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.590.0 (2022-05-23) +------------------ + +* Feature - Updated the partitions source data the determines the AWS service regions and endpoints. + 1.589.0 (2022-05-19) ------------------ diff --git a/gems/aws-partitions/VERSION b/gems/aws-partitions/VERSION index ff1424e9e3a..6a763c17d76 100644 --- a/gems/aws-partitions/VERSION +++ b/gems/aws-partitions/VERSION @@ -1 +1 @@ -1.589.0 +1.590.0 diff --git a/gems/aws-partitions/partitions.json b/gems/aws-partitions/partitions.json index bea417b5d59..c4a2e961d95 100644 --- a/gems/aws-partitions/partitions.json +++ b/gems/aws-partitions/partitions.json @@ -9150,6 +9150,15 @@ "us-west-2" : { } } }, + "proton" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "qldb" : { "endpoints" : { "ap-northeast-1" : { }, @@ -13168,6 +13177,28 @@ } } }, + "wellarchitected" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "wisdom" : { "endpoints" : { "ap-northeast-1" : { }, diff --git a/gems/aws-sdk-elasticache/CHANGELOG.md b/gems/aws-sdk-elasticache/CHANGELOG.md index f36fbeb48ef..a87a2cd3406 100644 --- a/gems/aws-sdk-elasticache/CHANGELOG.md +++ b/gems/aws-sdk-elasticache/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.78.0 (2022-05-23) +------------------ + +* Feature - Added support for 
encryption in transit for Memcached clusters. Customers can now launch Memcached cluster with encryption in transit enabled when using Memcached version 1.6.12 or later. + 1.77.0 (2022-04-21) ------------------ diff --git a/gems/aws-sdk-elasticache/VERSION b/gems/aws-sdk-elasticache/VERSION index 79e15fd4937..54227249d1f 100644 --- a/gems/aws-sdk-elasticache/VERSION +++ b/gems/aws-sdk-elasticache/VERSION @@ -1 +1 @@ -1.77.0 +1.78.0 diff --git a/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache.rb b/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache.rb index 41535462543..d73735f9ce6 100644 --- a/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache.rb +++ b/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache.rb @@ -49,6 +49,6 @@ # @!group service module Aws::ElastiCache - GEM_VERSION = '1.77.0' + GEM_VERSION = '1.78.0' end diff --git a/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/client.rb b/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/client.rb index 5a7386c1a9c..8a3da32d0b7 100644 --- a/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/client.rb +++ b/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/client.rb @@ -1080,9 +1080,7 @@ def copy_snapshot(params = {}, options = {}) # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -1094,9 +1092,7 @@ def copy_snapshot(params = {}, options = {}) # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) 
+ # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -1123,9 +1119,7 @@ def copy_snapshot(params = {}, options = {}) # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -1310,6 +1304,15 @@ def copy_snapshot(params = {}, options = {}) # @option params [Array] :log_delivery_configurations # Specifies the destination, format and type of the logs. # + # @option params [Boolean] :transit_encryption_enabled + # A flag that enables in-transit encryption when set to true. You cannot + # modify the value of `TransitEncryptionEnabled` after the cluster is + # created. To enable in-transit encryption on a cluster you must set + # `TransitEncryptionEnabled` to true when you create a cluster. + # + # **Required:** Only available when creating a cache cluster in an + # Amazon VPC using Memcached version `1.6.12` or later. + # # @return [Types::CreateCacheClusterResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateCacheClusterResult#cache_cluster #cache_cluster} => Types::CacheCluster @@ -1453,6 +1456,7 @@ def copy_snapshot(params = {}, options = {}) # enabled: false, # }, # ], + # transit_encryption_enabled: false, # }) # # @example Response structure @@ -2087,9 +2091,7 @@ def create_global_replication_group(params = {}, options = {}) # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) 
+ # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -2101,9 +2103,7 @@ def create_global_replication_group(params = {}, options = {}) # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -2140,9 +2140,7 @@ def create_global_replication_group(params = {}, options = {}) # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -6391,9 +6389,7 @@ def describe_replication_groups(params = {}, options = {}) # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -6405,9 +6401,7 @@ def describe_replication_groups(params = {}, options = {}) # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -6444,9 +6438,7 @@ def describe_replication_groups(params = {}, options = {}) # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) 
+ # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -6613,9 +6605,7 @@ def describe_reserved_cache_nodes(params = {}, options = {}) # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -6627,9 +6617,7 @@ def describe_reserved_cache_nodes(params = {}, options = {}) # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -6666,9 +6654,7 @@ def describe_reserved_cache_nodes(params = {}, options = {}) # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) 
+ # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -10239,7 +10225,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-elasticache' - context[:gem_version] = '1.77.0' + context[:gem_version] = '1.78.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/client_api.rb b/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/client_api.rb index d87d15099b6..c3bd256fde5 100644 --- a/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/client_api.rb +++ b/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/client_api.rb @@ -654,6 +654,7 @@ module ClientApi CreateCacheClusterMessage.add_member(:preferred_outpost_arn, Shapes::ShapeRef.new(shape: String, location_name: "PreferredOutpostArn")) CreateCacheClusterMessage.add_member(:preferred_outpost_arns, Shapes::ShapeRef.new(shape: PreferredOutpostArnList, location_name: "PreferredOutpostArns")) CreateCacheClusterMessage.add_member(:log_delivery_configurations, Shapes::ShapeRef.new(shape: LogDeliveryConfigurationRequestList, location_name: "LogDeliveryConfigurations")) + CreateCacheClusterMessage.add_member(:transit_encryption_enabled, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "TransitEncryptionEnabled")) CreateCacheClusterMessage.struct_class = Types::CreateCacheClusterMessage CreateCacheClusterResult.add_member(:cache_cluster, Shapes::ShapeRef.new(shape: CacheCluster, location_name: "CacheCluster")) diff --git a/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/types.rb b/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/types.rb index e314af5cc2a..ee29767fbae 100644 --- a/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/types.rb +++ b/gems/aws-sdk-elasticache/lib/aws-sdk-elasticache/types.rb @@ -314,9 +314,7 @@ class BatchStopUpdateActionMessage < Struct.new( # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, 
# `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -328,9 +326,7 @@ class BatchStopUpdateActionMessage < Struct.new( # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -368,9 +364,7 @@ class BatchStopUpdateActionMessage < Struct.new( # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -740,9 +734,7 @@ class CacheEngineVersionMessage < Struct.new( # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -754,9 +746,7 @@ class CacheEngineVersionMessage < Struct.new( # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -793,9 +783,7 @@ class CacheEngineVersionMessage < Struct.new( # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. 
Existing clusters are still - # supported but creation of new clusters is not supported for these - # types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -1626,6 +1614,7 @@ class CopySnapshotResult < Struct.new( # enabled: false, # }, # ], + # transit_encryption_enabled: false, # } # # @!attribute [rw] cache_cluster_id @@ -1758,9 +1747,7 @@ class CopySnapshotResult < Struct.new( # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -1772,9 +1759,7 @@ class CopySnapshotResult < Struct.new( # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -1801,9 +1786,7 @@ class CopySnapshotResult < Struct.new( # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -2012,6 +1995,17 @@ class CopySnapshotResult < Struct.new( # Specifies the destination, format and type of the logs. # @return [Array] # + # @!attribute [rw] transit_encryption_enabled + # A flag that enables in-transit encryption when set to true. You + # cannot modify the value of `TransitEncryptionEnabled` after the + # cluster is created. 
To enable in-transit encryption on a cluster you + # must set `TransitEncryptionEnabled` to true when you create a + # cluster. + # + # **Required:** Only available when creating a cache cluster in an + # Amazon VPC using Memcached version `1.6.12` or later. + # @return [Boolean] + # # @see http://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheClusterMessage AWS API Documentation # class CreateCacheClusterMessage < Struct.new( @@ -2041,7 +2035,8 @@ class CreateCacheClusterMessage < Struct.new( :outpost_mode, :preferred_outpost_arn, :preferred_outpost_arns, - :log_delivery_configurations) + :log_delivery_configurations, + :transit_encryption_enabled) SENSITIVE = [] include Aws::Structure end @@ -2549,9 +2544,7 @@ class CreateGlobalReplicationGroupResult < Struct.new( # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -2563,9 +2556,7 @@ class CreateGlobalReplicationGroupResult < Struct.new( # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -2603,9 +2594,7 @@ class CreateGlobalReplicationGroupResult < Struct.new( # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) 
+ # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -4230,9 +4219,7 @@ class DescribeReplicationGroupsMessage < Struct.new( # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -4244,9 +4231,7 @@ class DescribeReplicationGroupsMessage < Struct.new( # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -4284,9 +4269,7 @@ class DescribeReplicationGroupsMessage < Struct.new( # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -4430,9 +4413,7 @@ class DescribeReservedCacheNodesMessage < Struct.new( # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -4444,9 +4425,7 @@ class DescribeReservedCacheNodesMessage < Struct.new( # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) 
+ # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -4484,9 +4463,7 @@ class DescribeReservedCacheNodesMessage < Struct.new( # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -8082,9 +8059,7 @@ class ReplicationGroupPendingModifiedValues < Struct.new( # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -8096,9 +8071,7 @@ class ReplicationGroupPendingModifiedValues < Struct.new( # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -8136,9 +8109,7 @@ class ReplicationGroupPendingModifiedValues < Struct.new( # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -8312,9 +8283,7 @@ class ReservedCacheNodeQuotaExceededFault < Aws::EmptyStructure; end # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. 
Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -8326,9 +8295,7 @@ class ReservedCacheNodeQuotaExceededFault < Aws::EmptyStructure; end # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -8366,9 +8333,7 @@ class ReservedCacheNodeQuotaExceededFault < Aws::EmptyStructure; end # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` @@ -8821,9 +8786,7 @@ class SlotMigration < Struct.new( # **T2 node types:** `cache.t2.micro`, `cache.t2.small`, # `cache.t2.medium` # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **T1 node types:** `cache.t1.micro` # @@ -8835,9 +8798,7 @@ class SlotMigration < Struct.new( # # * Compute optimized: # - # * Previous generation: (not recommended. Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **C1 node types:** `cache.c1.xlarge` # @@ -8879,9 +8840,7 @@ class SlotMigration < Struct.new( # `cache.r4.2xlarge`, `cache.r4.4xlarge`, `cache.r4.8xlarge`, # `cache.r4.16xlarge` # - # * Previous generation: (not recommended. 
Existing clusters are - # still supported but creation of new clusters is not supported - # for these types.) + # * Previous generation: (not recommended) # # **M2 node types:** `cache.m2.xlarge`, `cache.m2.2xlarge`, # `cache.m2.4xlarge` diff --git a/gems/aws-sdk-forecastservice/CHANGELOG.md b/gems/aws-sdk-forecastservice/CHANGELOG.md index 8730537d695..4bab880e5ce 100644 --- a/gems/aws-sdk-forecastservice/CHANGELOG.md +++ b/gems/aws-sdk-forecastservice/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.34.0 (2022-05-23) +------------------ + +* Feature - New APIs for Monitor that help you understand how your predictors perform over time. + 1.33.0 (2022-02-24) ------------------ diff --git a/gems/aws-sdk-forecastservice/VERSION b/gems/aws-sdk-forecastservice/VERSION index 7aa332e4163..2b17ffd5042 100644 --- a/gems/aws-sdk-forecastservice/VERSION +++ b/gems/aws-sdk-forecastservice/VERSION @@ -1 +1 @@ -1.33.0 +1.34.0 diff --git a/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice.rb b/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice.rb index 04c8d789b7f..a897de71552 100644 --- a/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice.rb +++ b/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice.rb @@ -48,6 +48,6 @@ # @!group service module Aws::ForecastService - GEM_VERSION = '1.33.0' + GEM_VERSION = '1.34.0' end diff --git a/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/client.rb b/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/client.rb index 8a4ff3ee2a9..13eed33e720 100644 --- a/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/client.rb +++ b/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/client.rb @@ -380,7 +380,8 @@ def initialize(*args) # * `ForecastFrequency` - The granularity of your forecasts (hourly, # daily, weekly, etc). # - # * `ForecastHorizon` - The number of time steps being forecasted. + # * `ForecastHorizon` - The number of time-steps that the model + # predicts. 
The forecast horizon is also called the prediction length. # # When creating a new predictor, do not specify a value for # `ReferencePredictorArn`. @@ -405,6 +406,17 @@ def initialize(*args) # The number of time-steps that the model predicts. The forecast horizon # is also called the prediction length. # + # The maximum forecast horizon is the lesser of 500 time-steps or 1/4 of + # the TARGET\_TIME\_SERIES dataset length. If you are retraining an + # existing AutoPredictor, then the maximum forecast horizon is the + # lesser of 500 time-steps or 1/3 of the TARGET\_TIME\_SERIES dataset + # length. + # + # If you are upgrading to an AutoPredictor or retraining an existing + # AutoPredictor, you cannot update the forecast horizon parameter. You + # can meet this requirement by providing longer time-series in the + # dataset. + # # @option params [Array] :forecast_types # The forecast types used to train a predictor. You can specify up to # five forecast types. Forecast types can be quantiles from 0.01 to @@ -487,6 +499,18 @@ def initialize(*args) # only the key prefix of `aws` do not count against your tags per # resource limit. You cannot edit or delete tag keys with this prefix. # + # @option params [Types::MonitorConfig] :monitor_config + # The configuration details for predictor monitoring. Provide a name for + # the monitor resource to enable predictor monitoring. + # + # Predictor monitoring allows you to see how your predictor's + # performance changes over time. For more information, see [Predictor + # Monitoring][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring.html + # # @return [Types::CreateAutoPredictorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateAutoPredictorResponse#predictor_arn #predictor_arn} => String @@ -531,6 +555,9 @@ def initialize(*args) # value: "TagValue", # required # }, # ], + # monitor_config: { + # monitor_name: "Name", # required + # }, # }) # # @example Response structure @@ -565,22 +592,26 @@ def create_auto_predictor(params = {}, options = {}) # # After creating a dataset, you import your training data into it and # add the dataset to a dataset group. You use the dataset group to - # create a predictor. For more information, see - # howitworks-datasets-groups. + # create a predictor. For more information, see [Importing datasets][1]. # - # To get a list of all your datasets, use the ListDatasets operation. + # To get a list of all your datasets, use the [ListDatasets][2] + # operation. # # For example Forecast datasets, see the [Amazon Forecast Sample GitHub - # repository][1]. + # repository][3]. # # The `Status` of a dataset must be `ACTIVE` before you can import - # training data. Use the DescribeDataset operation to get the status. + # training data. Use the [DescribeDataset][4] operation to get the + # status. # # # # # - # [1]: https://github.com/aws-samples/amazon-forecast-samples + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasets.html + # [3]: https://github.com/aws-samples/amazon-forecast-samples + # [4]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDataset.html # # @option params [required, String] :dataset_name # A name for the dataset. @@ -588,14 +619,19 @@ def create_auto_predictor(params = {}, options = {}) # @option params [required, String] :domain # The domain associated with the dataset. 
When you add a dataset to a # dataset group, this value and the value specified for the `Domain` - # parameter of the CreateDatasetGroup operation must match. + # parameter of the [CreateDatasetGroup][1] operation must match. # # The `Domain` and `DatasetType` that you choose determine the fields # that must be present in the training data that you import to the # dataset. For example, if you choose the `RETAIL` domain and # `TARGET_TIME_SERIES` as the `DatasetType`, Amazon Forecast requires # `item_id`, `timestamp`, and `demand` fields to be present in your - # data. For more information, see howitworks-datasets-groups. + # data. For more information, see [Importing datasets][2]. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html # # @option params [required, String] :dataset_type # The dataset type. Valid values depend on the chosen `Domain`. @@ -614,7 +650,11 @@ def create_auto_predictor(params = {}, options = {}) # match the fields in your data. The dataset `Domain` and `DatasetType` # that you choose determine the minimum required fields in your training # data. For information about the required fields for a specific dataset - # domain and type, see howitworks-domains-ds-types. + # domain and type, see [Dataset Domains and Dataset Types][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/howitworks-domains-ds-types.html # # @option params [Types::EncryptionConfig] :encryption_config # An AWS Key Management Service (KMS) key and the AWS Identity and @@ -699,35 +739,47 @@ def create_dataset(params = {}, options = {}) # Creates a dataset group, which holds a collection of related datasets. # You can add datasets to the dataset group when you create the dataset - # group, or later by using the UpdateDatasetGroup operation. + # group, or later by using the [UpdateDatasetGroup][1] operation. 
# # After creating a dataset group and adding datasets, you use the # dataset group when you create a predictor. For more information, see - # howitworks-datasets-groups. + # [Dataset groups][2]. # - # To get a list of all your datasets groups, use the ListDatasetGroups - # operation. + # To get a list of all your datasets groups, use the + # [ListDatasetGroups][3] operation. # # The `Status` of a dataset group must be `ACTIVE` before you can use # the dataset group to create a predictor. To get the status, use the - # DescribeDatasetGroup operation. + # [DescribeDatasetGroup][4] operation. # # # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html + # [3]: https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasetGroups.html + # [4]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html + # # @option params [required, String] :dataset_group_name # A name for the dataset group. # # @option params [required, String] :domain # The domain associated with the dataset group. When you add a dataset # to a dataset group, this value and the value specified for the - # `Domain` parameter of the CreateDataset operation must match. + # `Domain` parameter of the [CreateDataset][1] operation must match. # # The `Domain` and `DatasetType` that you choose determine the fields # that must be present in training data that you import to a dataset. # For example, if you choose the `RETAIL` domain and # `TARGET_TIME_SERIES` as the `DatasetType`, Amazon Forecast requires # that `item_id`, `timestamp`, and `demand` fields are present in your - # data. For more information, see howitworks-datasets-groups. + # data. For more information, see [Dataset groups][2]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html # # @option params [Array] :dataset_arns # An array of Amazon Resource Names (ARNs) of the datasets that you want @@ -801,11 +853,11 @@ def create_dataset_group(params = {}, options = {}) # (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset # that you want to import the data to. # - # You must specify a DataSource object that includes an AWS Identity and - # Access Management (IAM) role that Amazon Forecast can assume to access - # the data, as Amazon Forecast makes a copy of your data and processes - # it in an internal AWS system. For more information, see - # aws-forecast-iam-roles. + # You must specify a [DataSource][1] object that includes an AWS + # Identity and Access Management (IAM) role that Amazon Forecast can + # assume to access the data, as Amazon Forecast makes a copy of your + # data and processes it in an internal AWS system. For more information, + # see [Set up permissions][2]. # # The training data must be in CSV format. The delimiter must be a comma # (,). @@ -821,7 +873,13 @@ def create_dataset_group(params = {}, options = {}) # collected since the previous import. # # To get a list of all your dataset import jobs, filtered by specified - # criteria, use the ListDatasetImportJobs operation. + # criteria, use the [ListDatasetImportJobs][3] operation. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_DataSource.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/aws-forecast-iam-roles.html + # [3]: https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasetImportJobs.html # # @option params [required, String] :dataset_import_job_name # The name for the dataset import job. 
We recommend including the @@ -842,7 +900,11 @@ def create_dataset_group(params = {}, options = {}) # Service (KMS) key and the IAM role must allow Amazon Forecast # permission to access the key. The KMS key and IAM role must match # those specified in the `EncryptionConfig` parameter of the - # CreateDataset operation. + # [CreateDataset][1] operation. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html # # @option params [String] :timestamp_format # The format of timestamps in the dataset. The format that you specify @@ -1065,7 +1127,7 @@ def create_dataset_import_job(params = {}, options = {}) # Defines the fields of a dataset. # # @option params [Boolean] :enable_visualization - # Create an Expainability visualization that is viewable within the AWS + # Create an Explainability visualization that is viewable within the AWS # console. # # @option params [String] :start_date_time @@ -1293,7 +1355,11 @@ def create_explainability_export(params = {}, options = {}) # values include `0.01 to 0.99` (increments of .01 only) and `mean`. The # mean forecast is different from the median (0.50) when the # distribution is not symmetric (for example, Beta and Negative - # Binomial). The default value is `["0.1", "0.5", "0.9"]`. + # Binomial). + # + # The default quantiles are the quantiles you specified during predictor + # creation. If you didn't specify quantiles, the default values are + # `["0.1", "0.5", "0.9"]`. # # @option params [Array] :tags # The optional metadata that you apply to the forecast to help you @@ -1469,6 +1535,58 @@ def create_forecast_export_job(params = {}, options = {}) req.send_request(options) end + # Creates a predictor monitor resource for an existing auto predictor. + # Predictor monitoring allows you to see how your predictor's + # performance changes over time. For more information, see [Predictor + # Monitoring][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring.html + # + # @option params [required, String] :monitor_name + # The name of the monitor resource. + # + # @option params [required, String] :resource_arn + # The Amazon Resource Name (ARN) of the predictor to monitor. + # + # @option params [Array] :tags + # A list of [tags][1] to apply to the monitor resource. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/tagging-forecast-resources.html + # + # @return [Types::CreateMonitorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CreateMonitorResponse#monitor_arn #monitor_arn} => String + # + # @example Request syntax with placeholder values + # + # resp = client.create_monitor({ + # monitor_name: "Name", # required + # resource_arn: "Arn", # required + # tags: [ + # { + # key: "TagKey", # required + # value: "TagValue", # required + # }, + # ], + # }) + # + # @example Response structure + # + # resp.monitor_arn #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateMonitor AWS API Documentation + # + # @overload create_monitor(params = {}) + # @param [Hash] params ({}) + def create_monitor(params = {}, options = {}) + req = build_request(:create_monitor, params) + req.send_request(options) + end + # This operation creates a legacy predictor that does not include all # the predictor functionalities provided by Amazon Forecast. To create a # predictor that is compatible with all aspects of Forecast, use @@ -1886,16 +2004,23 @@ def create_predictor_backtest_export_job(params = {}, options = {}) end # Deletes an Amazon Forecast dataset that was created using the - # CreateDataset operation. You can only delete datasets that have a + # [CreateDataset][1] operation. You can only delete datasets that have a # status of `ACTIVE` or `CREATE_FAILED`. To get the status use the - # DescribeDataset operation. 
+ # [DescribeDataset][2] operation. # # Forecast does not automatically update any dataset groups that contain # the deleted dataset. In order to update the dataset group, use the - # operation, omitting the deleted dataset's ARN. + # [UpdateDatasetGroup][3] operation, omitting the deleted dataset's + # ARN. # # # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDataset.html + # [3]: https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html + # # @option params [required, String] :dataset_arn # The Amazon Resource Name (ARN) of the dataset to delete. # @@ -1916,14 +2041,19 @@ def delete_dataset(params = {}, options = {}) req.send_request(options) end - # Deletes a dataset group created using the CreateDatasetGroup + # Deletes a dataset group created using the [CreateDatasetGroup][1] # operation. You can only delete dataset groups that have a status of # `ACTIVE`, `CREATE_FAILED`, or `UPDATE_FAILED`. To get the status, use - # the DescribeDatasetGroup operation. + # the [DescribeDatasetGroup][2] operation. # # This operation deletes only the dataset group, not the datasets in the # group. # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html + # # @option params [required, String] :dataset_group_arn # The Amazon Resource Name (ARN) of the dataset group to delete. # @@ -1944,10 +2074,15 @@ def delete_dataset_group(params = {}, options = {}) req.send_request(options) end - # Deletes a dataset import job created using the CreateDatasetImportJob - # operation. You can delete only dataset import jobs that have a status - # of `ACTIVE` or `CREATE_FAILED`. To get the status, use the - # DescribeDatasetImportJob operation. + # Deletes a dataset import job created using the + # [CreateDatasetImportJob][1] operation. 
You can delete only dataset + # import jobs that have a status of `ACTIVE` or `CREATE_FAILED`. To get + # the status, use the [DescribeDatasetImportJob][2] operation. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetImportJob.html # # @option params [required, String] :dataset_import_job_arn # The Amazon Resource Name (ARN) of the dataset import job to delete. @@ -2071,6 +2206,30 @@ def delete_forecast_export_job(params = {}, options = {}) req.send_request(options) end + # Deletes a monitor resource. You can only delete a monitor resource + # with a status of `ACTIVE`, `ACTIVE_STOPPED`, `CREATE_FAILED`, or + # `CREATE_STOPPED`. + # + # @option params [required, String] :monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource to delete. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.delete_monitor({ + # monitor_arn: "Arn", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteMonitor AWS API Documentation + # + # @overload delete_monitor(params = {}) + # @param [Hash] params ({}) + def delete_monitor(params = {}, options = {}) + req = build_request(:delete_monitor, params) + req.send_request(options) + end + # Deletes a predictor created using the DescribePredictor or # CreatePredictor operations. You can delete only predictor that have a # status of `ACTIVE` or `CREATE_FAILED`. 
To get the status, use the @@ -2190,6 +2349,7 @@ def delete_resource_tree(params = {}, options = {}) # * {Types::DescribeAutoPredictorResponse#last_modification_time #last_modification_time} => Time # * {Types::DescribeAutoPredictorResponse#optimization_metric #optimization_metric} => String # * {Types::DescribeAutoPredictorResponse#explainability_info #explainability_info} => Types::ExplainabilityInfo + # * {Types::DescribeAutoPredictorResponse#monitor_info #monitor_info} => Types::MonitorInfo # # @example Request syntax with placeholder values # @@ -2231,6 +2391,8 @@ def delete_resource_tree(params = {}, options = {}) # resp.optimization_metric #=> String, one of "WAPE", "RMSE", "AverageWeightedQuantileLoss", "MASE", "MAPE" # resp.explainability_info.explainability_arn #=> String # resp.explainability_info.status #=> String + # resp.monitor_info.monitor_arn #=> String + # resp.monitor_info.status #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeAutoPredictor AWS API Documentation # @@ -2241,8 +2403,8 @@ def describe_auto_predictor(params = {}, options = {}) req.send_request(options) end - # Describes an Amazon Forecast dataset created using the CreateDataset - # operation. + # Describes an Amazon Forecast dataset created using the + # [CreateDataset][1] operation. # # In addition to listing the parameters specified in the `CreateDataset` # request, this operation includes the following dataset properties: @@ -2253,6 +2415,10 @@ def describe_auto_predictor(params = {}, options = {}) # # * `Status` # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html + # # @option params [required, String] :dataset_arn # The Amazon Resource Name (ARN) of the dataset. 
# @@ -2300,7 +2466,7 @@ def describe_dataset(params = {}, options = {}) req.send_request(options) end - # Describes a dataset group created using the CreateDatasetGroup + # Describes a dataset group created using the [CreateDatasetGroup][1] # operation. # # In addition to listing the parameters provided in the @@ -2315,6 +2481,10 @@ def describe_dataset(params = {}, options = {}) # # * `Status` # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html + # # @option params [required, String] :dataset_group_arn # The Amazon Resource Name (ARN) of the dataset group. # @@ -2355,7 +2525,7 @@ def describe_dataset_group(params = {}, options = {}) end # Describes a dataset import job created using the - # CreateDatasetImportJob operation. + # [CreateDatasetImportJob][1] operation. # # In addition to listing the parameters provided in the # `CreateDatasetImportJob` request, this operation includes the @@ -2373,6 +2543,10 @@ def describe_dataset_group(params = {}, options = {}) # # * `Message` - If an error occurred, information about the error. # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html + # # @option params [required, String] :dataset_import_job_arn # The Amazon Resource Name (ARN) of the dataset import job. # @@ -2664,6 +2838,72 @@ def describe_forecast_export_job(params = {}, options = {}) req.send_request(options) end + # Describes a monitor resource. In addition to listing the properties + # provided in the CreateMonitor request, this operation lists the + # following properties: + # + # * `Baseline` + # + # * `CreationTime` + # + # * `LastEvaluationTime` + # + # * `LastEvaluationState` + # + # * `LastModificationTime` + # + # * `Message` + # + # * `Status` + # + # @option params [required, String] :monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource to describe. 
+ # + # @return [Types::DescribeMonitorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::DescribeMonitorResponse#monitor_name #monitor_name} => String + # * {Types::DescribeMonitorResponse#monitor_arn #monitor_arn} => String + # * {Types::DescribeMonitorResponse#resource_arn #resource_arn} => String + # * {Types::DescribeMonitorResponse#status #status} => String + # * {Types::DescribeMonitorResponse#last_evaluation_time #last_evaluation_time} => Time + # * {Types::DescribeMonitorResponse#last_evaluation_state #last_evaluation_state} => String + # * {Types::DescribeMonitorResponse#baseline #baseline} => Types::Baseline + # * {Types::DescribeMonitorResponse#message #message} => String + # * {Types::DescribeMonitorResponse#creation_time #creation_time} => Time + # * {Types::DescribeMonitorResponse#last_modification_time #last_modification_time} => Time + # * {Types::DescribeMonitorResponse#estimated_evaluation_time_remaining_in_minutes #estimated_evaluation_time_remaining_in_minutes} => Integer + # + # @example Request syntax with placeholder values + # + # resp = client.describe_monitor({ + # monitor_arn: "Arn", # required + # }) + # + # @example Response structure + # + # resp.monitor_name #=> String + # resp.monitor_arn #=> String + # resp.resource_arn #=> String + # resp.status #=> String + # resp.last_evaluation_time #=> Time + # resp.last_evaluation_state #=> String + # resp.baseline.predictor_baseline.baseline_metrics #=> Array + # resp.baseline.predictor_baseline.baseline_metrics[0].name #=> String + # resp.baseline.predictor_baseline.baseline_metrics[0].value #=> Float + # resp.message #=> String + # resp.creation_time #=> Time + # resp.last_modification_time #=> Time + # resp.estimated_evaluation_time_remaining_in_minutes #=> Integer + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeMonitor AWS API Documentation + # + # @overload describe_monitor(params = 
{}) + # @param [Hash] params ({}) + def describe_monitor(params = {}, options = {}) + req = build_request(:describe_monitor, params) + req.send_request(options) + end + # This operation is only valid for legacy predictors created with # CreatePredictor. If you are not using a legacy predictor, use # DescribeAutoPredictor. @@ -2933,11 +3173,17 @@ def get_accuracy_metrics(params = {}, options = {}) req.send_request(options) end - # Returns a list of dataset groups created using the CreateDatasetGroup - # operation. For each dataset group, this operation returns a summary of - # its properties, including its Amazon Resource Name (ARN). You can - # retrieve the complete set of properties by using the dataset group ARN - # with the DescribeDatasetGroup operation. + # Returns a list of dataset groups created using the + # [CreateDatasetGroup][1] operation. For each dataset group, this + # operation returns a summary of its properties, including its Amazon + # Resource Name (ARN). You can retrieve the complete set of properties + # by using the dataset group ARN with the [DescribeDatasetGroup][2] + # operation. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html # # @option params [String] :next_token # If the result of the previous request was truncated, the response @@ -2980,11 +3226,17 @@ def list_dataset_groups(params = {}, options = {}) end # Returns a list of dataset import jobs created using the - # CreateDatasetImportJob operation. For each import job, this operation - # returns a summary of its properties, including its Amazon Resource - # Name (ARN). You can retrieve the complete set of properties by using - # the ARN with the DescribeDatasetImportJob operation. You can filter - # the list by providing an array of Filter objects. + # [CreateDatasetImportJob][1] operation. 
For each import job, this + # operation returns a summary of its properties, including its Amazon + # Resource Name (ARN). You can retrieve the complete set of properties + # by using the ARN with the [DescribeDatasetImportJob][2] operation. You + # can filter the list by providing an array of [Filter][3] objects. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetImportJob.html + # [3]: https://docs.aws.amazon.com/forecast/latest/dg/API_Filter.html # # @option params [String] :next_token # If the result of the previous request was truncated, the response @@ -3062,10 +3314,16 @@ def list_dataset_import_jobs(params = {}, options = {}) req.send_request(options) end - # Returns a list of datasets created using the CreateDataset operation. - # For each dataset, a summary of its properties, including its Amazon - # Resource Name (ARN), is returned. To retrieve the complete set of - # properties, use the ARN with the DescribeDataset operation. + # Returns a list of datasets created using the [CreateDataset][1] + # operation. For each dataset, a summary of its properties, including + # its Amazon Resource Name (ARN), is returned. To retrieve the complete + # set of properties, use the ARN with the [DescribeDataset][2] + # operation. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDataset.html # # @option params [String] :next_token # If the result of the previous request was truncated, the response @@ -3147,6 +3405,8 @@ def list_datasets(params = {}, options = {}) # * {Types::ListExplainabilitiesResponse#explainabilities #explainabilities} => Array<Types::ExplainabilitySummary> # * {Types::ListExplainabilitiesResponse#next_token #next_token} => String # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. + # # @example Request syntax with placeholder values # # resp = client.list_explainabilities({ @@ -3222,6 +3482,8 @@ def list_explainabilities(params = {}, options = {}) # * {Types::ListExplainabilityExportsResponse#explainability_exports #explainability_exports} => Array<Types::ExplainabilityExportSummary> # * {Types::ListExplainabilityExportsResponse#next_token #next_token} => String # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. + # # @example Request syntax with placeholder values # # resp = client.list_explainability_exports({ @@ -3426,6 +3688,183 @@ def list_forecasts(params = {}, options = {}) req.send_request(options) end + # Returns a list of the monitoring evaluation results and predictor + # events collected by the monitor resource during different windows of + # time. + # + # For information about monitoring see [Viewing Monitoring Results][1]. + # For more information about retrieving monitoring results see [Viewing + # Monitoring Results][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring-results.html + # + # @option params [String] :next_token + # If the result of the previous request was truncated, the response + # includes a `NextToken`. To retrieve the next set of results, use the + # token in the next request. Tokens expire after 24 hours. + # + # @option params [Integer] :max_results + # The maximum number of monitoring results to return. + # + # @option params [required, String] :monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource to get results + # from. + # + # @option params [Array] :filters + # An array of filters. For each filter, provide a condition and a match + # statement. The condition is either `IS` or `IS_NOT`, which specifies + # whether to include or exclude the resources that match the statement + # from the list. The match statement consists of a key and a value. + # + # **Filter properties** + # + # * `Condition` - The condition to apply. Valid values are `IS` and + # `IS_NOT`. + # + # * `Key` - The name of the parameter to filter on. The only valid value + # is `EvaluationState`. + # + # * `Value` - The value to match. Valid values are only `SUCCESS` or + # `FAILURE`. + # + # For example, to list only successful monitor evaluations, you would + # specify: + # + # `"Filters": [ \{ "Condition": "IS", "Key": "EvaluationState", "Value": + # "SUCCESS" \} ]` + # + # @return [Types::ListMonitorEvaluationsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListMonitorEvaluationsResponse#next_token #next_token} => String + # * {Types::ListMonitorEvaluationsResponse#predictor_monitor_evaluations #predictor_monitor_evaluations} => Array<Types::PredictorMonitorEvaluation> + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. 
+ # + # @example Request syntax with placeholder values + # + # resp = client.list_monitor_evaluations({ + # next_token: "NextToken", + # max_results: 1, + # monitor_arn: "Arn", # required + # filters: [ + # { + # key: "String", # required + # value: "Arn", # required + # condition: "IS", # required, accepts IS, IS_NOT + # }, + # ], + # }) + # + # @example Response structure + # + # resp.next_token #=> String + # resp.predictor_monitor_evaluations #=> Array + # resp.predictor_monitor_evaluations[0].resource_arn #=> String + # resp.predictor_monitor_evaluations[0].monitor_arn #=> String + # resp.predictor_monitor_evaluations[0].evaluation_time #=> Time + # resp.predictor_monitor_evaluations[0].evaluation_state #=> String + # resp.predictor_monitor_evaluations[0].window_start_datetime #=> Time + # resp.predictor_monitor_evaluations[0].window_end_datetime #=> Time + # resp.predictor_monitor_evaluations[0].predictor_event.detail #=> String + # resp.predictor_monitor_evaluations[0].predictor_event.datetime #=> Time + # resp.predictor_monitor_evaluations[0].monitor_data_source.dataset_import_job_arn #=> String + # resp.predictor_monitor_evaluations[0].monitor_data_source.forecast_arn #=> String + # resp.predictor_monitor_evaluations[0].monitor_data_source.predictor_arn #=> String + # resp.predictor_monitor_evaluations[0].metric_results #=> Array + # resp.predictor_monitor_evaluations[0].metric_results[0].metric_name #=> String + # resp.predictor_monitor_evaluations[0].metric_results[0].metric_value #=> Float + # resp.predictor_monitor_evaluations[0].num_items_evaluated #=> Integer + # resp.predictor_monitor_evaluations[0].message #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListMonitorEvaluations AWS API Documentation + # + # @overload list_monitor_evaluations(params = {}) + # @param [Hash] params ({}) + def list_monitor_evaluations(params = {}, options = {}) + req = build_request(:list_monitor_evaluations, params) + 
req.send_request(options) + end + + # Returns a list of monitors created with the CreateMonitor operation + # and CreateAutoPredictor operation. For each monitor resource, this + # operation returns of a summary of its properties, including its Amazon + # Resource Name (ARN). You can retrieve a complete set of properties of + # a monitor resource by specify the monitor's ARN in the + # DescribeMonitor operation. + # + # @option params [String] :next_token + # If the result of the previous request was truncated, the response + # includes a `NextToken`. To retrieve the next set of results, use the + # token in the next request. Tokens expire after 24 hours. + # + # @option params [Integer] :max_results + # The maximum number of monitors to include in the response. + # + # @option params [Array] :filters + # An array of filters. For each filter, provide a condition and a match + # statement. The condition is either `IS` or `IS_NOT`, which specifies + # whether to include or exclude the resources that match the statement + # from the list. The match statement consists of a key and a value. + # + # **Filter properties** + # + # * `Condition` - The condition to apply. Valid values are `IS` and + # `IS_NOT`. + # + # * `Key` - The name of the parameter to filter on. The only valid value + # is `Status`. + # + # * `Value` - The value to match. + # + # For example, to list all monitors who's status is ACTIVE, you would + # specify: + # + # `"Filters": [ \{ "Condition": "IS", "Key": "Status", "Value": "ACTIVE" + # \} ]` + # + # @return [Types::ListMonitorsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListMonitorsResponse#monitors #monitors} => Array<Types::MonitorSummary> + # * {Types::ListMonitorsResponse#next_token #next_token} => String + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. 
For details on usage see {Aws::PageableResponse PageableResponse}. + # + # @example Request syntax with placeholder values + # + # resp = client.list_monitors({ + # next_token: "NextToken", + # max_results: 1, + # filters: [ + # { + # key: "String", # required + # value: "Arn", # required + # condition: "IS", # required, accepts IS, IS_NOT + # }, + # ], + # }) + # + # @example Response structure + # + # resp.monitors #=> Array + # resp.monitors[0].monitor_arn #=> String + # resp.monitors[0].monitor_name #=> String + # resp.monitors[0].resource_arn #=> String + # resp.monitors[0].status #=> String + # resp.monitors[0].creation_time #=> Time + # resp.monitors[0].last_modification_time #=> Time + # resp.next_token #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListMonitors AWS API Documentation + # + # @overload list_monitors(params = {}) + # @param [Hash] params ({}) + def list_monitors(params = {}, options = {}) + req = build_request(:list_monitors, params) + req.send_request(options) + end + # Returns a list of predictor backtest export jobs created using the # CreatePredictorBacktestExportJob operation. This operation returns a # summary for each backtest export job. You can filter the list using an @@ -3622,6 +4061,28 @@ def list_tags_for_resource(params = {}, options = {}) req.send_request(options) end + # Resumes a stopped monitor resource. + # + # @option params [required, String] :resource_arn + # The Amazon Resource Name (ARN) of the monitor resource to resume. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
+ # + # @example Request syntax with placeholder values + # + # resp = client.resume_resource({ + # resource_arn: "Arn", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ResumeResource AWS API Documentation + # + # @overload resume_resource(params = {}) + # @param [Hash] params ({}) + def resume_resource(params = {}, options = {}) + req = build_request(:resume_resource, params) + req.send_request(options) + end + # Stops a resource. # # The resource undergoes the following states: `CREATE_STOPPING` and @@ -3761,11 +4222,15 @@ def untag_resource(params = {}, options = {}) # Replaces the datasets in a dataset group with the specified datasets. # # The `Status` of the dataset group must be `ACTIVE` before you can use - # the dataset group to create a predictor. Use the DescribeDatasetGroup - # operation to get the status. + # the dataset group to create a predictor. Use the + # [DescribeDatasetGroup][1] operation to get the status. # # # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html + # # @option params [required, String] :dataset_group_arn # The ARN of the dataset group. 
# @@ -3804,7 +4269,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-forecastservice' - context[:gem_version] = '1.33.0' + context[:gem_version] = '1.34.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/client_api.rb b/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/client_api.rb index df4deb72eae..9af7d5ceeeb 100644 --- a/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/client_api.rb +++ b/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/client_api.rb @@ -21,6 +21,9 @@ module ClientApi AttributeConfigs = Shapes::ListShape.new(name: 'AttributeConfigs') AttributeType = Shapes::StringShape.new(name: 'AttributeType') AutoMLOverrideStrategy = Shapes::StringShape.new(name: 'AutoMLOverrideStrategy') + Baseline = Shapes::StructureShape.new(name: 'Baseline') + BaselineMetric = Shapes::StructureShape.new(name: 'BaselineMetric') + BaselineMetrics = Shapes::ListShape.new(name: 'BaselineMetrics') Boolean = Shapes::BooleanShape.new(name: 'Boolean') CategoricalParameterRange = Shapes::StructureShape.new(name: 'CategoricalParameterRange') CategoricalParameterRanges = Shapes::ListShape.new(name: 'CategoricalParameterRanges') @@ -43,6 +46,8 @@ module ClientApi CreateForecastExportJobResponse = Shapes::StructureShape.new(name: 'CreateForecastExportJobResponse') CreateForecastRequest = Shapes::StructureShape.new(name: 'CreateForecastRequest') CreateForecastResponse = Shapes::StructureShape.new(name: 'CreateForecastResponse') + CreateMonitorRequest = Shapes::StructureShape.new(name: 'CreateMonitorRequest') + CreateMonitorResponse = Shapes::StructureShape.new(name: 'CreateMonitorResponse') CreatePredictorBacktestExportJobRequest = Shapes::StructureShape.new(name: 'CreatePredictorBacktestExportJobRequest') CreatePredictorBacktestExportJobResponse = Shapes::StructureShape.new(name: 'CreatePredictorBacktestExportJobResponse') 
CreatePredictorRequest = Shapes::StructureShape.new(name: 'CreatePredictorRequest') @@ -64,6 +69,7 @@ module ClientApi DeleteExplainabilityRequest = Shapes::StructureShape.new(name: 'DeleteExplainabilityRequest') DeleteForecastExportJobRequest = Shapes::StructureShape.new(name: 'DeleteForecastExportJobRequest') DeleteForecastRequest = Shapes::StructureShape.new(name: 'DeleteForecastRequest') + DeleteMonitorRequest = Shapes::StructureShape.new(name: 'DeleteMonitorRequest') DeletePredictorBacktestExportJobRequest = Shapes::StructureShape.new(name: 'DeletePredictorBacktestExportJobRequest') DeletePredictorRequest = Shapes::StructureShape.new(name: 'DeletePredictorRequest') DeleteResourceTreeRequest = Shapes::StructureShape.new(name: 'DeleteResourceTreeRequest') @@ -83,10 +89,13 @@ module ClientApi DescribeForecastExportJobResponse = Shapes::StructureShape.new(name: 'DescribeForecastExportJobResponse') DescribeForecastRequest = Shapes::StructureShape.new(name: 'DescribeForecastRequest') DescribeForecastResponse = Shapes::StructureShape.new(name: 'DescribeForecastResponse') + DescribeMonitorRequest = Shapes::StructureShape.new(name: 'DescribeMonitorRequest') + DescribeMonitorResponse = Shapes::StructureShape.new(name: 'DescribeMonitorResponse') DescribePredictorBacktestExportJobRequest = Shapes::StructureShape.new(name: 'DescribePredictorBacktestExportJobRequest') DescribePredictorBacktestExportJobResponse = Shapes::StructureShape.new(name: 'DescribePredictorBacktestExportJobResponse') DescribePredictorRequest = Shapes::StructureShape.new(name: 'DescribePredictorRequest') DescribePredictorResponse = Shapes::StructureShape.new(name: 'DescribePredictorResponse') + Detail = Shapes::StringShape.new(name: 'Detail') Domain = Shapes::StringShape.new(name: 'Domain') Double = Shapes::FloatShape.new(name: 'Double') EncryptionConfig = Shapes::StructureShape.new(name: 'EncryptionConfig') @@ -95,6 +104,7 @@ module ClientApi ErrorMetrics = Shapes::ListShape.new(name: 'ErrorMetrics') 
EvaluationParameters = Shapes::StructureShape.new(name: 'EvaluationParameters') EvaluationResult = Shapes::StructureShape.new(name: 'EvaluationResult') + EvaluationState = Shapes::StringShape.new(name: 'EvaluationState') EvaluationType = Shapes::StringShape.new(name: 'EvaluationType') Explainabilities = Shapes::ListShape.new(name: 'Explainabilities') ExplainabilityConfig = Shapes::StructureShape.new(name: 'ExplainabilityConfig') @@ -147,6 +157,10 @@ module ClientApi ListForecastExportJobsResponse = Shapes::StructureShape.new(name: 'ListForecastExportJobsResponse') ListForecastsRequest = Shapes::StructureShape.new(name: 'ListForecastsRequest') ListForecastsResponse = Shapes::StructureShape.new(name: 'ListForecastsResponse') + ListMonitorEvaluationsRequest = Shapes::StructureShape.new(name: 'ListMonitorEvaluationsRequest') + ListMonitorEvaluationsResponse = Shapes::StructureShape.new(name: 'ListMonitorEvaluationsResponse') + ListMonitorsRequest = Shapes::StructureShape.new(name: 'ListMonitorsRequest') + ListMonitorsResponse = Shapes::StructureShape.new(name: 'ListMonitorsResponse') ListPredictorBacktestExportJobsRequest = Shapes::StructureShape.new(name: 'ListPredictorBacktestExportJobsRequest') ListPredictorBacktestExportJobsResponse = Shapes::StructureShape.new(name: 'ListPredictorBacktestExportJobsResponse') ListPredictorsRequest = Shapes::StructureShape.new(name: 'ListPredictorsRequest') @@ -157,7 +171,15 @@ module ClientApi Long = Shapes::IntegerShape.new(name: 'Long') MaxResults = Shapes::IntegerShape.new(name: 'MaxResults') Message = Shapes::StringShape.new(name: 'Message') + MetricName = Shapes::StringShape.new(name: 'MetricName') + MetricResult = Shapes::StructureShape.new(name: 'MetricResult') + MetricResults = Shapes::ListShape.new(name: 'MetricResults') Metrics = Shapes::StructureShape.new(name: 'Metrics') + MonitorConfig = Shapes::StructureShape.new(name: 'MonitorConfig') + MonitorDataSource = Shapes::StructureShape.new(name: 'MonitorDataSource') + 
MonitorInfo = Shapes::StructureShape.new(name: 'MonitorInfo') + MonitorSummary = Shapes::StructureShape.new(name: 'MonitorSummary') + Monitors = Shapes::ListShape.new(name: 'Monitors') Name = Shapes::StringShape.new(name: 'Name') NextToken = Shapes::StringShape.new(name: 'NextToken') OptimizationMetric = Shapes::StringShape.new(name: 'OptimizationMetric') @@ -166,16 +188,21 @@ module ClientApi ParameterValue = Shapes::StringShape.new(name: 'ParameterValue') PredictorBacktestExportJobSummary = Shapes::StructureShape.new(name: 'PredictorBacktestExportJobSummary') PredictorBacktestExportJobs = Shapes::ListShape.new(name: 'PredictorBacktestExportJobs') + PredictorBaseline = Shapes::StructureShape.new(name: 'PredictorBaseline') PredictorEvaluationResults = Shapes::ListShape.new(name: 'PredictorEvaluationResults') + PredictorEvent = Shapes::StructureShape.new(name: 'PredictorEvent') PredictorExecution = Shapes::StructureShape.new(name: 'PredictorExecution') PredictorExecutionDetails = Shapes::StructureShape.new(name: 'PredictorExecutionDetails') PredictorExecutions = Shapes::ListShape.new(name: 'PredictorExecutions') + PredictorMonitorEvaluation = Shapes::StructureShape.new(name: 'PredictorMonitorEvaluation') + PredictorMonitorEvaluations = Shapes::ListShape.new(name: 'PredictorMonitorEvaluations') PredictorSummary = Shapes::StructureShape.new(name: 'PredictorSummary') Predictors = Shapes::ListShape.new(name: 'Predictors') ReferencePredictorSummary = Shapes::StructureShape.new(name: 'ReferencePredictorSummary') ResourceAlreadyExistsException = Shapes::StructureShape.new(name: 'ResourceAlreadyExistsException') ResourceInUseException = Shapes::StructureShape.new(name: 'ResourceInUseException') ResourceNotFoundException = Shapes::StructureShape.new(name: 'ResourceNotFoundException') + ResumeResourceRequest = Shapes::StructureShape.new(name: 'ResumeResourceRequest') S3Config = Shapes::StructureShape.new(name: 'S3Config') S3Path = Shapes::StringShape.new(name: 'S3Path') 
ScalingType = Shapes::StringShape.new(name: 'ScalingType') @@ -231,6 +258,15 @@ module ClientApi AttributeConfigs.member = Shapes::ShapeRef.new(shape: AttributeConfig) + Baseline.add_member(:predictor_baseline, Shapes::ShapeRef.new(shape: PredictorBaseline, location_name: "PredictorBaseline")) + Baseline.struct_class = Types::Baseline + + BaselineMetric.add_member(:name, Shapes::ShapeRef.new(shape: Name, location_name: "Name")) + BaselineMetric.add_member(:value, Shapes::ShapeRef.new(shape: Double, location_name: "Value")) + BaselineMetric.struct_class = Types::BaselineMetric + + BaselineMetrics.member = Shapes::ShapeRef.new(shape: BaselineMetric) + CategoricalParameterRange.add_member(:name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "Name")) CategoricalParameterRange.add_member(:values, Shapes::ShapeRef.new(shape: Values, required: true, location_name: "Values")) CategoricalParameterRange.struct_class = Types::CategoricalParameterRange @@ -259,6 +295,7 @@ module ClientApi CreateAutoPredictorRequest.add_member(:optimization_metric, Shapes::ShapeRef.new(shape: OptimizationMetric, location_name: "OptimizationMetric")) CreateAutoPredictorRequest.add_member(:explain_predictor, Shapes::ShapeRef.new(shape: Boolean, location_name: "ExplainPredictor")) CreateAutoPredictorRequest.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, location_name: "Tags")) + CreateAutoPredictorRequest.add_member(:monitor_config, Shapes::ShapeRef.new(shape: MonitorConfig, location_name: "MonitorConfig")) CreateAutoPredictorRequest.struct_class = Types::CreateAutoPredictorRequest CreateAutoPredictorResponse.add_member(:predictor_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "PredictorArn")) @@ -339,6 +376,14 @@ module ClientApi CreateForecastResponse.add_member(:forecast_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "ForecastArn")) CreateForecastResponse.struct_class = Types::CreateForecastResponse + CreateMonitorRequest.add_member(:monitor_name, 
Shapes::ShapeRef.new(shape: Name, required: true, location_name: "MonitorName")) + CreateMonitorRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "ResourceArn")) + CreateMonitorRequest.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, location_name: "Tags")) + CreateMonitorRequest.struct_class = Types::CreateMonitorRequest + + CreateMonitorResponse.add_member(:monitor_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "MonitorArn")) + CreateMonitorResponse.struct_class = Types::CreateMonitorResponse + CreatePredictorBacktestExportJobRequest.add_member(:predictor_backtest_export_job_name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "PredictorBacktestExportJobName")) CreatePredictorBacktestExportJobRequest.add_member(:predictor_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "PredictorArn")) CreatePredictorBacktestExportJobRequest.add_member(:destination, Shapes::ShapeRef.new(shape: DataDestination, required: true, location_name: "Destination")) @@ -429,6 +474,9 @@ module ClientApi DeleteForecastRequest.add_member(:forecast_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "ForecastArn")) DeleteForecastRequest.struct_class = Types::DeleteForecastRequest + DeleteMonitorRequest.add_member(:monitor_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "MonitorArn")) + DeleteMonitorRequest.struct_class = Types::DeleteMonitorRequest + DeletePredictorBacktestExportJobRequest.add_member(:predictor_backtest_export_job_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "PredictorBacktestExportJobArn")) DeletePredictorBacktestExportJobRequest.struct_class = Types::DeletePredictorBacktestExportJobRequest @@ -458,6 +506,7 @@ module ClientApi DescribeAutoPredictorResponse.add_member(:last_modification_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastModificationTime")) 
DescribeAutoPredictorResponse.add_member(:optimization_metric, Shapes::ShapeRef.new(shape: OptimizationMetric, location_name: "OptimizationMetric")) DescribeAutoPredictorResponse.add_member(:explainability_info, Shapes::ShapeRef.new(shape: ExplainabilityInfo, location_name: "ExplainabilityInfo")) + DescribeAutoPredictorResponse.add_member(:monitor_info, Shapes::ShapeRef.new(shape: MonitorInfo, location_name: "MonitorInfo")) DescribeAutoPredictorResponse.struct_class = Types::DescribeAutoPredictorResponse DescribeDatasetGroupRequest.add_member(:dataset_group_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "DatasetGroupArn")) @@ -567,6 +616,22 @@ module ClientApi DescribeForecastResponse.add_member(:last_modification_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastModificationTime")) DescribeForecastResponse.struct_class = Types::DescribeForecastResponse + DescribeMonitorRequest.add_member(:monitor_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "MonitorArn")) + DescribeMonitorRequest.struct_class = Types::DescribeMonitorRequest + + DescribeMonitorResponse.add_member(:monitor_name, Shapes::ShapeRef.new(shape: Name, location_name: "MonitorName")) + DescribeMonitorResponse.add_member(:monitor_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "MonitorArn")) + DescribeMonitorResponse.add_member(:resource_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "ResourceArn")) + DescribeMonitorResponse.add_member(:status, Shapes::ShapeRef.new(shape: Status, location_name: "Status")) + DescribeMonitorResponse.add_member(:last_evaluation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastEvaluationTime")) + DescribeMonitorResponse.add_member(:last_evaluation_state, Shapes::ShapeRef.new(shape: EvaluationState, location_name: "LastEvaluationState")) + DescribeMonitorResponse.add_member(:baseline, Shapes::ShapeRef.new(shape: Baseline, location_name: "Baseline")) + 
DescribeMonitorResponse.add_member(:message, Shapes::ShapeRef.new(shape: Message, location_name: "Message")) + DescribeMonitorResponse.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "CreationTime")) + DescribeMonitorResponse.add_member(:last_modification_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastModificationTime")) + DescribeMonitorResponse.add_member(:estimated_evaluation_time_remaining_in_minutes, Shapes::ShapeRef.new(shape: Long, location_name: "EstimatedEvaluationTimeRemainingInMinutes")) + DescribeMonitorResponse.struct_class = Types::DescribeMonitorResponse + DescribePredictorBacktestExportJobRequest.add_member(:predictor_backtest_export_job_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "PredictorBacktestExportJobArn")) DescribePredictorBacktestExportJobRequest.struct_class = Types::DescribePredictorBacktestExportJobRequest @@ -813,6 +878,25 @@ module ClientApi ListForecastsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) ListForecastsResponse.struct_class = Types::ListForecastsResponse + ListMonitorEvaluationsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) + ListMonitorEvaluationsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults")) + ListMonitorEvaluationsRequest.add_member(:monitor_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "MonitorArn")) + ListMonitorEvaluationsRequest.add_member(:filters, Shapes::ShapeRef.new(shape: Filters, location_name: "Filters")) + ListMonitorEvaluationsRequest.struct_class = Types::ListMonitorEvaluationsRequest + + ListMonitorEvaluationsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) + ListMonitorEvaluationsResponse.add_member(:predictor_monitor_evaluations, Shapes::ShapeRef.new(shape: PredictorMonitorEvaluations, 
location_name: "PredictorMonitorEvaluations")) + ListMonitorEvaluationsResponse.struct_class = Types::ListMonitorEvaluationsResponse + + ListMonitorsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) + ListMonitorsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults")) + ListMonitorsRequest.add_member(:filters, Shapes::ShapeRef.new(shape: Filters, location_name: "Filters")) + ListMonitorsRequest.struct_class = Types::ListMonitorsRequest + + ListMonitorsResponse.add_member(:monitors, Shapes::ShapeRef.new(shape: Monitors, location_name: "Monitors")) + ListMonitorsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) + ListMonitorsResponse.struct_class = Types::ListMonitorsResponse + ListPredictorBacktestExportJobsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) ListPredictorBacktestExportJobsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults")) ListPredictorBacktestExportJobsRequest.add_member(:filters, Shapes::ShapeRef.new(shape: Filters, location_name: "Filters")) @@ -837,12 +921,40 @@ module ClientApi ListTagsForResourceResponse.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, location_name: "Tags")) ListTagsForResourceResponse.struct_class = Types::ListTagsForResourceResponse + MetricResult.add_member(:metric_name, Shapes::ShapeRef.new(shape: MetricName, location_name: "MetricName")) + MetricResult.add_member(:metric_value, Shapes::ShapeRef.new(shape: Double, location_name: "MetricValue")) + MetricResult.struct_class = Types::MetricResult + + MetricResults.member = Shapes::ShapeRef.new(shape: MetricResult) + Metrics.add_member(:rmse, Shapes::ShapeRef.new(shape: Double, deprecated: true, location_name: "RMSE", metadata: {"deprecatedMessage"=>"This property is deprecated, please refer to ErrorMetrics for both RMSE and 
WAPE"})) Metrics.add_member(:weighted_quantile_losses, Shapes::ShapeRef.new(shape: WeightedQuantileLosses, location_name: "WeightedQuantileLosses")) Metrics.add_member(:error_metrics, Shapes::ShapeRef.new(shape: ErrorMetrics, location_name: "ErrorMetrics")) Metrics.add_member(:average_weighted_quantile_loss, Shapes::ShapeRef.new(shape: Double, location_name: "AverageWeightedQuantileLoss")) Metrics.struct_class = Types::Metrics + MonitorConfig.add_member(:monitor_name, Shapes::ShapeRef.new(shape: Name, required: true, location_name: "MonitorName")) + MonitorConfig.struct_class = Types::MonitorConfig + + MonitorDataSource.add_member(:dataset_import_job_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "DatasetImportJobArn")) + MonitorDataSource.add_member(:forecast_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "ForecastArn")) + MonitorDataSource.add_member(:predictor_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "PredictorArn")) + MonitorDataSource.struct_class = Types::MonitorDataSource + + MonitorInfo.add_member(:monitor_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "MonitorArn")) + MonitorInfo.add_member(:status, Shapes::ShapeRef.new(shape: Status, location_name: "Status")) + MonitorInfo.struct_class = Types::MonitorInfo + + MonitorSummary.add_member(:monitor_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "MonitorArn")) + MonitorSummary.add_member(:monitor_name, Shapes::ShapeRef.new(shape: Name, location_name: "MonitorName")) + MonitorSummary.add_member(:resource_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "ResourceArn")) + MonitorSummary.add_member(:status, Shapes::ShapeRef.new(shape: Status, location_name: "Status")) + MonitorSummary.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "CreationTime")) + MonitorSummary.add_member(:last_modification_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastModificationTime")) + MonitorSummary.struct_class = Types::MonitorSummary + + 
Monitors.member = Shapes::ShapeRef.new(shape: MonitorSummary) + ParameterRanges.add_member(:categorical_parameter_ranges, Shapes::ShapeRef.new(shape: CategoricalParameterRanges, location_name: "CategoricalParameterRanges")) ParameterRanges.add_member(:continuous_parameter_ranges, Shapes::ShapeRef.new(shape: ContinuousParameterRanges, location_name: "ContinuousParameterRanges")) ParameterRanges.add_member(:integer_parameter_ranges, Shapes::ShapeRef.new(shape: IntegerParameterRanges, location_name: "IntegerParameterRanges")) @@ -859,8 +971,15 @@ module ClientApi PredictorBacktestExportJobs.member = Shapes::ShapeRef.new(shape: PredictorBacktestExportJobSummary) + PredictorBaseline.add_member(:baseline_metrics, Shapes::ShapeRef.new(shape: BaselineMetrics, location_name: "BaselineMetrics")) + PredictorBaseline.struct_class = Types::PredictorBaseline + PredictorEvaluationResults.member = Shapes::ShapeRef.new(shape: EvaluationResult) + PredictorEvent.add_member(:detail, Shapes::ShapeRef.new(shape: Detail, location_name: "Detail")) + PredictorEvent.add_member(:datetime, Shapes::ShapeRef.new(shape: Timestamp, location_name: "Datetime")) + PredictorEvent.struct_class = Types::PredictorEvent + PredictorExecution.add_member(:algorithm_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "AlgorithmArn")) PredictorExecution.add_member(:test_windows, Shapes::ShapeRef.new(shape: TestWindowDetails, location_name: "TestWindows")) PredictorExecution.struct_class = Types::PredictorExecution @@ -870,6 +989,21 @@ module ClientApi PredictorExecutions.member = Shapes::ShapeRef.new(shape: PredictorExecution) + PredictorMonitorEvaluation.add_member(:resource_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "ResourceArn")) + PredictorMonitorEvaluation.add_member(:monitor_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "MonitorArn")) + PredictorMonitorEvaluation.add_member(:evaluation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "EvaluationTime")) + 
PredictorMonitorEvaluation.add_member(:evaluation_state, Shapes::ShapeRef.new(shape: EvaluationState, location_name: "EvaluationState")) + PredictorMonitorEvaluation.add_member(:window_start_datetime, Shapes::ShapeRef.new(shape: Timestamp, location_name: "WindowStartDatetime")) + PredictorMonitorEvaluation.add_member(:window_end_datetime, Shapes::ShapeRef.new(shape: Timestamp, location_name: "WindowEndDatetime")) + PredictorMonitorEvaluation.add_member(:predictor_event, Shapes::ShapeRef.new(shape: PredictorEvent, location_name: "PredictorEvent")) + PredictorMonitorEvaluation.add_member(:monitor_data_source, Shapes::ShapeRef.new(shape: MonitorDataSource, location_name: "MonitorDataSource")) + PredictorMonitorEvaluation.add_member(:metric_results, Shapes::ShapeRef.new(shape: MetricResults, location_name: "MetricResults")) + PredictorMonitorEvaluation.add_member(:num_items_evaluated, Shapes::ShapeRef.new(shape: Long, location_name: "NumItemsEvaluated")) + PredictorMonitorEvaluation.add_member(:message, Shapes::ShapeRef.new(shape: Message, location_name: "Message")) + PredictorMonitorEvaluation.struct_class = Types::PredictorMonitorEvaluation + + PredictorMonitorEvaluations.member = Shapes::ShapeRef.new(shape: PredictorMonitorEvaluation) + PredictorSummary.add_member(:predictor_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "PredictorArn")) PredictorSummary.add_member(:predictor_name, Shapes::ShapeRef.new(shape: Name, location_name: "PredictorName")) PredictorSummary.add_member(:dataset_group_arn, Shapes::ShapeRef.new(shape: Arn, location_name: "DatasetGroupArn")) @@ -896,6 +1030,9 @@ module ClientApi ResourceNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessage, location_name: "Message")) ResourceNotFoundException.struct_class = Types::ResourceNotFoundException + ResumeResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "ResourceArn")) + ResumeResourceRequest.struct_class = 
Types::ResumeResourceRequest + S3Config.add_member(:path, Shapes::ShapeRef.new(shape: S3Path, required: true, location_name: "Path")) S3Config.add_member(:role_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "RoleArn")) S3Config.add_member(:kms_key_arn, Shapes::ShapeRef.new(shape: KMSKeyArn, location_name: "KMSKeyArn")) @@ -1111,6 +1248,19 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) end) + api.add_operation(:create_monitor, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateMonitor" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: CreateMonitorRequest) + o.output = Shapes::ShapeRef.new(shape: CreateMonitorResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) + o.errors << Shapes::ShapeRef.new(shape: ResourceAlreadyExistsException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ResourceInUseException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + end) + api.add_operation(:create_predictor, Seahorse::Model::Operation.new.tap do |o| o.name = "CreatePredictor" o.http_method = "POST" @@ -1214,6 +1364,17 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ResourceInUseException) end) + api.add_operation(:delete_monitor, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteMonitor" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DeleteMonitorRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ResourceInUseException) + end) + api.add_operation(:delete_predictor, Seahorse::Model::Operation.new.tap do |o| o.name = "DeletePredictor" o.http_method = "POST" @@ -1327,6 
+1488,16 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) end) + api.add_operation(:describe_monitor, Seahorse::Model::Operation.new.tap do |o| + o.name = "DescribeMonitor" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DescribeMonitorRequest) + o.output = Shapes::ShapeRef.new(shape: DescribeMonitorResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + end) + api.add_operation(:describe_predictor, Seahorse::Model::Operation.new.tap do |o| o.name = "DescribePredictor" o.http_method = "POST" @@ -1412,6 +1583,12 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: ListExplainabilitiesResponse) o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException) o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) end) api.add_operation(:list_explainability_exports, Seahorse::Model::Operation.new.tap do |o| @@ -1422,6 +1599,12 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: ListExplainabilityExportsResponse) o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException) o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) end) api.add_operation(:list_forecast_export_jobs, Seahorse::Model::Operation.new.tap do |o| @@ -1456,6 +1639,39 @@ module ClientApi ) end) + api.add_operation(:list_monitor_evaluations, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListMonitorEvaluations" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListMonitorEvaluationsRequest) + o.output = Shapes::ShapeRef.new(shape: ListMonitorEvaluationsResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException) + 
o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) + end) + + api.add_operation(:list_monitors, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListMonitors" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListMonitorsRequest) + o.output = Shapes::ShapeRef.new(shape: ListMonitorsResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException) + o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) + end) + api.add_operation(:list_predictor_backtest_export_jobs, Seahorse::Model::Operation.new.tap do |o| o.name = "ListPredictorBacktestExportJobs" o.http_method = "POST" @@ -1498,6 +1714,18 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) end) + api.add_operation(:resume_resource, Seahorse::Model::Operation.new.tap do |o| + o.name = "ResumeResource" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ResumeResourceRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: InvalidInputException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ResourceInUseException) + end) + api.add_operation(:stop_resource, Seahorse::Model::Operation.new.tap do |o| o.name = "StopResource" o.http_method = "POST" diff --git a/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/types.rb b/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/types.rb index 7ca31939dbf..5f8f366a516 100644 --- 
a/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/types.rb +++ b/gems/aws-sdk-forecastservice/lib/aws-sdk-forecastservice/types.rb @@ -59,6 +59,8 @@ module Types # # **Holidays** # + # **Holidays** + # # To enable Holidays, set `CountryCode` to one of the following # two-letter country codes: # @@ -278,6 +280,47 @@ class AttributeConfig < Struct.new( include Aws::Structure end + # Metrics you can use as a baseline for comparison purposes. Use these + # metrics when you interpret monitoring results for an auto predictor. + # + # @!attribute [rw] predictor_baseline + # The initial [accuracy metrics][1] for the predictor you are + # monitoring. Use these metrics as a baseline for comparison purposes + # as you use your predictor and the metrics change. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/metrics.html + # @return [Types::PredictorBaseline] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/Baseline AWS API Documentation + # + class Baseline < Struct.new( + :predictor_baseline) + SENSITIVE = [] + include Aws::Structure + end + + # An individual metric that you can use for comparison as you evaluate + # your monitoring results. + # + # @!attribute [rw] name + # The name of the metric. + # @return [String] + # + # @!attribute [rw] value + # The value for the metric. + # @return [Float] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/BaselineMetric AWS API Documentation + # + class BaselineMetric < Struct.new( + :name, + :value) + SENSITIVE = [] + include Aws::Structure + end + # Specifies a categorical hyperparameter and it's range of tunable # values. This object is part of the ParameterRanges object. 
# @@ -421,6 +464,9 @@ class ContinuousParameterRange < Struct.new( # value: "TagValue", # required # }, # ], + # monitor_config: { + # monitor_name: "Name", # required + # }, # } # # @!attribute [rw] predictor_name @@ -430,6 +476,17 @@ class ContinuousParameterRange < Struct.new( # @!attribute [rw] forecast_horizon # The number of time-steps that the model predicts. The forecast # horizon is also called the prediction length. + # + # The maximum forecast horizon is the lesser of 500 time-steps or 1/4 + # of the TARGET\_TIME\_SERIES dataset length. If you are retraining an + # existing AutoPredictor, then the maximum forecast horizon is the + # lesser of 500 time-steps or 1/3 of the TARGET\_TIME\_SERIES dataset + # length. + # + # If you are upgrading to an AutoPredictor or retraining an existing + # AutoPredictor, you cannot update the forecast horizon parameter. You + # can meet this requirement by providing longer time-series in the + # dataset. # @return [Integer] # # @!attribute [rw] forecast_types @@ -524,6 +581,19 @@ class ContinuousParameterRange < Struct.new( # prefix. # @return [Array] # + # @!attribute [rw] monitor_config + # The configuration details for predictor monitoring. Provide a name + # for the monitor resource to enable predictor monitoring. + # + # Predictor monitoring allows you to see how your predictor's + # performance changes over time. For more information, see [Predictor + # Monitoring][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring.html + # @return [Types::MonitorConfig] + # # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateAutoPredictorRequest AWS API Documentation # class CreateAutoPredictorRequest < Struct.new( @@ -537,7 +607,8 @@ class CreateAutoPredictorRequest < Struct.new( :reference_predictor_arn, :optimization_metric, :explain_predictor, - :tags) + :tags, + :monitor_config) SENSITIVE = [] include Aws::Structure end @@ -576,14 +647,19 @@ class CreateAutoPredictorResponse < Struct.new( # @!attribute [rw] domain # The domain associated with the dataset group. When you add a dataset # to a dataset group, this value and the value specified for the - # `Domain` parameter of the CreateDataset operation must match. + # `Domain` parameter of the [CreateDataset][1] operation must match. # # The `Domain` and `DatasetType` that you choose determine the fields # that must be present in training data that you import to a dataset. # For example, if you choose the `RETAIL` domain and # `TARGET_TIME_SERIES` as the `DatasetType`, Amazon Forecast requires # that `item_id`, `timestamp`, and `demand` fields are present in your - # data. For more information, see howitworks-datasets-groups. + # data. For more information, see [Dataset groups][2]. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html # @return [String] # # @!attribute [rw] dataset_arns @@ -694,7 +770,11 @@ class CreateDatasetGroupResponse < Struct.new( # Management Service (KMS) key and the IAM role must allow Amazon # Forecast permission to access the key. The KMS key and IAM role must # match those specified in the `EncryptionConfig` parameter of the - # CreateDataset operation. + # [CreateDataset][1] operation. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html # @return [Types::DataSource] # # @!attribute [rw] timestamp_format @@ -841,14 +921,19 @@ class CreateDatasetImportJobResponse < Struct.new( # @!attribute [rw] domain # The domain associated with the dataset. When you add a dataset to a # dataset group, this value and the value specified for the `Domain` - # parameter of the CreateDatasetGroup operation must match. + # parameter of the [CreateDatasetGroup][1] operation must match. # # The `Domain` and `DatasetType` that you choose determine the fields # that must be present in the training data that you import to the # dataset. For example, if you choose the `RETAIL` domain and # `TARGET_TIME_SERIES` as the `DatasetType`, Amazon Forecast requires # `item_id`, `timestamp`, and `demand` fields to be present in your - # data. For more information, see howitworks-datasets-groups. + # data. For more information, see [Importing datasets][2]. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html # @return [String] # # @!attribute [rw] dataset_type @@ -870,7 +955,12 @@ class CreateDatasetImportJobResponse < Struct.new( # must match the fields in your data. The dataset `Domain` and # `DatasetType` that you choose determine the minimum required fields # in your training data. For information about the required fields for - # a specific dataset domain and type, see howitworks-domains-ds-types. + # a specific dataset domain and type, see [Dataset Domains and Dataset + # Types][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/howitworks-domains-ds-types.html # @return [Types::Schema] # # @!attribute [rw] encryption_config @@ -1088,7 +1178,7 @@ class CreateExplainabilityExportResponse < Struct.new( # @return [Types::Schema] # # @!attribute [rw] enable_visualization - # Create an Expainability visualization that is viewable within the + # Create an Explainability visualization that is viewable within the # AWS console. # @return [Boolean] # @@ -1293,7 +1383,11 @@ class CreateForecastExportJobResponse < Struct.new( # values include `0.01 to 0.99` (increments of .01 only) and `mean`. # The mean forecast is different from the median (0.50) when the # distribution is not symmetric (for example, Beta and Negative - # Binomial). The default value is `["0.1", "0.5", "0.9"]`. + # Binomial). + # + # The default quantiles are the quantiles you specified during + # predictor creation. If you didn't specify quantiles, the default + # values are `["0.1", "0.5", "0.9"]`. # @return [Array] # # @!attribute [rw] tags @@ -1352,6 +1446,58 @@ class CreateForecastResponse < Struct.new( include Aws::Structure end + # @note When making an API call, you may pass CreateMonitorRequest + # data as a hash: + # + # { + # monitor_name: "Name", # required + # resource_arn: "Arn", # required + # tags: [ + # { + # key: "TagKey", # required + # value: "TagValue", # required + # }, + # ], + # } + # + # @!attribute [rw] monitor_name + # The name of the monitor resource. + # @return [String] + # + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) of the predictor to monitor. + # @return [String] + # + # @!attribute [rw] tags + # A list of [tags][1] to apply to the monitor resource. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/tagging-forecast-resources.html + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateMonitorRequest AWS API Documentation + # + class CreateMonitorRequest < Struct.new( + :monitor_name, + :resource_arn, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateMonitorResponse AWS API Documentation + # + class CreateMonitorResponse < Struct.new( + :monitor_arn) + SENSITIVE = [] + include Aws::Structure + end + # @note When making an API call, you may pass CreatePredictorBacktestExportJobRequest # data as a hash: # @@ -1839,9 +1985,14 @@ class DataSource < Struct.new( end # Provides a summary of the dataset group properties used in the - # ListDatasetGroups operation. To get the complete set of properties, - # call the DescribeDatasetGroup operation, and provide the - # `DatasetGroupArn`. + # [ListDatasetGroups][1] operation. To get the complete set of + # properties, call the [DescribeDatasetGroup][2] operation, and provide + # the `DatasetGroupArn`. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasetGroups.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html # # @!attribute [rw] dataset_group_arn # The Amazon Resource Name (ARN) of the dataset group. @@ -1857,9 +2008,13 @@ class DataSource < Struct.new( # # @!attribute [rw] last_modification_time # When the dataset group was created or last updated from a call to - # the UpdateDatasetGroup operation. While the dataset group is being - # updated, `LastModificationTime` is the current time of the + # the [UpdateDatasetGroup][1] operation. 
While the dataset group is + # being updated, `LastModificationTime` is the current time of the # `ListDatasetGroups` call. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DatasetGroupSummary AWS API Documentation @@ -1874,9 +2029,14 @@ class DatasetGroupSummary < Struct.new( end # Provides a summary of the dataset import job properties used in the - # ListDatasetImportJobs operation. To get the complete set of - # properties, call the DescribeDatasetImportJob operation, and provide - # the `DatasetImportJobArn`. + # [ListDatasetImportJobs][1] operation. To get the complete set of + # properties, call the [DescribeDatasetImportJob][2] operation, and + # provide the `DatasetImportJobArn`. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasetImportJobs.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetImportJob.html # # @!attribute [rw] dataset_import_job_arn # The Amazon Resource Name (ARN) of the dataset import job. @@ -1945,9 +2105,14 @@ class DatasetImportJobSummary < Struct.new( include Aws::Structure end - # Provides a summary of the dataset properties used in the ListDatasets - # operation. To get the complete set of properties, call the - # DescribeDataset operation, and provide the `DatasetArn`. + # Provides a summary of the dataset properties used in the + # [ListDatasets][1] operation. To get the complete set of properties, + # call the [DescribeDataset][2] operation, and provide the `DatasetArn`. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasets.html + # [2]: https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDataset.html # # @!attribute [rw] dataset_arn # The Amazon Resource Name (ARN) of the dataset. 
@@ -1973,8 +2138,12 @@ class DatasetImportJobSummary < Struct.new( # When you create a dataset, `LastModificationTime` is the same as # `CreationTime`. While data is being imported to the dataset, # `LastModificationTime` is the current time of the `ListDatasets` - # call. After a CreateDatasetImportJob operation has finished, + # call. After a [CreateDatasetImportJob][1] operation has finished, # `LastModificationTime` is when the import job completed or failed. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DatasetSummary AWS API Documentation @@ -2125,6 +2294,25 @@ class DeleteForecastRequest < Struct.new( include Aws::Structure end + # @note When making an API call, you may pass DeleteMonitorRequest + # data as a hash: + # + # { + # monitor_arn: "Arn", # required + # } + # + # @!attribute [rw] monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource to delete. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteMonitorRequest AWS API Documentation + # + class DeleteMonitorRequest < Struct.new( + :monitor_arn) + SENSITIVE = [] + include Aws::Structure + end + # @note When making an API call, you may pass DeletePredictorBacktestExportJobRequest # data as a hash: # @@ -2306,6 +2494,11 @@ class DescribeAutoPredictorRequest < Struct.new( # Provides the status and ARN of the Predictor Explainability. # @return [Types::ExplainabilityInfo] # + # @!attribute [rw] monitor_info + # A object with the Amazon Resource Name (ARN) and status of the + # monitor resource. 
+ # @return [Types::MonitorInfo] + # # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeAutoPredictorResponse AWS API Documentation # class DescribeAutoPredictorResponse < Struct.new( @@ -2325,7 +2518,8 @@ class DescribeAutoPredictorResponse < Struct.new( :creation_time, :last_modification_time, :optimization_metric, - :explainability_info) + :explainability_info, + :monitor_info) SENSITIVE = [] include Aws::Structure end @@ -2377,13 +2571,17 @@ class DescribeDatasetGroupRequest < Struct.new( # # * `UPDATE_PENDING`, `UPDATE_IN_PROGRESS`, `UPDATE_FAILED` # - # The `UPDATE` states apply when you call the UpdateDatasetGroup + # The `UPDATE` states apply when you call the [UpdateDatasetGroup][1] # operation. # # The `Status` of the dataset group must be `ACTIVE` before you can # use the dataset group to create a predictor. # # + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html # @return [String] # # @!attribute [rw] creation_time @@ -2392,9 +2590,13 @@ class DescribeDatasetGroupRequest < Struct.new( # # @!attribute [rw] last_modification_time # When the dataset group was created or last updated from a call to - # the UpdateDatasetGroup operation. While the dataset group is being - # updated, `LastModificationTime` is the current time of the + # the [UpdateDatasetGroup][1] operation. While the dataset group is + # being updated, `LastModificationTime` is the current time of the # `DescribeDatasetGroup` call. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetGroupResponse AWS API Documentation @@ -2620,7 +2822,7 @@ class DescribeDatasetRequest < Struct.new( # * `UPDATE_PENDING`, `UPDATE_IN_PROGRESS`, `UPDATE_FAILED` # # The `UPDATE` states apply while data is imported to the dataset from - # a call to the CreateDatasetImportJob operation and reflect the + # a call to the [CreateDatasetImportJob][1] operation and reflect the # status of the dataset import job. For example, when the import job # status is `CREATE_IN_PROGRESS`, the status of the dataset is # `UPDATE_IN_PROGRESS`. @@ -2629,6 +2831,10 @@ class DescribeDatasetRequest < Struct.new( # training data. # # + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html # @return [String] # # @!attribute [rw] creation_time @@ -2639,8 +2845,12 @@ class DescribeDatasetRequest < Struct.new( # When you create a dataset, `LastModificationTime` is the same as # `CreationTime`. While data is being imported to the dataset, # `LastModificationTime` is the current time of the `DescribeDataset` - # call. After a CreateDatasetImportJob operation has finished, + # call. After a [CreateDatasetImportJob][1] operation has finished, # `LastModificationTime` is when the import job completed or failed. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetResponse AWS API Documentation @@ -3064,6 +3274,90 @@ class DescribeForecastResponse < Struct.new( include Aws::Structure end + # @note When making an API call, you may pass DescribeMonitorRequest + # data as a hash: + # + # { + # monitor_arn: "Arn", # required + # } + # + # @!attribute [rw] monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource to describe. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeMonitorRequest AWS API Documentation + # + class DescribeMonitorRequest < Struct.new( + :monitor_arn) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] monitor_name + # The name of the monitor. + # @return [String] + # + # @!attribute [rw] monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource described. + # @return [String] + # + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) of the auto predictor being + # monitored. + # @return [String] + # + # @!attribute [rw] status + # The status of the monitor resource. + # @return [String] + # + # @!attribute [rw] last_evaluation_time + # The timestamp of the latest evaluation completed by the monitor. + # @return [Time] + # + # @!attribute [rw] last_evaluation_state + # The state of the monitor's latest evaluation. + # @return [String] + # + # @!attribute [rw] baseline + # Metrics you can use as a baseline for comparison purposes. Use these + # values you interpret monitoring results for an auto predictor. + # @return [Types::Baseline] + # + # @!attribute [rw] message + # An error message, if any, for the monitor. + # @return [String] + # + # @!attribute [rw] creation_time + # The timestamp for when the monitor resource was created. 
+ # @return [Time] + # + # @!attribute [rw] last_modification_time + # The timestamp of the latest modification to the monitor. + # @return [Time] + # + # @!attribute [rw] estimated_evaluation_time_remaining_in_minutes + # The estimated number of minutes remaining before the monitor + # resource finishes its current evaluation. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeMonitorResponse AWS API Documentation + # + class DescribeMonitorResponse < Struct.new( + :monitor_name, + :monitor_arn, + :resource_arn, + :status, + :last_evaluation_time, + :last_evaluation_state, + :baseline, + :message, + :creation_time, + :last_modification_time, + :estimated_evaluation_time_remaining_in_minutes) + SENSITIVE = [] + include Aws::Structure + end + # @note When making an API call, you may pass DescribePredictorBacktestExportJobRequest # data as a hash: # @@ -4858,6 +5152,178 @@ class ListForecastsResponse < Struct.new( include Aws::Structure end + # @note When making an API call, you may pass ListMonitorEvaluationsRequest + # data as a hash: + # + # { + # next_token: "NextToken", + # max_results: 1, + # monitor_arn: "Arn", # required + # filters: [ + # { + # key: "String", # required + # value: "Arn", # required + # condition: "IS", # required, accepts IS, IS_NOT + # }, + # ], + # } + # + # @!attribute [rw] next_token + # If the result of the previous request was truncated, the response + # includes a `NextToken`. To retrieve the next set of results, use the + # token in the next request. Tokens expire after 24 hours. + # @return [String] + # + # @!attribute [rw] max_results + # The maximum number of monitoring results to return. + # @return [Integer] + # + # @!attribute [rw] monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource to get + # results from. + # @return [String] + # + # @!attribute [rw] filters + # An array of filters. For each filter, provide a condition and a + # match statement. 
The condition is either `IS` or `IS_NOT`, which + # specifies whether to include or exclude the resources that match the + # statement from the list. The match statement consists of a key and a + # value. + # + # **Filter properties** + # + # * `Condition` - The condition to apply. Valid values are `IS` and + # `IS_NOT`. + # + # * `Key` - The name of the parameter to filter on. The only valid + # value is `EvaluationState`. + # + # * `Value` - The value to match. Valid values are only `SUCCESS` or + # `FAILURE`. + # + # For example, to list only successful monitor evaluations, you would + # specify: + # + # `"Filters": [ \{ "Condition": "IS", "Key": "EvaluationState", + # "Value": "SUCCESS" \} ]` + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListMonitorEvaluationsRequest AWS API Documentation + # + class ListMonitorEvaluationsRequest < Struct.new( + :next_token, + :max_results, + :monitor_arn, + :filters) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] next_token + # If the response is truncated, Amazon Forecast returns this token. To + # retrieve the next set of results, use the token in the next request. + # Tokens expire after 24 hours. + # @return [String] + # + # @!attribute [rw] predictor_monitor_evaluations + # The monitoring results and predictor events collected by the monitor + # resource during different windows of time. + # + # For information about monitoring see [Viewing Monitoring + # Results][1]. For more information about retrieving monitoring + # results see [Viewing Monitoring Results][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring-results.html + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListMonitorEvaluationsResponse AWS API Documentation + # + class ListMonitorEvaluationsResponse < Struct.new( + :next_token, + :predictor_monitor_evaluations) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListMonitorsRequest + # data as a hash: + # + # { + # next_token: "NextToken", + # max_results: 1, + # filters: [ + # { + # key: "String", # required + # value: "Arn", # required + # condition: "IS", # required, accepts IS, IS_NOT + # }, + # ], + # } + # + # @!attribute [rw] next_token + # If the result of the previous request was truncated, the response + # includes a `NextToken`. To retrieve the next set of results, use the + # token in the next request. Tokens expire after 24 hours. + # @return [String] + # + # @!attribute [rw] max_results + # The maximum number of monitors to include in the response. + # @return [Integer] + # + # @!attribute [rw] filters + # An array of filters. For each filter, provide a condition and a + # match statement. The condition is either `IS` or `IS_NOT`, which + # specifies whether to include or exclude the resources that match the + # statement from the list. The match statement consists of a key and a + # value. + # + # **Filter properties** + # + # * `Condition` - The condition to apply. Valid values are `IS` and + # `IS_NOT`. + # + # * `Key` - The name of the parameter to filter on. The only valid + # value is `Status`. + # + # * `Value` - The value to match. 
+ # + # For example, to list all monitors who's status is ACTIVE, you would + # specify: + # + # `"Filters": [ \{ "Condition": "IS", "Key": "Status", "Value": + # "ACTIVE" \} ]` + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListMonitorsRequest AWS API Documentation + # + class ListMonitorsRequest < Struct.new( + :next_token, + :max_results, + :filters) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] monitors + # An array of objects that summarize each monitor's properties. + # @return [Array] + # + # @!attribute [rw] next_token + # If the response is truncated, Amazon Forecast returns this token. To + # retrieve the next set of results, use the token in the next request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListMonitorsResponse AWS API Documentation + # + class ListMonitorsResponse < Struct.new( + :monitors, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + # @note When making an API call, you may pass ListPredictorBacktestExportJobsRequest # data as a hash: # @@ -5042,6 +5508,35 @@ class ListTagsForResourceResponse < Struct.new( include Aws::Structure end + # An individual metric Forecast calculated when monitoring predictor + # usage. You can compare the value for this metric to the metric's + # value in the Baseline to see how your predictor's performance is + # changing. + # + # For more information about metrics generated by Forecast see + # [Evaluating Predictor Accuracy][1] + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/metrics.html + # + # @!attribute [rw] metric_name + # The name of the metric. + # @return [String] + # + # @!attribute [rw] metric_value + # The value for the metric. 
+ # @return [Float] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/MetricResult AWS API Documentation + # + class MetricResult < Struct.new( + :metric_name, + :metric_value) + SENSITIVE = [] + include Aws::Structure + end + # Provides metrics that are used to evaluate the performance of a # predictor. This object is part of the WindowSummary object. # @@ -5077,6 +5572,144 @@ class Metrics < Struct.new( include Aws::Structure end + # The configuration details for the predictor monitor. + # + # @note When making an API call, you may pass MonitorConfig + # data as a hash: + # + # { + # monitor_name: "Name", # required + # } + # + # @!attribute [rw] monitor_name + # The name of the monitor resource. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/MonitorConfig AWS API Documentation + # + class MonitorConfig < Struct.new( + :monitor_name) + SENSITIVE = [] + include Aws::Structure + end + + # The source of the data the monitor used during the evaluation. + # + # @!attribute [rw] dataset_import_job_arn + # The Amazon Resource Name (ARN) of the dataset import job used to + # import the data that initiated the monitor evaluation. + # @return [String] + # + # @!attribute [rw] forecast_arn + # The Amazon Resource Name (ARN) of the forecast the monitor used + # during the evaluation. + # @return [String] + # + # @!attribute [rw] predictor_arn + # The Amazon Resource Name (ARN) of the predictor resource you are + # monitoring. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/MonitorDataSource AWS API Documentation + # + class MonitorDataSource < Struct.new( + :dataset_import_job_arn, + :forecast_arn, + :predictor_arn) + SENSITIVE = [] + include Aws::Structure + end + + # Provides information about the monitor resource. + # + # @!attribute [rw] monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource. 
+ # @return [String] + # + # @!attribute [rw] status + # The status of the monitor. States include: + # + # * `ACTIVE` + # + # * `ACTIVE_STOPPING`, `ACTIVE_STOPPED` + # + # * `UPDATE_IN_PROGRESS` + # + # * `CREATE_PENDING`, `CREATE_IN_PROGRESS`, `CREATE_FAILED` + # + # * `DELETE_PENDING`, `DELETE_IN_PROGRESS`, `DELETE_FAILED` + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/MonitorInfo AWS API Documentation + # + class MonitorInfo < Struct.new( + :monitor_arn, + :status) + SENSITIVE = [] + include Aws::Structure + end + + # Provides a summary of the monitor properties used in the ListMonitors + # operation. To get a complete set of properties, call the + # DescribeMonitor operation, and provide the listed `MonitorArn`. + # + # @!attribute [rw] monitor_arn + # The Amazon Resource Name (ARN) of the monitor resource. + # @return [String] + # + # @!attribute [rw] monitor_name + # The name of the monitor resource. + # @return [String] + # + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) of the predictor being monitored. + # @return [String] + # + # @!attribute [rw] status + # The status of the monitor. States include: + # + # * `ACTIVE` + # + # * `ACTIVE_STOPPING`, `ACTIVE_STOPPED` + # + # * `UPDATE_IN_PROGRESS` + # + # * `CREATE_PENDING`, `CREATE_IN_PROGRESS`, `CREATE_FAILED` + # + # * `DELETE_PENDING`, `DELETE_IN_PROGRESS`, `DELETE_FAILED` + # @return [String] + # + # @!attribute [rw] creation_time + # When the monitor resource was created. + # @return [Time] + # + # @!attribute [rw] last_modification_time + # The last time the monitor resource was modified. The timestamp + # depends on the status of the job: + # + # * `CREATE_PENDING` - The `CreationTime`. + # + # * `CREATE_IN_PROGRESS` - The current timestamp. + # + # * `STOPPED` - When the resource stopped. + # + # * `ACTIVE` or `CREATE_FAILED` - When the monitor creation finished + # or failed. 
+ # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/MonitorSummary AWS API Documentation + # + class MonitorSummary < Struct.new( + :monitor_arn, + :monitor_name, + :resource_arn, + :status, + :creation_time, + :last_modification_time) + SENSITIVE = [] + include Aws::Structure + end + # Specifies the categorical, continuous, and integer hyperparameters, # and their ranges of tunable values. The range of tunable values # determines which values that a hyperparameter tuning job can choose @@ -5204,6 +5837,49 @@ class PredictorBacktestExportJobSummary < Struct.new( include Aws::Structure end + # Metrics you can use as a baseline for comparison purposes. Use these + # metrics when you interpret monitoring results for an auto predictor. + # + # @!attribute [rw] baseline_metrics + # The initial [accuracy metrics][1] for the predictor. Use these + # metrics as a baseline for comparison purposes as you use your + # predictor and the metrics change. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/metrics.html + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/PredictorBaseline AWS API Documentation + # + class PredictorBaseline < Struct.new( + :baseline_metrics) + SENSITIVE = [] + include Aws::Structure + end + + # Provides details about a predictor event, such as a retraining. + # + # @!attribute [rw] detail + # The type of event. For example, `Retrain`. A retraining event + # denotes the timepoint when a predictor was retrained. Any monitor + # results from before the `Datetime` are from the previous predictor. + # Any new metrics are for the newly retrained predictor. + # @return [String] + # + # @!attribute [rw] datetime + # The timestamp for when the event occurred. 
+ # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/PredictorEvent AWS API Documentation + # + class PredictorEvent < Struct.new( + :detail, + :datetime) + SENSITIVE = [] + include Aws::Structure + end + # The algorithm used to perform a backtest and the status of those # tests. # @@ -5246,6 +5922,77 @@ class PredictorExecutionDetails < Struct.new( include Aws::Structure end + # Describes the results of a monitor evaluation. + # + # @!attribute [rw] resource_arn + # @return [String] + # + # @!attribute [rw] monitor_arn + # @return [String] + # + # @!attribute [rw] evaluation_time + # The timestamp that indicates when the monitor evaluation was + # started. + # @return [Time] + # + # @!attribute [rw] evaluation_state + # The status of the monitor evaluation. The state can be `SUCCESS` or + # `FAILURE`. + # @return [String] + # + # @!attribute [rw] window_start_datetime + # The timestamp that indicates the start of the window that is used + # for monitor evaluation. + # @return [Time] + # + # @!attribute [rw] window_end_datetime + # The timestamp that indicates the end of the window that is used for + # monitor evaluation. + # @return [Time] + # + # @!attribute [rw] predictor_event + # Provides details about a predictor event, such as a retraining. + # @return [Types::PredictorEvent] + # + # @!attribute [rw] monitor_data_source + # The source of the data the monitor resource used during the + # evaluation. + # @return [Types::MonitorDataSource] + # + # @!attribute [rw] metric_results + # A list of metrics Forecast calculated when monitoring a predictor. + # You can compare the value for each metric in the list to the + # metric's value in the Baseline to see how your predictor's + # performance is changing. + # @return [Array] + # + # @!attribute [rw] num_items_evaluated + # The number of items considered during the evaluation. 
+ # @return [Integer] + # + # @!attribute [rw] message + # Information about any errors that may have occurred during the + # monitor evaluation. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/PredictorMonitorEvaluation AWS API Documentation + # + class PredictorMonitorEvaluation < Struct.new( + :resource_arn, + :monitor_arn, + :evaluation_time, + :evaluation_state, + :window_start_datetime, + :window_end_datetime, + :predictor_event, + :monitor_data_source, + :metric_results, + :num_items_evaluated, + :message) + SENSITIVE = [] + include Aws::Structure + end + # Provides a summary of the predictor properties that are used in the # ListPredictors operation. To get the complete set of properties, call # the DescribePredictor operation, and provide the listed @@ -5390,6 +6137,25 @@ class ResourceNotFoundException < Struct.new( include Aws::Structure end + # @note When making an API call, you may pass ResumeResourceRequest + # data as a hash: + # + # { + # resource_arn: "Arn", # required + # } + # + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) of the monitor resource to resume. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ResumeResourceRequest AWS API Documentation + # + class ResumeResourceRequest < Struct.new( + :resource_arn) + SENSITIVE = [] + include Aws::Structure + end + # The path to the file(s) in an Amazon Simple Storage Service (Amazon # S3) bucket, and an AWS Identity and Access Management (IAM) role that # Amazon Forecast can assume to access the file(s). Optionally, includes @@ -5465,8 +6231,12 @@ class Schema < Struct.new( end # An attribute of a schema, which defines a dataset field. A schema - # attribute is required for every field in a dataset. The Schema object - # contains an array of `SchemaAttribute` objects. + # attribute is required for every field in a dataset. 
The [Schema][1] + # object contains an array of `SchemaAttribute` objects. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_Schema.html # # @note When making an API call, you may pass SchemaAttribute # data as a hash: @@ -5482,6 +6252,10 @@ class Schema < Struct.new( # # @!attribute [rw] attribute_type # The data type of the field. + # + # For a related time series dataset, other than date, item\_id, and + # forecast dimensions attributes, all attributes should be of + # numerical type (integer/float). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/SchemaAttribute AWS API Documentation @@ -5494,7 +6268,11 @@ class SchemaAttribute < Struct.new( end # Provides statistics for each data field imported into to an Amazon - # Forecast dataset with the CreateDatasetImportJob operation. + # Forecast dataset with the [CreateDatasetImportJob][1] operation. + # + # + # + # [1]: https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html # # @!attribute [rw] count # The number of values in the field. If the response value is -1, diff --git a/gems/aws-sdk-personalize/CHANGELOG.md b/gems/aws-sdk-personalize/CHANGELOG.md index 5047c6bf338..b69001326a5 100644 --- a/gems/aws-sdk-personalize/CHANGELOG.md +++ b/gems/aws-sdk-personalize/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.42.0 (2022-05-23) +------------------ + +* Feature - Adding modelMetrics as part of DescribeRecommender API response for Personalize. 
+ 1.41.0 (2022-04-19) ------------------ diff --git a/gems/aws-sdk-personalize/VERSION b/gems/aws-sdk-personalize/VERSION index 7d47e599800..a50908ca3da 100644 --- a/gems/aws-sdk-personalize/VERSION +++ b/gems/aws-sdk-personalize/VERSION @@ -1 +1 @@ -1.41.0 +1.42.0 diff --git a/gems/aws-sdk-personalize/lib/aws-sdk-personalize.rb b/gems/aws-sdk-personalize/lib/aws-sdk-personalize.rb index 3e864c9098a..d3d2fb3ade2 100644 --- a/gems/aws-sdk-personalize/lib/aws-sdk-personalize.rb +++ b/gems/aws-sdk-personalize/lib/aws-sdk-personalize.rb @@ -48,6 +48,6 @@ # @!group service module Aws::Personalize - GEM_VERSION = '1.41.0' + GEM_VERSION = '1.42.0' end diff --git a/gems/aws-sdk-personalize/lib/aws-sdk-personalize/client.rb b/gems/aws-sdk-personalize/lib/aws-sdk-personalize/client.rb index 794c69b8d15..9e05bbc29bd 100644 --- a/gems/aws-sdk-personalize/lib/aws-sdk-personalize/client.rb +++ b/gems/aws-sdk-personalize/lib/aws-sdk-personalize/client.rb @@ -1282,6 +1282,9 @@ def create_filter(params = {}, options = {}) # * CREATE PENDING > CREATE IN\_PROGRESS > ACTIVE -or- CREATE # FAILED # + # * STOP PENDING > STOP IN\_PROGRESS > INACTIVE > START PENDING + # > START IN\_PROGRESS > ACTIVE + # # * DELETE PENDING > DELETE IN\_PROGRESS # # To get the recommender status, call [DescribeRecommender][2]. @@ -2515,11 +2518,17 @@ def describe_recipe(params = {}, options = {}) # * CREATE PENDING > CREATE IN\_PROGRESS > ACTIVE -or- CREATE # FAILED # + # * STOP PENDING > STOP IN\_PROGRESS > INACTIVE > START PENDING + # > START IN\_PROGRESS > ACTIVE + # # * DELETE PENDING > DELETE IN\_PROGRESS # # When the `status` is `CREATE FAILED`, the response includes the # `failureReason` key, which describes why. # + # The `modelMetrics` key is null when the recommender is being created + # or deleted. + # # For more information on recommenders, see [CreateRecommender][1]. 
# # @@ -2559,6 +2568,8 @@ def describe_recipe(params = {}, options = {}) # resp.recommender.latest_recommender_update.last_updated_date_time #=> Time # resp.recommender.latest_recommender_update.status #=> String # resp.recommender.latest_recommender_update.failure_reason #=> String + # resp.recommender.model_metrics #=> Hash + # resp.recommender.model_metrics["MetricName"] #=> Float # # @see http://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeRecommender AWS API Documentation # @@ -3851,7 +3862,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-personalize' - context[:gem_version] = '1.41.0' + context[:gem_version] = '1.42.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-personalize/lib/aws-sdk-personalize/client_api.rb b/gems/aws-sdk-personalize/lib/aws-sdk-personalize/client_api.rb index c3eba911c8a..c966d12449e 100644 --- a/gems/aws-sdk-personalize/lib/aws-sdk-personalize/client_api.rb +++ b/gems/aws-sdk-personalize/lib/aws-sdk-personalize/client_api.rb @@ -1069,6 +1069,7 @@ module ClientApi Recommender.add_member(:status, Shapes::ShapeRef.new(shape: Status, location_name: "status")) Recommender.add_member(:failure_reason, Shapes::ShapeRef.new(shape: FailureReason, location_name: "failureReason")) Recommender.add_member(:latest_recommender_update, Shapes::ShapeRef.new(shape: RecommenderUpdateSummary, location_name: "latestRecommenderUpdate")) + Recommender.add_member(:model_metrics, Shapes::ShapeRef.new(shape: Metrics, location_name: "modelMetrics")) Recommender.struct_class = Types::Recommender RecommenderConfig.add_member(:item_exploration_config, Shapes::ShapeRef.new(shape: HyperParameters, location_name: "itemExplorationConfig")) @@ -1382,6 +1383,7 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ResourceAlreadyExistsException) o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) o.errors << Shapes::ShapeRef.new(shape: 
LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: ResourceInUseException) o.errors << Shapes::ShapeRef.new(shape: TooManyTagsException) end) diff --git a/gems/aws-sdk-personalize/lib/aws-sdk-personalize/types.rb b/gems/aws-sdk-personalize/lib/aws-sdk-personalize/types.rb index 8cfe19755f3..3de944a8d49 100644 --- a/gems/aws-sdk-personalize/lib/aws-sdk-personalize/types.rb +++ b/gems/aws-sdk-personalize/lib/aws-sdk-personalize/types.rb @@ -3644,7 +3644,12 @@ class GetSolutionMetricsRequest < Struct.new( # @return [String] # # @!attribute [rw] metrics - # The metrics for the solution version. + # The metrics for the solution version. For more information, see [ + # Evaluating a solution version with metrics ][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/personalize/latest/dg/working-with-training-metrics.html # @return [Hash] # # @see http://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/GetSolutionMetricsResponse AWS API Documentation @@ -4844,6 +4849,9 @@ class RecipeSummary < Struct.new( # * CREATE PENDING > CREATE IN\_PROGRESS > ACTIVE -or- CREATE # FAILED # + # * STOP PENDING > STOP IN\_PROGRESS > INACTIVE > START + # PENDING > START IN\_PROGRESS > ACTIVE + # # * DELETE PENDING > DELETE IN\_PROGRESS # @return [String] # @@ -4855,6 +4863,16 @@ class RecipeSummary < Struct.new( # Provides a summary of the latest updates to the recommender. # @return [Types::RecommenderUpdateSummary] # + # @!attribute [rw] model_metrics + # Provides evaluation metrics that help you determine the performance + # of a recommender. For more information, see [ Evaluating a + # recommender][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/personalize/latest/dg/evaluating-recommenders.html + # @return [Hash] + # # @see http://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/Recommender AWS API Documentation # class Recommender < Struct.new( @@ -4867,7 +4885,8 @@ class Recommender < Struct.new( :last_updated_date_time, :status, :failure_reason, - :latest_recommender_update) + :latest_recommender_update, + :model_metrics) SENSITIVE = [] include Aws::Structure end @@ -4938,6 +4957,9 @@ class RecommenderConfig < Struct.new( # * CREATE PENDING > CREATE IN\_PROGRESS > ACTIVE -or- CREATE # FAILED # + # * STOP PENDING > STOP IN\_PROGRESS > INACTIVE > START + # PENDING > START IN\_PROGRESS > ACTIVE + # # * DELETE PENDING > DELETE IN\_PROGRESS # @return [String] # @@ -4994,6 +5016,9 @@ class RecommenderSummary < Struct.new( # * CREATE PENDING > CREATE IN\_PROGRESS > ACTIVE -or- CREATE # FAILED # + # * STOP PENDING > STOP IN\_PROGRESS > INACTIVE > START + # PENDING > START IN\_PROGRESS > ACTIVE + # # * DELETE PENDING > DELETE IN\_PROGRESS # @return [String] #