diff --git a/apis/elasticache/2015-02-02/api-2.json b/apis/elasticache/2015-02-02/api-2.json
index 92c5db20658..ec6e6a328df 100644
--- a/apis/elasticache/2015-02-02/api-2.json
+++ b/apis/elasticache/2015-02-02/api-2.json
@@ -1957,7 +1957,8 @@
        "OutpostMode":{"shape":"OutpostMode"},
        "PreferredOutpostArn":{"shape":"String"},
        "PreferredOutpostArns":{"shape":"PreferredOutpostArnList"},
-       "LogDeliveryConfigurations":{"shape":"LogDeliveryConfigurationRequestList"}
+       "LogDeliveryConfigurations":{"shape":"LogDeliveryConfigurationRequestList"},
+       "TransitEncryptionEnabled":{"shape":"BooleanOptional"}
      }
    },
    "CreateCacheClusterResult":{
diff --git a/apis/elasticache/2015-02-02/docs-2.json b/apis/elasticache/2015-02-02/docs-2.json
index caa793ce7e8..8cad9c8aec7 100644
--- a/apis/elasticache/2015-02-02/docs-2.json
+++ b/apis/elasticache/2015-02-02/docs-2.json
@@ -226,6 +226,7 @@
      "CacheCluster$TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to true. You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false",
      "CacheCluster$AtRestEncryptionEnabled": "A flag that enables encryption at-rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false",
      "CacheCluster$AutoMinorVersionUpgrade": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.",
+     "CreateCacheClusterMessage$TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to true. You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster. Required: Only available when creating a cache cluster in an Amazon VPC using Memcached version 1.6.12 or later.",
      "CreateReplicationGroupMessage$AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups. Default: false",
      "CreateReplicationGroupMessage$MultiAZEnabled": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ.",
      "CreateReplicationGroupMessage$AutoMinorVersionUpgrade": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.",
@@ -315,7 +316,7 @@
      }
    },
    "CacheNode": {
-     "base": "Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
+     "base": "Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
The unique ID of the service update
", "CacheCluster$CacheClusterId": "The user-supplied identifier of the cluster. This identifier is a unique key that identifies a cluster.
", "CacheCluster$ClientDownloadLandingPage": "The URL of the web page where you can download the latest ElastiCache client library.
", - "CacheCluster$CacheNodeType": "The name of the compute and memory capacity node type for the cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
For region availability, see Supported Node Types
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized with data tiering:
Current generation:
R6gd node types (available only for Redis engine version 6.2 onward).
cache.r6gd.xlarge
, cache.r6gd.2xlarge
, cache.r6gd.4xlarge
, cache.r6gd.8xlarge
, cache.r6gd.12xlarge
, cache.r6gd.16xlarge
Memory optimized:
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
For region availability, see Supported Node Types
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
+     "CacheCluster$CacheNodeType": "The name of the compute and memory capacity node type for the cluster. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
      "CacheCluster$Engine": "The name of the cache engine (memcached or redis) to be used for this cluster.",
      "CacheCluster$EngineVersion": "The version of the cache engine that is used in this cluster.",
      "CacheCluster$CacheClusterStatus": "The current state of this cluster, one of the following values: available, creating, deleted, deleting, incompatible-network, modifying, rebooting cluster nodes, restore-failed, or snapshotting.",
      "CreateCacheClusterMessage$CacheClusterId": "The node group (shard) identifier. This parameter is stored as a lowercase string. Constraints: A name must contain from 1 to 50 alphanumeric characters or hyphens. The first character must be a letter. A name cannot end with a hyphen or contain two consecutive hyphens.",
      "CreateCacheClusterMessage$ReplicationGroupId": "The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group. If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones. This parameter is only valid if the Engine parameter is redis.",
      "CreateCacheClusterMessage$PreferredAvailabilityZone": "The EC2 Availability Zone in which the cluster is created. All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones. Default: System chosen Availability Zone.",
", - "CreateCacheClusterMessage$CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
For region availability, see Supported Node Types
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
For region availability, see Supported Node Types
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
+     "CreateCacheClusterMessage$CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended) C1 node types: cache.c1.xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
      "CreateCacheClusterMessage$Engine": "The name of the cache engine to be used for this cluster. Valid values for this parameter are: memcached | redis",
      "CreateCacheClusterMessage$EngineVersion": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.",
      "CreateCacheClusterMessage$CacheParameterGroupName": "The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster.",
      "CreateReplicationGroupMessage$ReplicationGroupDescription": "A user-created description for the replication group.",
      "CreateReplicationGroupMessage$GlobalReplicationGroupId": "The name of the Global datastore",
      "CreateReplicationGroupMessage$PrimaryClusterId": "The identifier of the cluster that serves as the primary for this replication group. This cluster must already exist and have a status of available. This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified.",
-     "CreateReplicationGroupMessage$CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
+     "CreateReplicationGroupMessage$CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
      "CreateReplicationGroupMessage$Engine": "The name of the cache engine to be used for the clusters in this replication group. Must be Redis.",
      "CreateReplicationGroupMessage$EngineVersion": "The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.",
      "CreateReplicationGroupMessage$CacheParameterGroupName": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on.",
      "DescribeReservedCacheNodesMessage$Marker": "An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.",
      "DescribeReservedCacheNodesMessage$ReservedCacheNodeId": "The reserved cache node identifier filter value. Use this parameter to show only the reservation that matches the specified reservation ID.",
      "DescribeReservedCacheNodesMessage$ReservedCacheNodesOfferingId": "The offering identifier filter value. Use this parameter to show only purchased reservations matching the specified offering identifier.",
", - "DescribeReservedCacheNodesMessage$CacheNodeType": "The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
For region availability, see Supported Node Types
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized with data tiering:
Current generation:
R6gd node types (available only for Redis engine version 6.2 onward).
cache.r6gd.xlarge
, cache.r6gd.2xlarge
, cache.r6gd.4xlarge
, cache.r6gd.8xlarge
, cache.r6gd.12xlarge
, cache.r6gd.16xlarge
Memory optimized:
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
For region availability, see Supported Node Types
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
+     "DescribeReservedCacheNodesMessage$CacheNodeType": "The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
      "DescribeReservedCacheNodesMessage$Duration": "The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration. Valid Values: 1 | 3 | 31536000 | 94608000",
      "DescribeReservedCacheNodesMessage$ProductDescription": "The product description filter value. Use this parameter to show only those reservations matching the specified product description.",
", "DescribeReservedCacheNodesMessage$OfferingType": "The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.
Valid values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"|\"All Upfront\"|\"Partial Upfront\"| \"No Upfront\"
An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The offering identifier filter value. Use this parameter to show only the available offering that matches the specified reservation identifier.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
-     "DescribeReservedCacheNodesOfferingsMessage$CacheNodeType": "The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward) cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
+     "DescribeReservedCacheNodesOfferingsMessage$CacheNodeType": "The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward) cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
      "DescribeReservedCacheNodesOfferingsMessage$Duration": "Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration. Valid Values: 1 | 3 | 31536000 | 94608000",
      "DescribeReservedCacheNodesOfferingsMessage$ProductDescription": "The product description filter value. Use this parameter to show only the available offerings matching the specified product description.",
", "DescribeReservedCacheNodesOfferingsMessage$OfferingType": "The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.
Valid Values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\" |\"All Upfront\"|\"Partial Upfront\"| \"No Upfront\"
The primary cluster ID that is applied immediately (if --apply-immediately
was specified), or during the next maintenance window.
The unique identifier for the reservation.
", "ReservedCacheNode$ReservedCacheNodesOfferingId": "The offering identifier.
", - "ReservedCacheNode$CacheNodeType": "The cache node type for the reserved cache nodes.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
For region availability, see Supported Node Types
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized with data tiering:
Current generation:
R6gd node types (available only for Redis engine version 6.2 onward).
cache.r6gd.xlarge
, cache.r6gd.2xlarge
, cache.r6gd.4xlarge
, cache.r6gd.8xlarge
, cache.r6gd.12xlarge
, cache.r6gd.16xlarge
Memory optimized:
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
For region availability, see Supported Node Types
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
+     "ReservedCacheNode$CacheNodeType": "The cache node type for the reserved cache nodes. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium. Previous generation: (not recommended) T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge. Compute optimized: Previous generation: (not recommended) C1 node types: cache.c1.xlarge. Memory optimized with data tiering: Current generation: R6gd node types (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge. Memory optimized: Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge. Previous generation: (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge. Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.",
      "ReservedCacheNode$ProductDescription": "The description of the reserved cache node.",
", "ReservedCacheNode$OfferingType": "The offering type of this reserved cache node.
", "ReservedCacheNode$State": "The state of the reserved cache node.
", "ReservedCacheNode$ReservationARN": "The Amazon Resource Name (ARN) of the reserved cache node.
Example: arn:aws:elasticache:us-east-1:123456789012:reserved-instance:ri-2017-03-27-08-33-25-582
Provides an identifier to allow retrieval of paginated results.
", "ReservedCacheNodesOffering$ReservedCacheNodesOfferingId": "A unique identifier for the reserved cache node offering.
", - "ReservedCacheNodesOffering$CacheNodeType": "The cache node type for the reserved cache node.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
For region availability, see Supported Node Types
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized with data tiering:
Current generation:
R6gd node types (available only for Redis engine version 6.2 onward).
cache.r6gd.xlarge
, cache.r6gd.2xlarge
, cache.r6gd.4xlarge
, cache.r6gd.8xlarge
, cache.r6gd.12xlarge
, cache.r6gd.16xlarge
Memory optimized:
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
For region availability, see Supported Node Types
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
", + "ReservedCacheNodesOffering$CacheNodeType": "The cache node type for the reserved cache node.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation: M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium.
Previous generation (not recommended): T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge.
Compute optimized:
Previous generation (not recommended): C1 node types: cache.c1.xlarge.
Memory optimized with data tiering:
Current generation: R6gd node types (available only for Redis engine version 6.2 onward): cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge.
Memory optimized:
Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge.
Previous generation (not recommended): M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge.
Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.
The cache engine used by the offering.
", "ReservedCacheNodesOffering$OfferingType": "The offering type.
", "ReservedCacheNodesOfferingMessage$Marker": "Provides an identifier to allow retrieval of paginated results.
", @@ -2339,7 +2340,7 @@ "Snapshot$CacheClusterId": "The user-supplied identifier of the source cluster.
", "Snapshot$SnapshotStatus": "The status of the snapshot. Valid values: creating
| available
| restoring
| copying
| deleting
.
Indicates whether the snapshot is from an automatic backup (automated
) or was created manually (manual
).
The name of the compute and memory capacity node type for the source cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation: M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium.
Previous generation (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.): T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge.
Compute optimized:
Previous generation (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.): C1 node types: cache.c1.xlarge.
Memory optimized with data tiering:
Current generation: R6gd node types (available only for Redis engine version 6.2 onward): cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge.
Memory optimized:
Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge.
Previous generation (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.): M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge.
Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.
The name of the compute and memory capacity node type for the source cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation: M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge. For region availability, see Supported Node Types. M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge. M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge. T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium. T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium. T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium.
Previous generation (not recommended): T1 node types: cache.t1.micro. M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge. M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge.
Compute optimized:
Previous generation (not recommended): C1 node types: cache.c1.xlarge.
Memory optimized with data tiering:
Current generation: R6gd node types (available only for Redis engine version 6.2 onward): cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge.
Memory optimized:
Current generation: R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge. For region availability, see Supported Node Types. R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge. R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge.
Previous generation (not recommended): M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge. R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge.
Additional node type info: All current generation instance types are created in Amazon VPC by default. Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is not supported on T1 instances. Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.
The name of the cache engine (memcached or redis) used by the source cluster.
The version of the cache engine that is used by the source cluster.
", "Snapshot$PreferredAvailabilityZone": "The name of the Availability Zone in which the source cluster is located.
", diff --git a/apis/forecast/2018-06-26/api-2.json b/apis/forecast/2018-06-26/api-2.json index 38185c2f2ee..ba91e35caf2 100644 --- a/apis/forecast/2018-06-26/api-2.json +++ b/apis/forecast/2018-06-26/api-2.json @@ -139,6 +139,22 @@ {"shape":"LimitExceededException"} ] }, + "CreateMonitor":{ + "name":"CreateMonitor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMonitorRequest"}, + "output":{"shape":"CreateMonitorResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ] + }, "CreatePredictor":{ "name":"CreatePredictor", "http":{ @@ -269,6 +285,20 @@ ], "idempotent":true }, + "DeleteMonitor":{ + "name":"DeleteMonitor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMonitorRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "idempotent":true + }, "DeletePredictor":{ "name":"DeletePredictor", "http":{ @@ -423,6 +453,20 @@ ], "idempotent":true }, + "DescribeMonitor":{ + "name":"DescribeMonitor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMonitorRequest"}, + "output":{"shape":"DescribeMonitorResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, "DescribePredictor":{ "name":"DescribePredictor", "http":{ @@ -562,6 +606,35 @@ ], "idempotent":true }, + "ListMonitorEvaluations":{ + "name":"ListMonitorEvaluations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMonitorEvaluationsRequest"}, + "output":{"shape":"ListMonitorEvaluationsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, + "ListMonitors":{ + "name":"ListMonitors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMonitorsRequest"}, + "output":{"shape":"ListMonitorsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "idempotent":true + }, "ListPredictorBacktestExportJobs":{ "name":"ListPredictorBacktestExportJobs", "http":{ @@ -603,6 +676,21 @@ {"shape":"InvalidInputException"} ] }, + "ResumeResource":{ + "name":"ResumeResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResumeResourceRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "idempotent":true + }, "StopResource":{ "name":"StopResource", "http":{ @@ -718,6 +806,23 @@ "AccuracyOptimized" ] }, + "Baseline":{ + "type":"structure", + "members":{ + "PredictorBaseline":{"shape":"PredictorBaseline"} + } + }, + "BaselineMetric":{ + "type":"structure", + "members":{ + "Name":{"shape":"Name"}, + "Value":{"shape":"Double"} + } + }, + "BaselineMetrics":{ + "type":"list", + "member":{"shape":"BaselineMetric"} + }, "Boolean":{"type":"boolean"}, "CategoricalParameterRange":{ "type":"structure", @@ -775,7 +880,8 @@ "ReferencePredictorArn":{"shape":"Arn"}, "OptimizationMetric":{"shape":"OptimizationMetric"}, "ExplainPredictor":{"shape":"Boolean"}, - "Tags":{"shape":"Tags"} + "Tags":{"shape":"Tags"}, + "MonitorConfig":{"shape":"MonitorConfig"} } }, 
"CreateAutoPredictorResponse":{ @@ -935,6 +1041,24 @@ "ForecastArn":{"shape":"Arn"} } }, + "CreateMonitorRequest":{ + "type":"structure", + "required":[ + "MonitorName", + "ResourceArn" + ], + "members":{ + "MonitorName":{"shape":"Name"}, + "ResourceArn":{"shape":"Arn"}, + "Tags":{"shape":"Tags"} + } + }, + "CreateMonitorResponse":{ + "type":"structure", + "members":{ + "MonitorArn":{"shape":"Arn"} + } + }, "CreatePredictorBacktestExportJobRequest":{ "type":"structure", "required":[ @@ -1111,6 +1235,13 @@ "ForecastArn":{"shape":"Arn"} } }, + "DeleteMonitorRequest":{ + "type":"structure", + "required":["MonitorArn"], + "members":{ + "MonitorArn":{"shape":"Arn"} + } + }, "DeletePredictorBacktestExportJobRequest":{ "type":"structure", "required":["PredictorBacktestExportJobArn"], @@ -1158,7 +1289,8 @@ "CreationTime":{"shape":"Timestamp"}, "LastModificationTime":{"shape":"Timestamp"}, "OptimizationMetric":{"shape":"OptimizationMetric"}, - "ExplainabilityInfo":{"shape":"ExplainabilityInfo"} + "ExplainabilityInfo":{"shape":"ExplainabilityInfo"}, + "MonitorInfo":{"shape":"MonitorInfo"} } }, "DescribeDatasetGroupRequest":{ @@ -1317,6 +1449,29 @@ "LastModificationTime":{"shape":"Timestamp"} } }, + "DescribeMonitorRequest":{ + "type":"structure", + "required":["MonitorArn"], + "members":{ + "MonitorArn":{"shape":"Arn"} + } + }, + "DescribeMonitorResponse":{ + "type":"structure", + "members":{ + "MonitorName":{"shape":"Name"}, + "MonitorArn":{"shape":"Arn"}, + "ResourceArn":{"shape":"Arn"}, + "Status":{"shape":"Status"}, + "LastEvaluationTime":{"shape":"Timestamp"}, + "LastEvaluationState":{"shape":"EvaluationState"}, + "Baseline":{"shape":"Baseline"}, + "Message":{"shape":"Message"}, + "CreationTime":{"shape":"Timestamp"}, + "LastModificationTime":{"shape":"Timestamp"}, + "EstimatedEvaluationTimeRemainingInMinutes":{"shape":"Long"} + } + }, "DescribePredictorBacktestExportJobRequest":{ "type":"structure", "required":["PredictorBacktestExportJobArn"], @@ -1373,6 +1528,10 @@ "OptimizationMetric":{"shape":"OptimizationMetric"} } }, + "Detail":{ + "type":"string", + "max":256 + }, "Domain":{ "type":"string", "enum":[ @@ -1426,6 +1585,10 @@ "TestWindows":{"shape":"TestWindows"} } }, + "EvaluationState":{ + "type":"string", + "max":256 + }, "EvaluationType":{ "type":"string", "enum":[ @@ -1804,6 +1967,38 @@ "NextToken":{"shape":"NextToken"} } }, + "ListMonitorEvaluationsRequest":{ + "type":"structure", + "required":["MonitorArn"], + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "MonitorArn":{"shape":"Arn"}, + "Filters":{"shape":"Filters"} + } + }, + "ListMonitorEvaluationsResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "PredictorMonitorEvaluations":{"shape":"PredictorMonitorEvaluations"} + } + }, + "ListMonitorsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "Filters":{"shape":"Filters"} + } + }, + "ListMonitorsResponse":{ + "type":"structure", + "members":{ + "Monitors":{"shape":"Monitors"}, + "NextToken":{"shape":"NextToken"} + } + }, "ListPredictorBacktestExportJobsRequest":{ "type":"structure", "members":{ @@ -1859,6 +2054,21 @@ "min":1 }, "Message":{"type":"string"}, + "MetricName":{ + "type":"string", + "max":256 + }, + "MetricResult":{ + "type":"structure", + "members":{ + "MetricName":{"shape":"MetricName"}, + "MetricValue":{"shape":"Double"} + } + }, + "MetricResults":{ + "type":"list", + "member":{"shape":"MetricResult"} + }, "Metrics":{ 
"type":"structure", "members":{ @@ -1872,6 +2082,43 @@ "AverageWeightedQuantileLoss":{"shape":"Double"} } }, + "MonitorConfig":{ + "type":"structure", + "required":["MonitorName"], + "members":{ + "MonitorName":{"shape":"Name"} + } + }, + "MonitorDataSource":{ + "type":"structure", + "members":{ + "DatasetImportJobArn":{"shape":"Arn"}, + "ForecastArn":{"shape":"Arn"}, + "PredictorArn":{"shape":"Arn"} + } + }, + "MonitorInfo":{ + "type":"structure", + "members":{ + "MonitorArn":{"shape":"Arn"}, + "Status":{"shape":"Status"} + } + }, + "MonitorSummary":{ + "type":"structure", + "members":{ + "MonitorArn":{"shape":"Arn"}, + "MonitorName":{"shape":"Name"}, + "ResourceArn":{"shape":"Arn"}, + "Status":{"shape":"Status"}, + "CreationTime":{"shape":"Timestamp"}, + "LastModificationTime":{"shape":"Timestamp"} + } + }, + "Monitors":{ + "type":"list", + "member":{"shape":"MonitorSummary"} + }, "Name":{ "type":"string", "max":63, @@ -1928,10 +2175,23 @@ "type":"list", "member":{"shape":"PredictorBacktestExportJobSummary"} }, + "PredictorBaseline":{ + "type":"structure", + "members":{ + "BaselineMetrics":{"shape":"BaselineMetrics"} + } + }, "PredictorEvaluationResults":{ "type":"list", "member":{"shape":"EvaluationResult"} }, + "PredictorEvent":{ + "type":"structure", + "members":{ + "Detail":{"shape":"Detail"}, + "Datetime":{"shape":"Timestamp"} + } + }, "PredictorExecution":{ "type":"structure", "members":{ @@ -1951,6 +2211,26 @@ "max":5, "min":1 }, + "PredictorMonitorEvaluation":{ + "type":"structure", + "members":{ + "ResourceArn":{"shape":"Arn"}, + "MonitorArn":{"shape":"Arn"}, + "EvaluationTime":{"shape":"Timestamp"}, + "EvaluationState":{"shape":"EvaluationState"}, + "WindowStartDatetime":{"shape":"Timestamp"}, + "WindowEndDatetime":{"shape":"Timestamp"}, + "PredictorEvent":{"shape":"PredictorEvent"}, + "MonitorDataSource":{"shape":"MonitorDataSource"}, + "MetricResults":{"shape":"MetricResults"}, + "NumItemsEvaluated":{"shape":"Long"}, + "Message":{"shape":"Message"} + } + }, + "PredictorMonitorEvaluations":{ + "type":"list", + "member":{"shape":"PredictorMonitorEvaluation"} + }, "PredictorSummary":{ "type":"structure", "members":{ @@ -1997,6 +2277,13 @@ }, "exception":true }, + "ResumeResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{"shape":"Arn"} + } + }, "S3Config":{ "type":"structure", "required":[ diff --git a/apis/forecast/2018-06-26/docs-2.json b/apis/forecast/2018-06-26/docs-2.json index 706f9e88777..0ab7e84fa7a 100644 --- a/apis/forecast/2018-06-26/docs-2.json +++ b/apis/forecast/2018-06-26/docs-2.json @@ -2,51 +2,57 @@ "version": "2.0", "service": "Provides APIs for creating and managing Amazon Forecast resources.
", "operations": { - "CreateAutoPredictor": "Creates an Amazon Forecast predictor.
Amazon Forecast creates predictors with AutoPredictor, which involves applying the optimal combination of algorithms to each time series in your datasets. You can use CreateAutoPredictor to create new predictors or upgrade/retrain existing predictors.
Creating new predictors
The following parameters are required when creating a new predictor:
PredictorName
- A unique name for the predictor.
DatasetGroupArn
- The ARN of the dataset group used to train the predictor.
ForecastFrequency
- The granularity of your forecasts (hourly, daily, weekly, etc).
ForecastHorizon
- The number of time steps being forecasted.
When creating a new predictor, do not specify a value for ReferencePredictorArn
.
Upgrading and retraining predictors
The following parameters are required when retraining or upgrading a predictor:
PredictorName
- A unique name for the predictor.
ReferencePredictorArn
- The ARN of the predictor to retrain or upgrade.
When upgrading or retraining a predictor, only specify values for the ReferencePredictorArn
and PredictorName
.
Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:
DataFrequency
- How frequently your historical time-series data is collected.
Domain
and DatasetType
- Each dataset has an associated dataset domain and a type within the domain. Amazon Forecast provides a list of predefined domains and types within each domain. For each unique dataset domain and type within the domain, Amazon Forecast requires your data to include a minimum set of predefined fields.
Schema
- A schema specifies the fields in the dataset, including the field name and data type.
After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see howitworks-datasets-groups.
To get a list of all your datasets, use the ListDatasets operation.
For example Forecast datasets, see the Amazon Forecast Sample GitHub repository.
The Status
of a dataset must be ACTIVE
before you can import training data. Use the DescribeDataset operation to get the status.
Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the UpdateDatasetGroup operation.
After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see howitworks-datasets-groups.
To get a list of all your datasets groups, use the ListDatasetGroups operation.
The Status
of a dataset group must be ACTIVE
before you can use the dataset group to create a predictor. To get the status, use the DescribeDatasetGroup operation.
Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.
You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see aws-forecast-iam-roles.
The training data must be in CSV format. The delimiter must be a comma (,).
You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.
Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.
To get a list of all your dataset import jobs, filtered by specified criteria, use the ListDatasetImportJobs operation.
", + "CreateAutoPredictor": "Creates an Amazon Forecast predictor.
Amazon Forecast creates predictors with AutoPredictor, which involves applying the optimal combination of algorithms to each time series in your datasets. You can use CreateAutoPredictor to create new predictors or upgrade/retrain existing predictors.
Creating new predictors
The following parameters are required when creating a new predictor:
PredictorName
- A unique name for the predictor.
DatasetGroupArn
- The ARN of the dataset group used to train the predictor.
ForecastFrequency
- The granularity of your forecasts (hourly, daily, weekly, etc).
ForecastHorizon
- The number of time-steps that the model predicts. The forecast horizon is also called the prediction length.
When creating a new predictor, do not specify a value for ReferencePredictorArn
.
Upgrading and retraining predictors
The following parameters are required when retraining or upgrading a predictor:
PredictorName
- A unique name for the predictor.
ReferencePredictorArn
- The ARN of the predictor to retrain or upgrade.
When upgrading or retraining a predictor, only specify values for the ReferencePredictorArn
and PredictorName
.
Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:
DataFrequency
- How frequently your historical time-series data is collected.
Domain
and DatasetType
- Each dataset has an associated dataset domain and a type within the domain. Amazon Forecast provides a list of predefined domains and types within each domain. For each unique dataset domain and type within the domain, Amazon Forecast requires your data to include a minimum set of predefined fields.
Schema
- A schema specifies the fields in the dataset, including the field name and data type.
After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see Importing datasets.
To get a list of all your datasets, use the ListDatasets operation.
For example Forecast datasets, see the Amazon Forecast Sample GitHub repository.
The Status
of a dataset must be ACTIVE
before you can import training data. Use the DescribeDataset operation to get the status.
Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the UpdateDatasetGroup operation.
After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see Dataset groups.
To get a list of all your datasets groups, use the ListDatasetGroups operation.
The Status
of a dataset group must be ACTIVE
before you can use the dataset group to create a predictor. To get the status, use the DescribeDatasetGroup operation.
Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.
You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see Set up permissions.
The training data must be in CSV format. The delimiter must be a comma (,).
You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.
Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.
To get a list of all your dataset import jobs, filtered by specified criteria, use the ListDatasetImportJobs operation.
", "CreateExplainability": "Explainability is only available for Forecasts and Predictors generated from an AutoPredictor (CreateAutoPredictor)
Creates an Amazon Forecast Explainability.
Explainability helps you better understand how the attributes in your datasets impact forecast. Amazon Forecast uses a metric called Impact scores to quantify the relative impact of each attribute and determine whether they increase or decrease forecast values.
To enable Forecast Explainability, your predictor must include at least one of the following: related time series, item metadata, or additional datasets like Holidays and the Weather Index.
CreateExplainability accepts either a Predictor ARN or Forecast ARN. To receive aggregated Impact scores for all time series and time points in your datasets, provide a Predictor ARN. To receive Impact scores for specific time series and time points, provide a Forecast ARN.
CreateExplainability with a Predictor ARN
You can only have one Explainability resource per predictor. If you already enabled ExplainPredictor
in CreateAutoPredictor, that predictor already has an Explainability resource.
The following parameters are required when providing a Predictor ARN:
ExplainabilityName
- A unique name for the Explainability.
ResourceArn
- The Arn of the predictor.
TimePointGranularity
- Must be set to “ALL”.
TimeSeriesGranularity
- Must be set to “ALL”.
Do not specify a value for the following parameters:
DataSource
- Only valid when TimeSeriesGranularity is “SPECIFIC”.
Schema
- Only valid when TimeSeriesGranularity is “SPECIFIC”.
StartDateTime
- Only valid when TimePointGranularity is “SPECIFIC”.
EndDateTime
- Only valid when TimePointGranularity is “SPECIFIC”.
CreateExplainability with a Forecast ARN
You can specify a maximum of 50 time series and 500 time points.
The following parameters are required when providing a Predictor ARN:
ExplainabilityName
- A unique name for the Explainability.
ResourceArn
- The Arn of the forecast.
TimePointGranularity
- Either “ALL” or “SPECIFIC”.
TimeSeriesGranularity
- Either “ALL” or “SPECIFIC”.
If you set TimeSeriesGranularity to “SPECIFIC”, you must also provide the following:
DataSource
- The S3 location of the CSV file specifying your time series.
Schema
- The Schema defines the attributes and attribute types listed in the Data Source.
If you set TimePointGranularity to “SPECIFIC”, you must also provide the following:
StartDateTime
- The first timestamp in the range of time points.
EndDateTime
- The last timestamp in the range of time points.
Exports an Explainability resource created by the CreateExplainability operation. Exported files are exported to an Amazon Simple Storage Service (Amazon S3) bucket.
You must specify a DataDestination object that includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.
The Status
of the export job must be ACTIVE
before you can access the export in your Amazon S3 bucket. To get the status, use the DescribeExplainabilityExport operation.
Creates a forecast for each item in the TARGET_TIME_SERIES
dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the CreateForecastExportJob operation.
The range of the forecast is determined by the ForecastHorizon
value, which you specify in the CreatePredictor request. When you query a forecast, you can request a specific date range within the forecast.
To get a list of all your forecasts, use the ListForecasts operation.
The forecasts generated by Amazon Forecast are in the same time zone as the dataset that was used to create the predictor.
For more information, see howitworks-forecast.
The Status
of the forecast must be ACTIVE
before you can query or export the forecast. Use the DescribeForecast operation to get the status.
Exports a forecast created by the CreateForecast operation to your Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will match the following conventions:
<ForecastExportJobName>_<ExportTimestamp>_<PartNumber>
where the <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).
You must specify a DataDestination object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.
For more information, see howitworks-forecast.
To get a list of all your forecast export jobs, use the ListForecastExportJobs operation.
The Status
of the forecast export job must be ACTIVE
before you can access the forecast in your Amazon S3 bucket. To get the status, use the DescribeForecastExportJob operation.
Creates a predictor monitor resource for an existing auto predictor. Predictor monitoring allows you to see how your predictor's performance changes over time. For more information, see Predictor Monitoring.
", "CreatePredictor": "This operation creates a legacy predictor that does not include all the predictor functionalities provided by Amazon Forecast. To create a predictor that is compatible with all aspects of Forecast, use CreateAutoPredictor.
Creates an Amazon Forecast predictor.
In the request, provide a dataset group and either specify an algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If you specify an algorithm, you also can override algorithm-specific hyperparameters.
Amazon Forecast uses the algorithm to train a predictor using the latest version of the datasets in the specified dataset group. You can then generate a forecast using the CreateForecast operation.
To see the evaluation metrics, use the GetAccuracyMetrics operation.
You can specify a featurization configuration to fill and aggregate the data fields in the TARGET_TIME_SERIES
dataset to improve model training. For more information, see FeaturizationConfig.
For RELATED_TIME_SERIES datasets, CreatePredictor
verifies that the DataFrequency
specified when the dataset was created matches the ForecastFrequency
. TARGET_TIME_SERIES datasets don't have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see howitworks-datasets-groups.
By default, predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90) quantiles. You can choose custom forecast types to train and evaluate your predictor by setting the ForecastTypes
.
AutoML
If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the objective function
, set PerformAutoML
to true
. The objective function
is defined as the mean of the weighted losses over the forecast types. By default, these are the p10, p50, and p90 quantile losses. For more information, see EvaluationResult.
When AutoML is enabled, the following properties are disallowed:
AlgorithmArn
HPOConfig
PerformHPO
TrainingParameters
To get a list of all of your predictors, use the ListPredictors operation.
Before you can use the predictor to create a forecast, the Status
of the predictor must be ACTIVE
, signifying that training has completed. To get the status, use the DescribePredictor operation.
Exports backtest forecasts and accuracy metrics generated by the CreateAutoPredictor or CreatePredictor operations. Two folders containing CSV files are exported to your specified S3 bucket.
The export file names will match the following conventions:
<ExportJobName>_<ExportTimestamp>_<PartNumber>.csv
The <ExportTimestamp> component is in Java SimpleDate format (yyyy-MM-ddTHH-mm-ssZ).
You must specify a DataDestination object that includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.
The Status
of the export job must be ACTIVE
before you can access the export in your Amazon S3 bucket. To get the status, use the DescribePredictorBacktestExportJob operation.
Deletes an Amazon Forecast dataset that was created using the CreateDataset operation. You can only delete datasets that have a status of ACTIVE
or CREATE_FAILED
. To get the status use the DescribeDataset operation.
Forecast does not automatically update any dataset groups that contain the deleted dataset. In order to update the dataset group, use the operation, omitting the deleted dataset's ARN.
Deletes a dataset group created using the CreateDatasetGroup operation. You can only delete dataset groups that have a status of ACTIVE
, CREATE_FAILED
, or UPDATE_FAILED
. To get the status, use the DescribeDatasetGroup operation.
This operation deletes only the dataset group, not the datasets in the group.
", - "DeleteDatasetImportJob": "Deletes a dataset import job created using the CreateDatasetImportJob operation. You can delete only dataset import jobs that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeDatasetImportJob operation.
Deletes an Amazon Forecast dataset that was created using the CreateDataset operation. You can only delete datasets that have a status of ACTIVE
or CREATE_FAILED
. To get the status use the DescribeDataset operation.
Forecast does not automatically update any dataset groups that contain the deleted dataset. In order to update the dataset group, use the UpdateDatasetGroup operation, omitting the deleted dataset's ARN.
Deletes a dataset group created using the CreateDatasetGroup operation. You can only delete dataset groups that have a status of ACTIVE
, CREATE_FAILED
, or UPDATE_FAILED
. To get the status, use the DescribeDatasetGroup operation.
This operation deletes only the dataset group, not the datasets in the group.
", + "DeleteDatasetImportJob": "Deletes a dataset import job created using the CreateDatasetImportJob operation. You can delete only dataset import jobs that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeDatasetImportJob operation.
Deletes an Explainability resource.
You can delete only predictor that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeExplainability operation.
Deletes an Explainability export.
", "DeleteForecast": "Deletes a forecast created using the CreateForecast operation. You can delete only forecasts that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeForecast operation.
You can't delete a forecast while it is being exported. After a forecast is deleted, you can no longer query the forecast.
", "DeleteForecastExportJob": "Deletes a forecast export job created using the CreateForecastExportJob operation. You can delete only export jobs that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeForecastExportJob operation.
Deletes a monitor resource. You can only delete a monitor resource with a status of ACTIVE
, ACTIVE_STOPPED
, CREATE_FAILED
, or CREATE_STOPPED
.
Deletes a predictor created using the DescribePredictor or CreatePredictor operations. You can delete only predictor that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribePredictor operation.
Deletes a predictor backtest export job.
", "DeleteResourceTree": "Deletes an entire resource tree. This operation will delete the parent resource and its child resources.
Child resources are resources that were created from another resource. For example, when a forecast is generated from a predictor, the forecast is the child resource and the predictor is the parent resource.
Amazon Forecast resources possess the following parent-child resource hierarchies:
Dataset: dataset import jobs
Dataset Group: predictors, predictor backtest export jobs, forecasts, forecast export jobs
Predictor: predictor backtest export jobs, forecasts, forecast export jobs
Forecast: forecast export jobs
DeleteResourceTree
will only delete Amazon Forecast resources, and will not delete datasets or exported files stored in Amazon S3.
Describes a predictor created using the CreateAutoPredictor operation.
", - "DescribeDataset": "Describes an Amazon Forecast dataset created using the CreateDataset operation.
In addition to listing the parameters specified in the CreateDataset
request, this operation includes the following dataset properties:
CreationTime
LastModificationTime
Status
Describes a dataset group created using the CreateDatasetGroup operation.
In addition to listing the parameters provided in the CreateDatasetGroup
request, this operation includes the following properties:
DatasetArns
- The datasets belonging to the group.
CreationTime
LastModificationTime
Status
Describes a dataset import job created using the CreateDatasetImportJob operation.
In addition to listing the parameters provided in the CreateDatasetImportJob
request, this operation includes the following properties:
CreationTime
LastModificationTime
DataSize
FieldStatistics
Status
Message
- If an error occurred, information about the error.
Describes an Amazon Forecast dataset created using the CreateDataset operation.
In addition to listing the parameters specified in the CreateDataset
request, this operation includes the following dataset properties:
CreationTime
LastModificationTime
Status
Describes a dataset group created using the CreateDatasetGroup operation.
In addition to listing the parameters provided in the CreateDatasetGroup
request, this operation includes the following properties:
DatasetArns
- The datasets belonging to the group.
CreationTime
LastModificationTime
Status
Describes a dataset import job created using the CreateDatasetImportJob operation.
In addition to listing the parameters provided in the CreateDatasetImportJob
request, this operation includes the following properties:
CreationTime
LastModificationTime
DataSize
FieldStatistics
Status
Message
- If an error occurred, information about the error.
Describes an Explainability resource created using the CreateExplainability operation.
", "DescribeExplainabilityExport": "Describes an Explainability export created using the CreateExplainabilityExport operation.
", "DescribeForecast": "Describes a forecast created using the CreateForecast operation.
In addition to listing the properties provided in the CreateForecast
request, this operation lists the following properties:
DatasetGroupArn
- The dataset group that provided the training data.
CreationTime
LastModificationTime
Status
Message
- If an error occurred, information about the error.
Describes a forecast export job created using the CreateForecastExportJob operation.
In addition to listing the properties provided by the user in the CreateForecastExportJob
request, this operation lists the following properties:
CreationTime
LastModificationTime
Status
Message
- If an error occurred, information about the error.
Describes a monitor resource. In addition to listing the properties provided in the CreateMonitor request, this operation lists the following properties:
Baseline
CreationTime
LastEvaluationTime
LastEvaluationState
LastModificationTime
Message
Status
This operation is only valid for legacy predictors created with CreatePredictor. If you are not using a legacy predictor, use DescribeAutoPredictor.
Describes a predictor created using the CreatePredictor operation.
In addition to listing the properties provided in the CreatePredictor
request, this operation lists the following properties:
DatasetImportJobArns
- The dataset import jobs used to import training data.
AutoMLAlgorithmArns
- If AutoML is performed, the algorithms that were evaluated.
CreationTime
LastModificationTime
Status
Message
- If an error occurred, information about the error.
Describes a predictor backtest export job created using the CreatePredictorBacktestExportJob operation.
In addition to listing the properties provided by the user in the CreatePredictorBacktestExportJob
request, this operation lists the following properties:
CreationTime
LastModificationTime
Status
Message
(if an error occurred)
Provides metrics on the accuracy of the models that were trained by the CreatePredictor operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast. For more information, see Predictor Metrics.
This operation generates metrics for each backtest window that was evaluated. The number of backtest windows (NumberOfBacktestWindows
) is specified using the EvaluationParameters object, which is optionally included in the CreatePredictor
request. If NumberOfBacktestWindows
isn't specified, the number defaults to one.
The parameters of the filling
method determine which items contribute to the metrics. If you want all items to contribute, specify zero
. If you want only those items that have complete data in the range being evaluated to contribute, specify nan
. For more information, see FeaturizationMethod.
Before you can get accuracy metrics, the Status
of the predictor must be ACTIVE
, signifying that training has completed. To get the status, use the DescribePredictor operation.
Returns a list of dataset groups created using the CreateDatasetGroup operation. For each dataset group, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the dataset group ARN with the DescribeDatasetGroup operation.
", - "ListDatasetImportJobs": "Returns a list of dataset import jobs created using the CreateDatasetImportJob operation. For each import job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the DescribeDatasetImportJob operation. You can filter the list by providing an array of Filter objects.
", - "ListDatasets": "Returns a list of datasets created using the CreateDataset operation. For each dataset, a summary of its properties, including its Amazon Resource Name (ARN), is returned. To retrieve the complete set of properties, use the ARN with the DescribeDataset operation.
", + "ListDatasetGroups": "Returns a list of dataset groups created using the CreateDatasetGroup operation. For each dataset group, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the dataset group ARN with the DescribeDatasetGroup operation.
", + "ListDatasetImportJobs": "Returns a list of dataset import jobs created using the CreateDatasetImportJob operation. For each import job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the DescribeDatasetImportJob operation. You can filter the list by providing an array of Filter objects.
", + "ListDatasets": "Returns a list of datasets created using the CreateDataset operation. For each dataset, a summary of its properties, including its Amazon Resource Name (ARN), is returned. To retrieve the complete set of properties, use the ARN with the DescribeDataset operation.
", "ListExplainabilities": "Returns a list of Explainability resources created using the CreateExplainability operation. This operation returns a summary for each Explainability. You can filter the list using an array of Filter objects.
To retrieve the complete set of properties for a particular Explainability resource, use the ARN with the DescribeExplainability operation.
", "ListExplainabilityExports": "Returns a list of Explainability exports created using the CreateExplainabilityExport operation. This operation returns a summary for each Explainability export. You can filter the list using an array of Filter objects.
To retrieve the complete set of properties for a particular Explainability export, use the ARN with the DescribeExplainability operation.
", "ListForecastExportJobs": "Returns a list of forecast export jobs created using the CreateForecastExportJob operation. For each forecast export job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, use the ARN with the DescribeForecastExportJob operation. You can filter the list using an array of Filter objects.
", "ListForecasts": "Returns a list of forecasts created using the CreateForecast operation. For each forecast, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, specify the ARN with the DescribeForecast operation. You can filter the list using an array of Filter objects.
", + "ListMonitorEvaluations": "Returns a list of the monitoring evaluation results and predictor events collected by the monitor resource during different windows of time.
For information about monitoring see Viewing Monitoring Results. For more information about retrieving monitoring results see Viewing Monitoring Results.
", + "ListMonitors": "Returns a list of monitors created with the CreateMonitor operation and CreateAutoPredictor operation. For each monitor resource, this operation returns of a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve a complete set of properties of a monitor resource by specify the monitor's ARN in the DescribeMonitor operation.
", "ListPredictorBacktestExportJobs": "Returns a list of predictor backtest export jobs created using the CreatePredictorBacktestExportJob operation. This operation returns a summary for each backtest export job. You can filter the list using an array of Filter objects.
To retrieve the complete set of properties for a particular backtest export job, use the ARN with the DescribePredictorBacktestExportJob operation.
", "ListPredictors": "Returns a list of predictors created using the CreateAutoPredictor or CreatePredictor operations. For each predictor, this operation returns a summary of its properties, including its Amazon Resource Name (ARN).
You can retrieve the complete set of properties by using the ARN with the DescribeAutoPredictor and DescribePredictor operations. You can filter the list using an array of Filter objects.
", "ListTagsForResource": "Lists the tags for an Amazon Forecast resource.
", + "ResumeResource": "Resumes a stopped monitor resource.
", "StopResource": "Stops a resource.
The resource undergoes the following states: CREATE_STOPPING
and CREATE_STOPPED
. You cannot resume a resource once it has been stopped.
This operation can be applied to the following resources (and their corresponding child resources):
Dataset Import Job
Predictor Job
Forecast Job
Forecast Export Job
Predictor Backtest Export Job
Explainability Job
Explainability Export Job
Associates the specified tags to a resource with the specified resourceArn
. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are also deleted.
Deletes the specified tags from a resource.
", - "UpdateDatasetGroup": "Replaces the datasets in a dataset group with the specified datasets.
The Status
of the dataset group must be ACTIVE
before you can use the dataset group to create a predictor. Use the DescribeDatasetGroup operation to get the status.
Replaces the datasets in a dataset group with the specified datasets.
The Status
of the dataset group must be ACTIVE
before you can use the dataset group to create a predictor. Use the DescribeDatasetGroup operation to get the status.
The Amazon Resource Name (ARN) of the export job.
", "CreateForecastRequest$PredictorArn": "The Amazon Resource Name (ARN) of the predictor to use to generate the forecast.
", "CreateForecastResponse$ForecastArn": "The Amazon Resource Name (ARN) of the forecast.
", + "CreateMonitorRequest$ResourceArn": "The Amazon Resource Name (ARN) of the predictor to monitor.
", + "CreateMonitorResponse$MonitorArn": "The Amazon Resource Name (ARN) of the monitor resource.
", "CreatePredictorBacktestExportJobRequest$PredictorArn": "The Amazon Resource Name (ARN) of the predictor that you want to export.
", "CreatePredictorBacktestExportJobResponse$PredictorBacktestExportJobArn": "The Amazon Resource Name (ARN) of the predictor backtest export job that you want to export.
", "CreatePredictorRequest$AlgorithmArn": "The Amazon Resource Name (ARN) of the algorithm to use for model training. Required if PerformAutoML
is not set to true
.
Supported algorithms:
arn:aws:forecast:::algorithm/ARIMA
arn:aws:forecast:::algorithm/CNN-QR
arn:aws:forecast:::algorithm/Deep_AR_Plus
arn:aws:forecast:::algorithm/ETS
arn:aws:forecast:::algorithm/NPTS
arn:aws:forecast:::algorithm/Prophet
The Amazon Resource Name (ARN) of the Explainability resource to delete.
", "DeleteForecastExportJobRequest$ForecastExportJobArn": "The Amazon Resource Name (ARN) of the forecast export job to delete.
", "DeleteForecastRequest$ForecastArn": "The Amazon Resource Name (ARN) of the forecast to delete.
", + "DeleteMonitorRequest$MonitorArn": "The Amazon Resource Name (ARN) of the monitor resource to delete.
", "DeletePredictorBacktestExportJobRequest$PredictorBacktestExportJobArn": "The Amazon Resource Name (ARN) of the predictor backtest export job to delete.
", "DeletePredictorRequest$PredictorArn": "The Amazon Resource Name (ARN) of the predictor to delete.
", "DeleteResourceTreeRequest$ResourceArn": "The Amazon Resource Name (ARN) of the parent resource to delete. All child resources of the parent resource will also be deleted.
", @@ -119,6 +128,9 @@ "DescribeForecastResponse$ForecastArn": "The forecast ARN as specified in the request.
", "DescribeForecastResponse$PredictorArn": "The ARN of the predictor used to generate the forecast.
", "DescribeForecastResponse$DatasetGroupArn": "The ARN of the dataset group that provided the data used to train the predictor.
", + "DescribeMonitorRequest$MonitorArn": "The Amazon Resource Name (ARN) of the monitor resource to describe.
", + "DescribeMonitorResponse$MonitorArn": "The Amazon Resource Name (ARN) of the monitor resource described.
", + "DescribeMonitorResponse$ResourceArn": "The Amazon Resource Name (ARN) of the auto predictor being monitored.
", "DescribePredictorBacktestExportJobRequest$PredictorBacktestExportJobArn": "The Amazon Resource Name (ARN) of the predictor backtest export job.
", "DescribePredictorBacktestExportJobResponse$PredictorBacktestExportJobArn": "The Amazon Resource Name (ARN) of the predictor backtest export job.
", "DescribePredictorBacktestExportJobResponse$PredictorArn": "The Amazon Resource Name (ARN) of the predictor.
", @@ -135,12 +147,22 @@ "ForecastSummary$ForecastArn": "The ARN of the forecast.
", "GetAccuracyMetricsRequest$PredictorArn": "The Amazon Resource Name (ARN) of the predictor to get metrics for.
", "InputDataConfig$DatasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group.
", + "ListMonitorEvaluationsRequest$MonitorArn": "The Amazon Resource Name (ARN) of the monitor resource to get results from.
", "ListTagsForResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) that identifies the resource for which to list the tags.
", + "MonitorDataSource$DatasetImportJobArn": "The Amazon Resource Name (ARN) of the dataset import job used to import the data that initiated the monitor evaluation.
", + "MonitorDataSource$ForecastArn": "The Amazon Resource Name (ARN) of the forecast the monitor used during the evaluation.
", + "MonitorDataSource$PredictorArn": "The Amazon Resource Name (ARN) of the predictor resource you are monitoring.
", + "MonitorInfo$MonitorArn": "The Amazon Resource Name (ARN) of the monitor resource.
", + "MonitorSummary$MonitorArn": "The Amazon Resource Name (ARN) of the monitor resource.
", + "MonitorSummary$ResourceArn": "The Amazon Resource Name (ARN) of the predictor being monitored.
", "PredictorBacktestExportJobSummary$PredictorBacktestExportJobArn": "The Amazon Resource Name (ARN) of the predictor backtest export job.
", "PredictorExecution$AlgorithmArn": "The ARN of the algorithm used to test the predictor.
", + "PredictorMonitorEvaluation$ResourceArn": null, + "PredictorMonitorEvaluation$MonitorArn": null, "PredictorSummary$PredictorArn": "The ARN of the predictor.
", "PredictorSummary$DatasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group that contains the data used to train the predictor.
", "ReferencePredictorSummary$Arn": "The ARN of the reference predictor.
", + "ResumeResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the monitor resource to resume.
", "S3Config$RoleArn": "The ARN of the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket or files. If you provide a value for the KMSKeyArn
key, the role must allow access to the key.
Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an InvalidInputException
error.
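A minimal sketch of the DataSource shape these requirements describe; the bucket path, role ARN, and key ARN below are placeholders, and KMSKeyArn applies only when the data is encrypted:

```python
# Hypothetical DataSource for a dataset import; all ARNs and paths are
# placeholders, not real resources.
data_source = {
    "S3Config": {
        "Path": "s3://example-forecast-bucket/training/demand.csv",
        # The role must be in your own account and must grant Amazon Forecast
        # read access to the bucket (and to the KMS key, if one is used).
        "RoleArn": "arn:aws:iam::123456789012:role/ExampleForecastS3Access",
        # Optional: include only when the data is KMS-encrypted.
        "KMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/example-key-id",
    }
}
```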
The Amazon Resource Name (ARN) that identifies the resource to stop. The supported ARNs are DatasetImportJobArn
, PredictorArn
, PredictorBacktestExportJobArn
, ForecastArn
, ForecastExportJobArn
, ExplainabilityArn
, and ExplainabilityExportArn
.
The Amazon Resource Name (ARN) that identifies the resource for which to list the tags.
", @@ -174,7 +196,7 @@ "AttributeType": { "base": null, "refs": { - "SchemaAttribute$AttributeType": "The data type of the field.
" + "SchemaAttribute$AttributeType": "The data type of the field.
For a related time series dataset, all attributes other than the date, item_id, and forecast dimension attributes should be of a numerical type (integer/float).
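A sketch of a related time series schema consistent with this note; the attribute names are illustrative, not required fields:

```python
# Illustrative related time series schema: apart from the timestamp,
# item_id, and forecast-dimension attributes, every attribute is numeric.
related_time_series_schema = {
    "Attributes": [
        {"AttributeName": "timestamp", "AttributeType": "timestamp"},
        {"AttributeName": "item_id", "AttributeType": "string"},
        {"AttributeName": "store_id", "AttributeType": "string"},  # forecast dimension
        {"AttributeName": "price", "AttributeType": "float"},
        {"AttributeName": "stock_level", "AttributeType": "integer"},
    ]
}
```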
" } }, "AutoMLOverrideStrategy": { @@ -185,11 +207,29 @@ "GetAccuracyMetricsResponse$AutoMLOverrideStrategy": " The LatencyOptimized
AutoML override strategy is only available in private beta. Contact AWS Support or your account manager to learn more about access privileges.
The AutoML strategy used to train the predictor. Unless LatencyOptimized
is specified, the AutoML strategy optimizes predictor accuracy.
This parameter is only valid for predictors trained using AutoML.
" } }, + "Baseline": { + "base": "Metrics you can use as a baseline for comparison purposes. Use these metrics when you interpret monitoring results for an auto predictor.
", + "refs": { + "DescribeMonitorResponse$Baseline": "Metrics you can use as a baseline for comparison purposes. Use these values you interpret monitoring results for an auto predictor.
" + } + }, + "BaselineMetric": { + "base": "An individual metric that you can use for comparison as you evaluate your monitoring results.
", + "refs": { + "BaselineMetrics$member": null + } + }, + "BaselineMetrics": { + "base": null, + "refs": { + "PredictorBaseline$BaselineMetrics": "The initial accuracy metrics for the predictor. Use these metrics as a baseline for comparison purposes as you use your predictor and the metrics change.
" + } + }, "Boolean": { "base": null, "refs": { "CreateAutoPredictorRequest$ExplainPredictor": "Create an Explainability resource for the predictor.
", - "CreateExplainabilityRequest$EnableVisualization": "Create an Expainability visualization that is viewable within the AWS console.
", + "CreateExplainabilityRequest$EnableVisualization": "Create an Explainability visualization that is viewable within the AWS console.
", "CreatePredictorRequest$PerformAutoML": "Whether to perform AutoML. When Amazon Forecast performs AutoML, it evaluates the algorithms it provides and chooses the best algorithm and configuration for your training dataset.
The default value is false
. In this case, you are required to specify an algorithm.
Set PerformAutoML
to true
to have Amazon Forecast perform AutoML. This is a good option if you aren't sure which algorithm is suitable for your training data. In this case, PerformHPO
must be false.
Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter values for your training data. The process of performing HPO is known as running a hyperparameter tuning job.
The default value is false
. In this case, Amazon Forecast uses default hyperparameter values from the chosen algorithm.
To override the default values, set PerformHPO
to true
and, optionally, supply the HyperParameterTuningJobConfig object. The tuning job specifies a metric to optimize, which hyperparameters participate in tuning, and the valid range for each tunable hyperparameter. In this case, you are required to specify an algorithm and PerformAutoML
must be false.
The following algorithms support HPO:
DeepAR+
CNN-QR
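A sketch of the PerformHPO configuration described above, pairing an explicit algorithm with PerformAutoML set to false; the names and ARNs are placeholders:

```python
import boto3

forecast = boto3.client("forecast")

# HPO requires an explicit algorithm, and PerformAutoML must be False.
forecast.create_predictor(
    PredictorName="example_hpo_predictor",
    AlgorithmArn="arn:aws:forecast:::algorithm/Deep_AR_Plus",
    ForecastHorizon=10,
    PerformAutoML=False,
    PerformHPO=True,
    InputDataConfig={
        "DatasetGroupArn": "arn:aws:forecast:us-west-2:123456789012:dataset-group/example"
    },
    FeaturizationConfig={"ForecastFrequency": "D"},
)
```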
Whether the visualization was enabled for the Explainability resource.
", @@ -216,7 +256,7 @@ "Configuration": { "base": null, "refs": { - "AdditionalDataset$Configuration": "Weather Index
To enable the Weather Index, do not specify a value for Configuration
.
Holidays
To enable Holidays, set CountryCode
to one of the following two-letter country codes:
\"AL\" - ALBANIA
\"AR\" - ARGENTINA
\"AT\" - AUSTRIA
\"AU\" - AUSTRALIA
\"BA\" - BOSNIA HERZEGOVINA
\"BE\" - BELGIUM
\"BG\" - BULGARIA
\"BO\" - BOLIVIA
\"BR\" - BRAZIL
\"BY\" - BELARUS
\"CA\" - CANADA
\"CL\" - CHILE
\"CO\" - COLOMBIA
\"CR\" - COSTA RICA
\"HR\" - CROATIA
\"CZ\" - CZECH REPUBLIC
\"DK\" - DENMARK
\"EC\" - ECUADOR
\"EE\" - ESTONIA
\"ET\" - ETHIOPIA
\"FI\" - FINLAND
\"FR\" - FRANCE
\"DE\" - GERMANY
\"GR\" - GREECE
\"HU\" - HUNGARY
\"IS\" - ICELAND
\"IN\" - INDIA
\"IE\" - IRELAND
\"IT\" - ITALY
\"JP\" - JAPAN
\"KZ\" - KAZAKHSTAN
\"KR\" - KOREA
\"LV\" - LATVIA
\"LI\" - LIECHTENSTEIN
\"LT\" - LITHUANIA
\"LU\" - LUXEMBOURG
\"MK\" - MACEDONIA
\"MT\" - MALTA
\"MX\" - MEXICO
\"MD\" - MOLDOVA
\"ME\" - MONTENEGRO
\"NL\" - NETHERLANDS
\"NZ\" - NEW ZEALAND
\"NI\" - NICARAGUA
\"NG\" - NIGERIA
\"NO\" - NORWAY
\"PA\" - PANAMA
\"PY\" - PARAGUAY
\"PE\" - PERU
\"PL\" - POLAND
\"PT\" - PORTUGAL
\"RO\" - ROMANIA
\"RU\" - RUSSIA
\"RS\" - SERBIA
\"SK\" - SLOVAKIA
\"SI\" - SLOVENIA
\"ZA\" - SOUTH AFRICA
\"ES\" - SPAIN
\"SE\" - SWEDEN
\"CH\" - SWITZERLAND
\"UA\" - UKRAINE
\"AE\" - UNITED ARAB EMIRATES
\"US\" - UNITED STATES
\"UK\" - UNITED KINGDOM
\"UY\" - URUGUAY
\"VE\" - VENEZUELA
Weather Index
To enable the Weather Index, do not specify a value for Configuration
.
Holidays
To enable Holidays, set CountryCode
to one of the following two-letter country codes:
\"AL\" - ALBANIA
\"AR\" - ARGENTINA
\"AT\" - AUSTRIA
\"AU\" - AUSTRALIA
\"BA\" - BOSNIA HERZEGOVINA
\"BE\" - BELGIUM
\"BG\" - BULGARIA
\"BO\" - BOLIVIA
\"BR\" - BRAZIL
\"BY\" - BELARUS
\"CA\" - CANADA
\"CL\" - CHILE
\"CO\" - COLOMBIA
\"CR\" - COSTA RICA
\"HR\" - CROATIA
\"CZ\" - CZECH REPUBLIC
\"DK\" - DENMARK
\"EC\" - ECUADOR
\"EE\" - ESTONIA
\"ET\" - ETHIOPIA
\"FI\" - FINLAND
\"FR\" - FRANCE
\"DE\" - GERMANY
\"GR\" - GREECE
\"HU\" - HUNGARY
\"IS\" - ICELAND
\"IN\" - INDIA
\"IE\" - IRELAND
\"IT\" - ITALY
\"JP\" - JAPAN
\"KZ\" - KAZAKHSTAN
\"KR\" - KOREA
\"LV\" - LATVIA
\"LI\" - LIECHTENSTEIN
\"LT\" - LITHUANIA
\"LU\" - LUXEMBOURG
\"MK\" - MACEDONIA
\"MT\" - MALTA
\"MX\" - MEXICO
\"MD\" - MOLDOVA
\"ME\" - MONTENEGRO
\"NL\" - NETHERLANDS
\"NZ\" - NEW ZEALAND
\"NI\" - NICARAGUA
\"NG\" - NIGERIA
\"NO\" - NORWAY
\"PA\" - PANAMA
\"PY\" - PARAGUAY
\"PE\" - PERU
\"PL\" - POLAND
\"PT\" - PORTUGAL
\"RO\" - ROMANIA
\"RU\" - RUSSIA
\"RS\" - SERBIA
\"SK\" - SLOVAKIA
\"SI\" - SLOVENIA
\"ZA\" - SOUTH AFRICA
\"ES\" - SPAIN
\"SE\" - SWEDEN
\"CH\" - SWITZERLAND
\"UA\" - UKRAINE
\"AE\" - UNITED ARAB EMIRATES
\"US\" - UNITED STATES
\"UK\" - UNITED KINGDOM
\"UY\" - URUGUAY
\"VE\" - VENEZUELA
The source of your data, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the data and, optionally, an AWS Key Management Service (KMS) key.
", "refs": { - "CreateDatasetImportJobRequest$DataSource": "The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.
If encryption is used, DataSource
must include an AWS Key Management Service (KMS) key and the IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in the EncryptionConfig
parameter of the CreateDataset operation.
The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.
If encryption is used, DataSource
must include an AWS Key Management Service (KMS) key and the IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in the EncryptionConfig
parameter of the CreateDataset operation.
The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.
If encryption is used, DataSource
includes an AWS Key Management Service (KMS) key.
The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data.
If encryption is used, DataSource
includes an AWS Key Management Service (KMS) key.
Provides a summary of the dataset group properties used in the ListDatasetGroups operation. To get the complete set of properties, call the DescribeDatasetGroup operation, and provide the DatasetGroupArn
.
Provides a summary of the dataset group properties used in the ListDatasetGroups operation. To get the complete set of properties, call the DescribeDatasetGroup operation, and provide the DatasetGroupArn
.
Provides a summary of the dataset import job properties used in the ListDatasetImportJobs operation. To get the complete set of properties, call the DescribeDatasetImportJob operation, and provide the DatasetImportJobArn
.
Provides a summary of the dataset import job properties used in the ListDatasetImportJobs operation. To get the complete set of properties, call the DescribeDatasetImportJob operation, and provide the DatasetImportJobArn
.
Provides a summary of the dataset properties used in the ListDatasets operation. To get the complete set of properties, call the DescribeDataset operation, and provide the DatasetArn
.
Provides a summary of the dataset properties used in the ListDatasets operation. To get the complete set of properties, call the DescribeDataset operation, and provide the DatasetArn
.
The type of event. For example, Retrain
. A retraining event denotes the timepoint when a predictor was retrained. Any monitor results from before the Datetime
are from the previous predictor. Any new metrics are for the newly retrained predictor.
The domain associated with the dataset group. When you add a dataset to a dataset group, this value and the value specified for the Domain
parameter of the CreateDataset operation must match.
The Domain
and DatasetType
that you choose determine the fields that must be present in training data that you import to a dataset. For example, if you choose the RETAIL
domain and TARGET_TIME_SERIES
as the DatasetType
, Amazon Forecast requires that item_id
, timestamp
, and demand
fields are present in your data. For more information, see howitworks-datasets-groups.
The domain associated with the dataset. When you add a dataset to a dataset group, this value and the value specified for the Domain
parameter of the CreateDatasetGroup operation must match.
The Domain
and DatasetType
that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the RETAIL
domain and TARGET_TIME_SERIES
as the DatasetType
, Amazon Forecast requires item_id
, timestamp
, and demand
fields to be present in your data. For more information, see howitworks-datasets-groups.
The domain associated with the dataset group. When you add a dataset to a dataset group, this value and the value specified for the Domain
parameter of the CreateDataset operation must match.
The Domain
and DatasetType
that you choose determine the fields that must be present in training data that you import to a dataset. For example, if you choose the RETAIL
domain and TARGET_TIME_SERIES
as the DatasetType
, Amazon Forecast requires that item_id
, timestamp
, and demand
fields are present in your data. For more information, see Dataset groups.
The domain associated with the dataset. When you add a dataset to a dataset group, this value and the value specified for the Domain
parameter of the CreateDatasetGroup operation must match.
The Domain
and DatasetType
that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the RETAIL
domain and TARGET_TIME_SERIES
as the DatasetType
, Amazon Forecast requires item_id
, timestamp
, and demand
fields to be present in your data. For more information, see Importing datasets.
The domain associated with the dataset.
", "DescribeDatasetGroupResponse$Domain": "The domain associated with the dataset group.
", "DescribeDatasetResponse$Domain": "The domain associated with the dataset.
" @@ -569,6 +640,7 @@ "Double": { "base": null, "refs": { + "BaselineMetric$Value": "The value for the metric.
", "ContinuousParameterRange$MaxValue": "The maximum tunable value of the hyperparameter.
", "ContinuousParameterRange$MinValue": "The minimum tunable value of the hyperparameter.
", "DescribeDatasetImportJobResponse$DataSize": "The size of the dataset in gigabytes (GB) after the import job has finished.
", @@ -576,6 +648,7 @@ "ErrorMetric$RMSE": "The root-mean-square error (RMSE).
", "ErrorMetric$MASE": "The Mean Absolute Scaled Error (MASE)
", "ErrorMetric$MAPE": "The Mean Absolute Percentage Error (MAPE)
", + "MetricResult$MetricValue": "The value for the metric.
", "Metrics$RMSE": "The root-mean-square error (RMSE).
", "Metrics$AverageWeightedQuantileLoss": "The average value of all weighted quantile losses.
", "Statistics$Avg": "For a numeric field, the average value in the field.
", @@ -639,6 +712,13 @@ "PredictorEvaluationResults$member": null } }, + "EvaluationState": { + "base": null, + "refs": { + "DescribeMonitorResponse$LastEvaluationState": "The state of the monitor's latest evaluation.
", + "PredictorMonitorEvaluation$EvaluationState": "The status of the monitor evaluation. The state can be SUCCESS
or FAILURE
.
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude resources that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are ResourceArn
and Status
.
Value
- The value to match.
An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the forecast export jobs that match the statement from the list, respectively. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
. To include the forecast export jobs that match the statement, specify IS
. To exclude matching forecast export jobs, specify IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are ForecastArn
and Status
.
Value
- The value to match.
For example, to list all jobs that export a forecast named electricityforecast, specify the following filter:
\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"ForecastArn\", \"Value\": \"arn:aws:forecast:us-west-2:<acct-id>:forecast/electricityforecast\" } ]
An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the forecasts that match the statement from the list, respectively. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
. To include the forecasts that match the statement, specify IS
. To exclude matching forecasts, specify IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are DatasetGroupArn
, PredictorArn
, and Status
.
Value
- The value to match.
For example, to list all forecasts whose status is not ACTIVE, you would specify:
\"Filters\": [ { \"Condition\": \"IS_NOT\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the resources that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
.
Key
- The name of the parameter to filter on. The only valid value is EvaluationState
.
Value
- The value to match. Valid values are only SUCCESS
or FAILURE
.
For example, to list only successful monitor evaluations, you would specify:
\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"EvaluationState\", \"Value\": \"SUCCESS\" } ]
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the resources that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
.
Key
- The name of the parameter to filter on. The only valid value is Status
.
Value
- The value to match.
For example, to list all monitors whose status is ACTIVE, you would specify:
\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the predictor backtest export jobs that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
. To include the predictor backtest export jobs that match the statement, specify IS
. To exclude matching predictor backtest export jobs, specify IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are PredictorArn
and Status
.
Value
- The value to match.
An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the predictors that match the statement from the list, respectively. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
. To include the predictors that match the statement, specify IS
. To exclude matching predictors, specify IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are DatasetGroupArn
and Status
.
Value
- The value to match.
For example, to list all predictors whose status is ACTIVE, you would specify:
\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]
The forecast types used to train a predictor. You can specify up to five forecast types. Forecast types can be quantiles from 0.01 to 0.99, by increments of 0.01 or higher. You can also specify the mean forecast with mean
.
The quantiles at which probabilistic forecasts are generated. You can currently specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99
(increments of .01 only) and mean
. The mean forecast is different from the median (0.50) when the distribution is not symmetric (for example, Beta and Negative Binomial). The default value is [\"0.1\", \"0.5\", \"0.9\"]
.
The quantiles at which probabilistic forecasts are generated. You can currently specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99
(increments of .01 only) and mean
. The mean forecast is different from the median (0.50) when the distribution is not symmetric (for example, Beta and Negative Binomial).
The default quantiles are the quantiles you specified during predictor creation. If you didn't specify quantiles, the default values are [\"0.1\", \"0.5\", \"0.9\"]
.
Specifies the forecast types used to train a predictor. You can specify up to five forecast types. Forecast types can be quantiles from 0.01 to 0.99, by increments of 0.01 or higher. You can also specify the mean forecast with mean
.
The default value is [\"0.10\", \"0.50\", \"0.9\"]
.
The forecast types used during predictor training. Default value is [\"0.1\",\"0.5\",\"0.9\"].
", "DescribeForecastResponse$ForecastTypes": "The quantiles at which probabilistic forecasts were generated.
", @@ -850,7 +932,7 @@ "Integer": { "base": null, "refs": { - "CreateAutoPredictorRequest$ForecastHorizon": "The number of time-steps that the model predicts. The forecast horizon is also called the prediction length.
", + "CreateAutoPredictorRequest$ForecastHorizon": "The number of time-steps that the model predicts. The forecast horizon is also called the prediction length.
The maximum forecast horizon is the lesser of 500 time-steps or 1/4 of the TARGET_TIME_SERIES dataset length. If you are retraining an existing AutoPredictor, then the maximum forecast horizon is the lesser of 500 time-steps or 1/3 of the TARGET_TIME_SERIES dataset length.
If you are upgrading to an AutoPredictor or retraining an existing AutoPredictor, you cannot update the forecast horizon parameter. You can meet this requirement by providing longer time-series in the dataset.
", "CreatePredictorRequest$ForecastHorizon": "Specifies the number of time-steps that the model is trained to predict. The forecast horizon is also called the prediction length.
For example, if you configure a dataset for daily data collection (using the DataFrequency
parameter of the CreateDataset operation) and set the forecast horizon to 10, the model returns predictions for 10 days.
The maximum forecast horizon is the lesser of 500 time-steps or 1/3 of the TARGET_TIME_SERIES dataset length.
", "DescribeAutoPredictorResponse$ForecastHorizon": "The number of time-steps that the model predicts. The forecast horizon is also called the prediction length.
", "DescribePredictorResponse$ForecastHorizon": "The number of time-steps of the forecast. The forecast horizon is also called the prediction length.
", @@ -969,6 +1051,26 @@ "refs": { } }, + "ListMonitorEvaluationsRequest": { + "base": null, + "refs": { + } + }, + "ListMonitorEvaluationsResponse": { + "base": null, + "refs": { + } + }, + "ListMonitorsRequest": { + "base": null, + "refs": { + } + }, + "ListMonitorsResponse": { + "base": null, + "refs": { + } + }, "ListPredictorBacktestExportJobsRequest": { "base": null, "refs": { @@ -1015,7 +1117,9 @@ "DescribeDatasetImportJobResponse$EstimatedTimeRemainingInMinutes": "The estimated time remaining in minutes for the dataset import job to complete.
", "DescribeExplainabilityResponse$EstimatedTimeRemainingInMinutes": "The estimated time remaining in minutes for the CreateExplainability job to complete.
", "DescribeForecastResponse$EstimatedTimeRemainingInMinutes": "The estimated time remaining in minutes for the forecast job to complete.
", + "DescribeMonitorResponse$EstimatedEvaluationTimeRemainingInMinutes": "The estimated number of minutes remaining before the monitor resource finishes its current evaluation.
", "DescribePredictorResponse$EstimatedTimeRemainingInMinutes": "The estimated time remaining in minutes for the predictor training job to complete.
", + "PredictorMonitorEvaluation$NumItemsEvaluated": "The number of items considered during the evaluation.
", "Statistics$CountLong": "The number of values in the field. CountLong
is used instead of Count
if the value is greater than 2,147,483,647.
The number of distinct values in the field. CountDistinctLong
is used instead of CountDistinct
if the value is greater than 2,147,483,647.
The number of null values in the field. CountNullLong
is used instead of CountNull
if the value is greater than 2,147,483,647.
The number of items to return in the response.
", "ListForecastExportJobsRequest$MaxResults": "The number of items to return in the response.
", "ListForecastsRequest$MaxResults": "The number of items to return in the response.
", + "ListMonitorEvaluationsRequest$MaxResults": "The maximum number of monitoring results to return.
", + "ListMonitorsRequest$MaxResults": "The maximum number of monitors to include in the response.
", "ListPredictorBacktestExportJobsRequest$MaxResults": "The number of items to return in the response.
", "ListPredictorsRequest$MaxResults": "The number of items to return in the response.
" } @@ -1044,9 +1150,29 @@ "DescribeExplainabilityExportResponse$Message": "Information about any errors that occurred during the export.
", "DescribeExplainabilityResponse$Message": "If an error occurred, a message about the error.
", "DescribeForecastExportJobResponse$Message": "If an error occurred, an informational message about the error.
", + "DescribeMonitorResponse$Message": "An error message, if any, for the monitor.
", "DescribePredictorBacktestExportJobResponse$Message": "Information about any errors that may have occurred during the backtest export.
", "DescribePredictorResponse$Message": "If an error occurred, an informational message about the error.
", - "ExplainabilitySummary$Message": "Information about any errors that may have occurred during the Explainability creation process.
" + "ExplainabilitySummary$Message": "Information about any errors that may have occurred during the Explainability creation process.
", + "PredictorMonitorEvaluation$Message": "Information about any errors that may have occurred during the monitor evaluation.
" + } + }, + "MetricName": { + "base": null, + "refs": { + "MetricResult$MetricName": "The name of the metric.
" + } + }, + "MetricResult": { + "base": "An individual metric Forecast calculated when monitoring predictor usage. You can compare the value for this metric to the metric's value in the Baseline to see how your predictor's performance is changing.
For more information about the metrics generated by Forecast, see Evaluating Predictor Accuracy.
", + "refs": { + "MetricResults$member": null + } + }, + "MetricResults": { + "base": null, + "refs": { + "PredictorMonitorEvaluation$MetricResults": "A list of metrics Forecast calculated when monitoring a predictor. You can compare the value for each metric in the list to the metric's value in the Baseline to see how your predictor's performance is changing.
" } }, "Metrics": { @@ -1055,11 +1181,42 @@ "WindowSummary$Metrics": "Provides metrics used to evaluate the performance of a predictor.
" } }, + "MonitorConfig": { + "base": "The configuration details for the predictor monitor.
", + "refs": { + "CreateAutoPredictorRequest$MonitorConfig": "The configuration details for predictor monitoring. Provide a name for the monitor resource to enable predictor monitoring.
Predictor monitoring allows you to see how your predictor's performance changes over time. For more information, see Predictor Monitoring.
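A sketch of enabling monitoring when the predictor is created; the names and dataset group ARN are placeholders:

```python
import boto3

forecast = boto3.client("forecast")

# Supplying MonitorConfig opts the new auto predictor into monitoring.
forecast.create_auto_predictor(
    PredictorName="example_demand_predictor",
    ForecastHorizon=14,
    ForecastFrequency="D",
    ForecastTypes=["0.10", "0.50", "0.90"],
    DataConfig={
        "DatasetGroupArn": "arn:aws:forecast:us-west-2:123456789012:dataset-group/example"
    },
    MonitorConfig={"MonitorName": "example_demand_predictor_monitor"},
)
```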
" + } + }, + "MonitorDataSource": { + "base": "The source of the data the monitor used during the evaluation.
", + "refs": { + "PredictorMonitorEvaluation$MonitorDataSource": "The source of the data the monitor resource used during the evaluation.
" + } + }, + "MonitorInfo": { + "base": "Provides information about the monitor resource.
", + "refs": { + "DescribeAutoPredictorResponse$MonitorInfo": "A object with the Amazon Resource Name (ARN) and status of the monitor resource.
" + } + }, + "MonitorSummary": { + "base": "Provides a summary of the monitor properties used in the ListMonitors operation. To get a complete set of properties, call the DescribeMonitor operation, and provide the listed MonitorArn
.
An array of objects that summarize each monitor's properties.
" + } + }, "Name": { "base": null, "refs": { "AdditionalDataset$Name": "The name of the additional dataset. Valid names: \"holiday\"
and \"weather\"
.
The name of the attribute as specified in the schema. Amazon Forecast supports the target field of the target time series and the related time series datasets. For example, for the RETAIL domain, the target is demand
.
The name of the metric.
", "CategoricalParameterRange$Name": "The name of the categorical hyperparameter to tune.
", "Configuration$key": null, "ContinuousParameterRange$Name": "The name of the hyperparameter to tune.
", @@ -1071,6 +1228,7 @@ "CreateExplainabilityRequest$ExplainabilityName": "A unique name for the Explainability.
", "CreateForecastExportJobRequest$ForecastExportJobName": "The name for the forecast export job.
", "CreateForecastRequest$ForecastName": "A name for the forecast.
", + "CreateMonitorRequest$MonitorName": "The name of the monitor resource.
", "CreatePredictorBacktestExportJobRequest$PredictorBacktestExportJobName": "The name for the backtest export job.
", "CreatePredictorRequest$PredictorName": "A name for the predictor.
", "DatasetGroupSummary$DatasetGroupName": "The name of the dataset group.
", @@ -1084,6 +1242,7 @@ "DescribeExplainabilityResponse$ExplainabilityName": "The name of the Explainability.
", "DescribeForecastExportJobResponse$ForecastExportJobName": "The name of the forecast export job.
", "DescribeForecastResponse$ForecastName": "The name of the forecast.
", + "DescribeMonitorResponse$MonitorName": "The name of the monitor.
", "DescribePredictorBacktestExportJobResponse$PredictorBacktestExportJobName": "The name of the predictor backtest export job.
", "DescribePredictorResponse$PredictorArn": "The ARN of the predictor.
", "DescribePredictorResponse$PredictorName": "The name of the predictor.
", @@ -1094,6 +1253,8 @@ "ForecastExportJobSummary$ForecastExportJobName": "The name of the forecast export job.
", "ForecastSummary$ForecastName": "The name of the forecast.
", "IntegerParameterRange$Name": "The name of the hyperparameter to tune.
", + "MonitorConfig$MonitorName": "The name of the monitor resource.
", + "MonitorSummary$MonitorName": "The name of the monitor resource.
", "PredictorBacktestExportJobSummary$PredictorBacktestExportJobName": "The name of the predictor backtest export job.
", "PredictorSummary$PredictorName": "The name of the predictor.
", "SchemaAttribute$AttributeName": "The name of the dataset field.
", @@ -1118,6 +1279,10 @@ "ListForecastExportJobsResponse$NextToken": "If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.
", "ListForecastsRequest$NextToken": "If the result of the previous request was truncated, the response includes a NextToken
. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.
If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.
", + "ListMonitorEvaluationsRequest$NextToken": "If the result of the previous request was truncated, the response includes a NextToken
. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.
If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.
", + "ListMonitorsRequest$NextToken": "If the result of the previous request was truncated, the response includes a NextToken
. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.
If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.
", "ListPredictorBacktestExportJobsRequest$NextToken": "If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.
", "ListPredictorBacktestExportJobsResponse$NextToken": "Returns this token if the response is truncated. To retrieve the next set of results, use the token in the next request.
", "ListPredictorsRequest$NextToken": "If the result of the previous request was truncated, the response includes a NextToken
. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.
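Because the new list operations are also registered in the paginators file later in this diff, the NextToken bookkeeping can be left to the SDK; a sketch:

```python
import boto3

forecast = boto3.client("forecast")

# The paginator threads NextToken between calls; since tokens expire after
# 24 hours, pages should be consumed promptly.
paginator = forecast.get_paginator("list_monitors")
for page in paginator.paginate(MaxResults=10):
    for monitor in page["Monitors"]:
        print(monitor["MonitorArn"], monitor["Status"])
```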
An array of objects that summarize the properties of each predictor backtest export job.
" } }, + "PredictorBaseline": { + "base": "Metrics you can use as a baseline for comparison purposes. Use these metrics when you interpret monitoring results for an auto predictor.
", + "refs": { + "Baseline$PredictorBaseline": "The initial accuracy metrics for the predictor you are monitoring. Use these metrics as a baseline for comparison purposes as you use your predictor and the metrics change.
" + } + }, "PredictorEvaluationResults": { "base": null, "refs": { "GetAccuracyMetricsResponse$PredictorEvaluationResults": "An array of results from evaluating the predictor.
" } }, + "PredictorEvent": { + "base": "Provides details about a predictor event, such as a retraining.
", + "refs": { + "PredictorMonitorEvaluation$PredictorEvent": "Provides details about a predictor event, such as a retraining.
" + } + }, "PredictorExecution": { "base": "The algorithm used to perform a backtest and the status of those tests.
", "refs": { @@ -1190,6 +1367,18 @@ "PredictorExecutionDetails$PredictorExecutions": "An array of the backtests performed to evaluate the accuracy of the predictor against a particular algorithm. The NumberOfBacktestWindows
from the object determines the number of windows in the array.
Describes the results of a monitor evaluation.
", + "refs": { + "PredictorMonitorEvaluations$member": null + } + }, + "PredictorMonitorEvaluations": { + "base": null, + "refs": { + "ListMonitorEvaluationsResponse$PredictorMonitorEvaluations": "The monitoring results and predictor events collected by the monitor resource during different windows of time.
For information about monitoring and about retrieving monitoring results, see Viewing Monitoring Results.
" + } + }, "PredictorSummary": { "base": "Provides a summary of the predictor properties that are used in the ListPredictors operation. To get the complete set of properties, call the DescribePredictor operation, and provide the listed PredictorArn
.
The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the file(s). Optionally, includes an AWS Key Management Service (KMS) key. This object is part of the DataSource object that is submitted in the CreateDatasetImportJob request, and part of the DataDestination object.
", "refs": { @@ -1247,14 +1441,14 @@ "Schema": { "base": "Defines the fields of a dataset.
", "refs": { - "CreateDatasetRequest$Schema": "The schema for the dataset. The schema attributes and their order must match the fields in your data. The dataset Domain
and DatasetType
that you choose determine the minimum required fields in your training data. For information about the required fields for a specific dataset domain and type, see howitworks-domains-ds-types.
The schema for the dataset. The schema attributes and their order must match the fields in your data. The dataset Domain
and DatasetType
that you choose determine the minimum required fields in your training data. For information about the required fields for a specific dataset domain and type, see Dataset Domains and Dataset Types.
An array of SchemaAttribute
objects that specify the dataset fields. Each SchemaAttribute
specifies the name and data type of a field.
An attribute of a schema, which defines a dataset field. A schema attribute is required for every field in a dataset. The Schema object contains an array of SchemaAttribute
objects.
An attribute of a schema, which defines a dataset field. A schema attribute is required for every field in a dataset. The Schema object contains an array of SchemaAttribute
objects.
Provides statistics for each data field imported into to an Amazon Forecast dataset with the CreateDatasetImportJob operation.
", + "base": "Provides statistics for each data field imported into to an Amazon Forecast dataset with the CreateDatasetImportJob operation.
", "refs": { "FieldStatistics$value": null } @@ -1282,12 +1476,13 @@ "refs": { "DatasetImportJobSummary$Status": "The status of the dataset import job. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
The status of the predictor. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the dataset group. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
UPDATE_PENDING
, UPDATE_IN_PROGRESS
, UPDATE_FAILED
The UPDATE
states apply when you call the UpdateDatasetGroup operation.
The Status
of the dataset group must be ACTIVE
before you can use the dataset group to create a predictor.
The status of the dataset group. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
UPDATE_PENDING
, UPDATE_IN_PROGRESS
, UPDATE_FAILED
The UPDATE
states apply when you call the UpdateDatasetGroup operation.
The Status
of the dataset group must be ACTIVE
before you can use the dataset group to create a predictor.
The status of the dataset import job. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
The status of the dataset. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
UPDATE_PENDING
, UPDATE_IN_PROGRESS
, UPDATE_FAILED
The UPDATE
states apply while data is imported to the dataset from a call to the CreateDatasetImportJob operation and reflect the status of the dataset import job. For example, when the import job status is CREATE_IN_PROGRESS
, the status of the dataset is UPDATE_IN_PROGRESS
.
The Status
of the dataset must be ACTIVE
before you can import training data.
The status of the dataset. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
UPDATE_PENDING
, UPDATE_IN_PROGRESS
, UPDATE_FAILED
The UPDATE
states apply while data is imported to the dataset from a call to the CreateDatasetImportJob operation and reflect the status of the dataset import job. For example, when the import job status is CREATE_IN_PROGRESS
, the status of the dataset is UPDATE_IN_PROGRESS
.
The Status
of the dataset must be ACTIVE
before you can import training data.
The status of the Explainability export. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the Explainability resource. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the forecast export job. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The Status
of the forecast export job must be ACTIVE
before you can access the forecast in your S3 bucket.
The status of the monitor resource.
", "DescribePredictorBacktestExportJobResponse$Status": "The status of the predictor backtest export job. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the predictor. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
The Status
of the predictor must be ACTIVE
before you can use the predictor to create a forecast.
The status of the Explainability export. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the Explainability. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the forecast export job. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The Status
of the forecast export job must be ACTIVE
before you can access the forecast in your S3 bucket.
The status of the forecast. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The Status
of the forecast must be ACTIVE
before you can query or export the forecast.
The status of the monitor. States include:
ACTIVE
ACTIVE_STOPPING
, ACTIVE_STOPPED
UPDATE_IN_PROGRESS
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
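A sketch of polling against these states until creation settles; the state names follow the list above and the ARN is a placeholder:

```python
import time

import boto3

forecast = boto3.client("forecast")
monitor_arn = "arn:aws:forecast:us-west-2:123456789012:monitor/example_monitor"

# Wait for the monitor to leave the CREATE_* states.
while True:
    status = forecast.describe_monitor(MonitorArn=monitor_arn)["Status"]
    if status not in ("CREATE_PENDING", "CREATE_IN_PROGRESS"):
        break
    time.sleep(30)
print("Monitor status:", status)
```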
The status of the monitor. States include:
ACTIVE
ACTIVE_STOPPING
, ACTIVE_STOPPED
UPDATE_IN_PROGRESS
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the predictor backtest export job. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the predictor. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
The Status
of the predictor must be ACTIVE
before you can use the predictor to create a forecast.
The status of the test. Possible status values are:
ACTIVE
CREATE_IN_PROGRESS
CREATE_FAILED
Optional metadata to help you categorize and organize your resources. Each tag consists of a key and an optional value, both of which you define. Tag keys and values are case sensitive.
The following restrictions apply to tags:
For each resource, each tag key must be unique and each tag key must have one value.
Maximum number of tags per resource: 50.
Maximum key length: 128 Unicode characters in UTF-8.
Maximum value length: 256 Unicode characters in UTF-8.
Accepted characters: all letters and numbers, spaces representable in UTF-8, and + - = . _ : / @. If your tagging schema is used across other services and resources, the character restrictions of those services also apply.
Key prefixes cannot include any upper or lowercase combination of aws:
or AWS:
. Values can have this prefix. If a tag value has aws
as its prefix but the key does not, Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws
do not count against your tags per resource limit. You cannot edit or delete tag keys with this prefix.
The optional metadata that you apply to the forecast export job to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.
The following basic restrictions apply to tags:
Maximum number of tags per resource - 50.
For each resource, each tag key must be unique, and each tag key can have only one value.
Maximum key length - 128 Unicode characters in UTF-8.
Maximum value length - 256 Unicode characters in UTF-8.
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
Tag keys and values are case sensitive.
Do not use aws:
, AWS:
, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws
as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws
do not count against your tags per resource limit.
The optional metadata that you apply to the forecast to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.
The following basic restrictions apply to tags:
Maximum number of tags per resource - 50.
For each resource, each tag key must be unique, and each tag key can have only one value.
Maximum key length - 128 Unicode characters in UTF-8.
Maximum value length - 256 Unicode characters in UTF-8.
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
Tag keys and values are case sensitive.
Do not use aws:
, AWS:
, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws
as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws
do not count against your tags per resource limit.
A list of tags to apply to the monitor resource.
", "CreatePredictorBacktestExportJobRequest$Tags": "Optional metadata to help you categorize and organize your backtests. Each tag consists of a key and an optional value, both of which you define. Tag keys and values are case sensitive.
The following restrictions apply to tags:
For each resource, each tag key must be unique and each tag key must have one value.
Maximum number of tags per resource: 50.
Maximum key length: 128 Unicode characters in UTF-8.
Maximum value length: 256 Unicode characters in UTF-8.
Accepted characters: all letters and numbers, spaces representable in UTF-8, and + - = . _ : / @. If your tagging schema is used across other services and resources, the character restrictions of those services also apply.
Key prefixes cannot include any upper or lowercase combination of aws:
or AWS:
. Values can have this prefix. If a tag value has aws
as its prefix but the key does not, Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws
do not count against your tags per resource limit. You cannot edit or delete tag keys with this prefix.
The optional metadata that you apply to the predictor to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.
The following basic restrictions apply to tags:
Maximum number of tags per resource - 50.
For each resource, each tag key must be unique, and each tag key can have only one value.
Maximum key length - 128 Unicode characters in UTF-8.
Maximum value length - 256 Unicode characters in UTF-8.
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
Tag keys and values are case sensitive.
Do not use aws:
, AWS:
, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws
as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws
do not count against your tags per resource limit.
The tags for the resource.
", @@ -1422,19 +1620,19 @@ "base": null, "refs": { "DatasetGroupSummary$CreationTime": "When the dataset group was created.
", - "DatasetGroupSummary$LastModificationTime": "When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime
is the current time of the ListDatasetGroups
call.
When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime
is the current time of the ListDatasetGroups
call.
When the dataset import job was created.
", "DatasetImportJobSummary$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
When the dataset was created.
", - "DatasetSummary$LastModificationTime": "When you create a dataset, LastModificationTime
is the same as CreationTime
. While data is being imported to the dataset, LastModificationTime
is the current time of the ListDatasets
call. After a CreateDatasetImportJob operation has finished, LastModificationTime
is when the import job completed or failed.
When you create a dataset, LastModificationTime
is the same as CreationTime
. While data is being imported to the dataset, LastModificationTime
is the current time of the ListDatasets
call. After a CreateDatasetImportJob operation has finished, LastModificationTime
is when the import job completed or failed.
The timestamp of the CreateAutoPredictor request.
", "DescribeAutoPredictorResponse$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
When the dataset group was created.
", - "DescribeDatasetGroupResponse$LastModificationTime": "When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime
is the current time of the DescribeDatasetGroup
call.
When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime
is the current time of the DescribeDatasetGroup
call.
When the dataset import job was created.
", "DescribeDatasetImportJobResponse$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
When the dataset was created.
", - "DescribeDatasetResponse$LastModificationTime": "When you create a dataset, LastModificationTime
is the same as CreationTime
. While data is being imported to the dataset, LastModificationTime
is the current time of the DescribeDataset
call. After a CreateDatasetImportJob operation has finished, LastModificationTime
is when the import job completed or failed.
When you create a dataset, LastModificationTime
is the same as CreationTime
. While data is being imported to the dataset, LastModificationTime
is the current time of the DescribeDataset
call. After a CreateDatasetImportJob operation has finished, LastModificationTime
is when the import job completed or failed.
When the Explainability export was created.
", "DescribeExplainabilityExportResponse$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
When the Explainability resource was created.
", @@ -1443,6 +1641,9 @@ "DescribeForecastExportJobResponse$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
When the forecast creation task was created.
", "DescribeForecastResponse$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
The timestamp of the latest evaluation completed by the monitor.
", + "DescribeMonitorResponse$CreationTime": "The timestamp for when the monitor resource was created.
", + "DescribeMonitorResponse$LastModificationTime": "The timestamp of the latest modification to the monitor.
", "DescribePredictorBacktestExportJobResponse$CreationTime": "When the predictor backtest export job was created.
", "DescribePredictorBacktestExportJobResponse$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
When the model training task was created.
", @@ -1455,8 +1656,14 @@ "ForecastExportJobSummary$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
When the forecast creation task was created.
", "ForecastSummary$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
When the monitor resource was created.
", + "MonitorSummary$LastModificationTime": "The last time the monitor resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
STOPPED
- When the resource stopped.
ACTIVE
or CREATE_FAILED
- When the monitor creation finished or failed.
When the predictor backtest export job was created.
", "PredictorBacktestExportJobSummary$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
The timestamp for when the event occurred.
", + "PredictorMonitorEvaluation$EvaluationTime": "The timestamp that indicates when the monitor evaluation was started.
", + "PredictorMonitorEvaluation$WindowStartDatetime": "The timestamp that indicates the start of the window that is used for monitor evaluation.
", + "PredictorMonitorEvaluation$WindowEndDatetime": "The timestamp that indicates the end of the window that is used for monitor evaluation.
", "PredictorSummary$CreationTime": "When the model training task was created.
", "PredictorSummary$LastModificationTime": "The last time the resource was modified. The timestamp depends on the status of the job:
CREATE_PENDING
- The CreationTime
.
CREATE_IN_PROGRESS
- The current timestamp.
CREATE_STOPPING
- The current timestamp.
CREATE_STOPPED
- When the job stopped.
ACTIVE
or CREATE_FAILED
- When the job finished or failed.
The time at which the test began.
", diff --git a/apis/forecast/2018-06-26/paginators-1.json b/apis/forecast/2018-06-26/paginators-1.json index da6de7d8f44..6543bf3f279 100644 --- a/apis/forecast/2018-06-26/paginators-1.json +++ b/apis/forecast/2018-06-26/paginators-1.json @@ -18,6 +18,18 @@ "output_token": "NextToken", "result_key": "Datasets" }, + "ListExplainabilities": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Explainabilities" + }, + "ListExplainabilityExports": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ExplainabilityExports" + }, "ListForecastExportJobs": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -30,6 +42,18 @@ "output_token": "NextToken", "result_key": "Forecasts" }, + "ListMonitorEvaluations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PredictorMonitorEvaluations" + }, + "ListMonitors": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Monitors" + }, "ListPredictorBacktestExportJobs": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/apis/personalize/2018-05-22/api-2.json b/apis/personalize/2018-05-22/api-2.json index e90fda0bec0..765053c4817 100644 --- a/apis/personalize/2018-05-22/api-2.json +++ b/apis/personalize/2018-05-22/api-2.json @@ -180,6 +180,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, {"shape":"TooManyTagsException"} ], "idempotent":true @@ -2450,7 +2451,8 @@ "lastUpdatedDateTime":{"shape":"Date"}, "status":{"shape":"Status"}, "failureReason":{"shape":"FailureReason"}, - "latestRecommenderUpdate":{"shape":"RecommenderUpdateSummary"} + "latestRecommenderUpdate":{"shape":"RecommenderUpdateSummary"}, + "modelMetrics":{"shape":"Metrics"} } }, "RecommenderConfig":{ diff --git a/apis/personalize/2018-05-22/docs-2.json b/apis/personalize/2018-05-22/docs-2.json index fc7233daf96..a1e21a3a89c 100644 --- a/apis/personalize/2018-05-22/docs-2.json +++ b/apis/personalize/2018-05-22/docs-2.json @@ -11,7 +11,7 @@ "CreateDatasetImportJob": "Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an IAM service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it internally. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 Resources.
The dataset import job replaces any existing data in the dataset that you imported in bulk.
Status
A dataset import job can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the job failed.
Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset.
Related APIs
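A sketch of the create-then-poll flow the entry above describes, assuming boto3 is configured; the job name, ARNs, and S3 location are placeholders:

```python
import time

import boto3

personalize = boto3.client("personalize")

# Kick off the bulk import; the role must be able to read the S3 object.
job_arn = personalize.create_dataset_import_job(
    jobName="demo-import",  # placeholder
    datasetArn="arn:aws:personalize:us-east-1:123456789012:dataset/demo/INTERACTIONS",
    dataSource={"dataLocation": "s3://demo-bucket/interactions.csv"},
    roleArn="arn:aws:iam::123456789012:role/PersonalizeS3Role",
)["datasetImportJobArn"]

# Importing takes time: poll until ACTIVE before training on the dataset.
while True:
    status = personalize.describe_dataset_import_job(
        datasetImportJobArn=job_arn
    )["datasetImportJob"]["status"]
    if status in ("ACTIVE", "CREATE FAILED"):
        break
    time.sleep(60)
```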
", "CreateEventTracker": "Creates an event tracker that you use when adding event data to a specified dataset group using the PutEvents API.
Only one event tracker can be associated with a dataset group. You will get an error if you call CreateEventTracker
using the same dataset group as an existing event tracker.
When you create an event tracker, the response includes a tracking ID, which you pass as a parameter when you use the PutEvents operation. Amazon Personalize then appends the event data to the Interactions dataset of the dataset group you specify in your event tracker.
The event tracker can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
To get the status of the event tracker, call DescribeEventTracker.
The event tracker must be in the ACTIVE state before using the tracking ID.
Related APIs
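A minimal sketch of the tracker-then-PutEvents flow above, with placeholder names and ARNs; production code should first poll DescribeEventTracker for ACTIVE:

```python
from datetime import datetime, timezone

import boto3

personalize = boto3.client("personalize")
events = boto3.client("personalize-events")

# One event tracker per dataset group; the response carries the tracking ID.
tracker = personalize.create_event_tracker(
    name="demo-tracker",  # placeholder
    datasetGroupArn="arn:aws:personalize:us-east-1:123456789012:dataset-group/demo",
)

# Once the tracker is ACTIVE, stream events into the Interactions dataset.
events.put_events(
    trackingId=tracker["trackingId"],
    userId="user-1",
    sessionId="session-1",
    eventList=[
        {
            "eventType": "click",
            "itemId": "item-42",
            "sentAt": datetime.now(timezone.utc),
        }
    ],
)
```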
", "CreateFilter": "Creates a recommendation filter. For more information, see Filtering recommendations and user segments.
", - "CreateRecommender": "Creates a recommender with the recipe (a Domain dataset group use case) you specify. You create recommenders for a Domain dataset group and specify the recommender's Amazon Resource Name (ARN) when you make a GetRecommendations request.
Minimum recommendation requests per second
When you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second (minRecommendationRequestsPerSecond
) specifies the baseline recommendation request throughput provisioned by Amazon Personalize. The default minRecommendationRequestsPerSecond is 1
. A recommendation request is a single GetRecommendations
operation. Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive your requests per hour and the price of your recommender usage.
If your requests per second increases beyond minRecommendationRequestsPerSecond
, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minRecommendationRequestsPerSecond
. There's a short time delay while the capacity is increased that might cause loss of requests.
Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond) or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window. We recommend starting with the default minRecommendationRequestsPerSecond
, track your usage using Amazon CloudWatch metrics, and then increase the minRecommendationRequestsPerSecond
as necessary.
Status
A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
To get the recommender status, call DescribeRecommender.
Wait until the status
of the recommender is ACTIVE
before asking the recommender for recommendations.
Related APIs
", + "CreateRecommender": "Creates a recommender with the recipe (a Domain dataset group use case) you specify. You create recommenders for a Domain dataset group and specify the recommender's Amazon Resource Name (ARN) when you make a GetRecommendations request.
Minimum recommendation requests per second
When you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second (minRecommendationRequestsPerSecond
) specifies the baseline recommendation request throughput provisioned by Amazon Personalize. The default minRecommendationRequestsPerSecond is 1
. A recommendation request is a single GetRecommendations
operation. Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive your requests per hour and the price of your recommender usage.
If your requests per second increases beyond minRecommendationRequestsPerSecond
, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minRecommendationRequestsPerSecond
. There's a short time delay while the capacity is increased that might cause loss of requests.
Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond) or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window. We recommend starting with the default minRecommendationRequestsPerSecond
, tracking your usage using Amazon CloudWatch metrics, and then increasing the minRecommendationRequestsPerSecond
as necessary.
Status
A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > ACTIVE
DELETE PENDING > DELETE IN_PROGRESS
To get the recommender status, call DescribeRecommender.
Wait until the status
of the recommender is ACTIVE
before asking the recommender for recommendations.
Related APIs
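The throughput discussion above maps to a single recommenderConfig field. A minimal sketch, assuming boto3 and placeholder ARNs (the recipe shown is one of the Domain use cases):

```python
import time

import boto3

personalize = boto3.client("personalize")
runtime = boto3.client("personalize-runtime")

# Start at the default floor of 1 request/second; raise the floor later,
# once CloudWatch shows sustained traffic above it.
recommender_arn = personalize.create_recommender(
    name="demo-recommender",  # placeholder
    datasetGroupArn="arn:aws:personalize:us-east-1:123456789012:dataset-group/demo",
    recipeArn="arn:aws:personalize:::recipe/aws-vod-most-popular",
    recommenderConfig={"minRecommendationRequestsPerSecond": 1},
)["recommenderArn"]

# Wait for ACTIVE before asking the recommender for recommendations.
while True:
    status = personalize.describe_recommender(
        recommenderArn=recommender_arn
    )["recommender"]["status"]
    if status in ("ACTIVE", "CREATE FAILED"):
        break
    time.sleep(60)

if status == "ACTIVE":
    items = runtime.get_recommendations(
        recommenderArn=recommender_arn, userId="user-1"
    )["itemList"]
```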
", "CreateSchema": "Creates an Amazon Personalize schema from the specified schema string. The schema you create must be in Avro JSON format.
Amazon Personalize recognizes three schema variants. Each schema is associated with a dataset type and has a set of required fields and keywords. If you are creating a schema for a dataset in a Domain dataset group, you provide the domain of the Domain dataset group. You specify a schema when you call CreateDataset.
Related APIs
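A sketch of the Avro JSON format the entry above refers to, using a minimal Interactions schema; the name and domain are placeholders, and the field set is abbreviated:

```python
import json

import boto3

personalize = boto3.client("personalize")

# Avro record with the core Interactions fields; domain is passed only
# when the schema targets a Domain dataset group.
schema = {
    "type": "record",
    "name": "Interactions",
    "namespace": "com.amazonaws.personalize.schema",
    "fields": [
        {"name": "USER_ID", "type": "string"},
        {"name": "ITEM_ID", "type": "string"},
        {"name": "TIMESTAMP", "type": "long"},
    ],
    "version": "1.0",
}

schema_arn = personalize.create_schema(
    name="demo-interactions-schema",  # placeholder
    schema=json.dumps(schema),
    domain="VIDEO_ON_DEMAND",  # omit for a custom dataset group
)["schemaArn"]
```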
", "CreateSolution": "Creates the configuration for training a model. A trained model is known as a solution. After the configuration is created, you train the model (create a solution) by calling the CreateSolutionVersion operation. Every time you call CreateSolutionVersion
, a new version of the solution is created.
After creating a solution version, you check its accuracy by calling GetSolutionMetrics. When you are satisfied with the version, you deploy it using CreateCampaign. The campaign provides recommendations to a client through the GetRecommendations API.
To train a model, Amazon Personalize requires training data and a recipe. The training data comes from the dataset group that you provide in the request. A recipe specifies the training algorithm and a feature transformation. You can specify one of the predefined recipes provided by Amazon Personalize. Alternatively, you can specify performAutoML
and Amazon Personalize will analyze your data and select the optimum USER_PERSONALIZATION recipe for you.
Amazon Personalize doesn't support configuring the hpoObjective
for solution hyperparameter optimization at this time.
Status
A solution can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
To get the status of the solution, call DescribeSolution. Wait until the status shows as ACTIVE before calling CreateSolutionVersion.
Related APIs
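A minimal sketch of creating the solution configuration described above, assuming boto3; the name and ARNs are placeholders:

```python
import boto3

personalize = boto3.client("personalize")

# The solution is only configuration; no training happens until a
# solution version is created from it.
solution_arn = personalize.create_solution(
    name="demo-solution",  # placeholder
    datasetGroupArn="arn:aws:personalize:us-east-1:123456789012:dataset-group/demo",
    recipeArn="arn:aws:personalize:::recipe/aws-user-personalization",
)["solutionArn"]
```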
", "CreateSolutionVersion": "Trains or retrains an active solution in a Custom dataset group. A solution is created using the CreateSolution operation and must be in the ACTIVE state before calling CreateSolutionVersion
. A new version of the solution is created every time you call this operation.
Status
A solution version can be in one of the following states:
CREATE PENDING
CREATE IN_PROGRESS
ACTIVE
CREATE FAILED
CREATE STOPPING
CREATE STOPPED
To get the status of the version, call DescribeSolutionVersion. Wait until the status shows as ACTIVE before calling CreateCampaign.
If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the job failed.
Related APIs
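A sketch of the train-check-deploy flow above, assuming boto3 and a placeholder solution ARN:

```python
import time

import boto3

personalize = boto3.client("personalize")

# Each call trains a new version of the (ACTIVE) solution.
version_arn = personalize.create_solution_version(
    solutionArn="arn:aws:personalize:us-east-1:123456789012:solution/demo-solution"
)["solutionVersionArn"]

# Poll until training finishes, then inspect accuracy before deploying.
while True:
    status = personalize.describe_solution_version(
        solutionVersionArn=version_arn
    )["solutionVersion"]["status"]
    if status in ("ACTIVE", "CREATE FAILED", "CREATE STOPPED"):
        break
    time.sleep(60)

if status == "ACTIVE":
    print(personalize.get_solution_metrics(solutionVersionArn=version_arn)["metrics"])
    campaign_arn = personalize.create_campaign(
        name="demo-campaign",  # placeholder
        solutionVersionArn=version_arn,
        minProvisionedTPS=1,
    )["campaignArn"]
```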
Describes the given feature transformation.
", "DescribeFilter": "Describes a filter's properties.
", "DescribeRecipe": "Describes a recipe.
A recipe contains three items:
An algorithm that trains a model.
Hyperparameters that govern the training.
Feature transformation information for modifying the input data before training.
Amazon Personalize provides a set of predefined recipes. You specify a recipe when you create a solution with the CreateSolution API. CreateSolution
trains a model by using the algorithm in the specified recipe and a training dataset. The solution, when deployed as a campaign, can provide recommendations using the GetRecommendations API.
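A short sketch of inspecting the three items listed above via DescribeRecipe, using one of the predefined recipe ARNs:

```python
import boto3

personalize = boto3.client("personalize")

# The recipe bundles an algorithm, its hyperparameters, and a feature
# transformation; the response exposes the first and last as ARNs.
recipe = personalize.describe_recipe(
    recipeArn="arn:aws:personalize:::recipe/aws-user-personalization"
)["recipe"]
print(recipe["algorithmArn"], recipe["featureTransformationArn"])
```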
Describes the given recommender, including its status.
A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
When the status
is CREATE FAILED
, the response includes the failureReason
key, which describes why.
For more information on recommenders, see CreateRecommender.
", + "DescribeRecommender": "Describes the given recommender, including its status.
A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > ACTIVE
DELETE PENDING > DELETE IN_PROGRESS
When the status
is CREATE FAILED
, the response includes the failureReason
key, which describes why.
The modelMetrics
key is null when the recommender is being created or deleted.
For more information on recommenders, see CreateRecommender.
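A minimal sketch of reading the new modelMetrics key defensively, as the entry above advises; the recommender ARN is a placeholder:

```python
import boto3

personalize = boto3.client("personalize")

rec = personalize.describe_recommender(
    recommenderArn="arn:aws:personalize:us-east-1:123456789012:recommender/demo"
)["recommender"]

if rec["status"] == "CREATE FAILED":
    print("failed:", rec.get("failureReason"))
# modelMetrics is null (absent) while the recommender is being created
# or deleted, so guard the read.
elif rec.get("modelMetrics"):
    for metric, value in rec["modelMetrics"].items():
        print(metric, value)
```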
", "DescribeSchema": "Describes a schema. For more information on schemas, see CreateSchema.
", "DescribeSolution": "Describes a solution. For more information on solutions, see CreateSolution.
", "DescribeSolutionVersion": "Describes a specific version of a solution. For more information on solutions, see CreateSolution
", @@ -1410,7 +1410,8 @@ "Metrics": { "base": null, "refs": { - "GetSolutionMetricsResponse$metrics": "The metrics for the solution version.
" + "GetSolutionMetricsResponse$metrics": "The metrics for the solution version. For more information, see Evaluating a solution version with metrics .
", + "Recommender$modelMetrics": "Provides evaluation metrics that help you determine the performance of a recommender. For more information, see Evaluating a recommender.
" } }, "Name": { @@ -1753,9 +1754,9 @@ "FilterSummary$status": "The status of the filter.
", "Recipe$status": "The status of the recipe.
", "RecipeSummary$status": "The status of the recipe.
", - "Recommender$status": "The status of the recommender.
A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
The status of the recommender. A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
The status of the recommender update.
A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
The status of the recommender.
A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > ACTIVE
DELETE PENDING > DELETE IN_PROGRESS
The status of the recommender. A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > ACTIVE
DELETE PENDING > DELETE IN_PROGRESS
The status of the recommender update.
A recommender can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > ACTIVE
DELETE PENDING > DELETE IN_PROGRESS
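The new STOP/START/INACTIVE states added above correspond to the recommender stop/start lifecycle. A minimal sketch with a placeholder ARN; my understanding is that billing and automatic retraining pause while the recommender is INACTIVE:

```python
import boto3

personalize = boto3.client("personalize")
arn = "arn:aws:personalize:us-east-1:123456789012:recommender/demo"  # placeholder

# STOP PENDING > STOP IN_PROGRESS > INACTIVE
personalize.stop_recommender(recommenderArn=arn)

# START PENDING > START IN_PROGRESS > ACTIVE; restart later from
# INACTIVE, polling DescribeRecommender to follow the transitions.
personalize.start_recommender(recommenderArn=arn)
```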
The status of the solution.
A solution can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
The status of the solution.
A solution can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
The status of the solution version.
A solution version can be in one of the following states:
CREATE PENDING
CREATE IN_PROGRESS
ACTIVE
CREATE FAILED
CREATE STOPPING
CREATE STOPPED
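The CREATE STOPPING and CREATE STOPPED states above are reached through StopSolutionVersionCreation. A minimal sketch with a placeholder ARN; per the service docs, a stopped training cannot be resumed:

```python
import boto3

personalize = boto3.client("personalize")

# Moves a version that is CREATE PENDING or CREATE IN_PROGRESS through
# CREATE STOPPING to CREATE STOPPED.
personalize.stop_solution_version_creation(
    solutionVersionArn="arn:aws:personalize:us-east-1:123456789012:solution/demo-solution/abcd1234"
)
```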