diff --git a/apis/appflow/2020-08-23/api-2.json b/apis/appflow/2020-08-23/api-2.json index c5e2ce867bc..e615787af08 100644 --- a/apis/appflow/2020-08-23/api-2.json +++ b/apis/appflow/2020-08-23/api-2.json @@ -1338,7 +1338,8 @@ "Honeycode":{"shape":"HoneycodeDestinationProperties"}, "CustomerProfiles":{"shape":"CustomerProfilesDestinationProperties"}, "Zendesk":{"shape":"ZendeskDestinationProperties"}, - "CustomConnector":{"shape":"CustomConnectorDestinationProperties"} + "CustomConnector":{"shape":"CustomConnectorDestinationProperties"}, + "SAPOData":{"shape":"SAPODataDestinationProperties"} } }, "DestinationField":{ @@ -2415,6 +2416,17 @@ "oAuthProperties":{"shape":"OAuthProperties"} } }, + "SAPODataDestinationProperties":{ + "type":"structure", + "required":["objectPath"], + "members":{ + "objectPath":{"shape":"Object"}, + "successResponseHandlingConfig":{"shape":"SuccessResponseHandlingConfig"}, + "idFieldNames":{"shape":"IdFieldNameList"}, + "errorHandlingConfig":{"shape":"ErrorHandlingConfig"}, + "writeOperationType":{"shape":"WriteOperationType"} + } + }, "SAPODataMetadata":{ "type":"structure", "members":{ @@ -2837,6 +2849,13 @@ "max":2048, "pattern":".*" }, + "SuccessResponseHandlingConfig":{ + "type":"structure", + "members":{ + "bucketPrefix":{"shape":"BucketPrefix"}, + "bucketName":{"shape":"BucketName"} + } + }, "SupportedApiVersion":{ "type":"string", "max":256, @@ -2937,6 +2956,7 @@ "Map_all", "Mask", "Merge", + "Passthrough", "Truncate", "Validate" ] diff --git a/apis/appflow/2020-08-23/docs-2.json b/apis/appflow/2020-08-23/docs-2.json index ab7643d236a..c3c714959a9 100644 --- a/apis/appflow/2020-08-23/docs-2.json +++ b/apis/appflow/2020-08-23/docs-2.json @@ -268,7 +268,8 @@ "S3DestinationProperties$bucketName": "
The Amazon S3 bucket name in which Amazon AppFlow places the transferred data.
", "S3SourceProperties$bucketName": "The Amazon S3 bucket name where the source files are stored.
", "SnowflakeConnectorProfileProperties$bucketName": "The name of the Amazon S3 bucket associated with Snowflake.
", - "SnowflakeDestinationProperties$intermediateBucketName": "The intermediate bucket that Amazon AppFlow uses when moving data into Snowflake.
" + "SnowflakeDestinationProperties$intermediateBucketName": "The intermediate bucket that Amazon AppFlow uses when moving data into Snowflake.
", + "SuccessResponseHandlingConfig$bucketName": "The name of the Amazon S3 bucket.
" } }, "BucketPrefix": { @@ -281,6 +282,7 @@ "S3SourceProperties$bucketPrefix": "The object key for the Amazon S3 bucket in which the source files are stored.
", "SnowflakeConnectorProfileProperties$bucketPrefix": "The bucket path that refers to the Amazon S3 bucket associated with Snowflake.
", "SnowflakeDestinationProperties$bucketPrefix": "The object key for the destination bucket in which Amazon AppFlow places the files.
", + "SuccessResponseHandlingConfig$bucketPrefix": "The Amazon S3 bucket prefix.
", "UpsolverDestinationProperties$bucketPrefix": "The object key for the destination Upsolver Amazon S3 bucket in which Amazon AppFlow places the files.
" } }, @@ -1008,6 +1010,7 @@ "EventBridgeDestinationProperties$errorHandlingConfig": null, "HoneycodeDestinationProperties$errorHandlingConfig": null, "RedshiftDestinationProperties$errorHandlingConfig": " The settings that determine how Amazon AppFlow handles an error when placing data in the Amazon Redshift destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
The settings that determine how Amazon AppFlow handles an error when placing data in the Salesforce destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
The settings that determine how Amazon AppFlow handles an error when placing data in the Snowflake destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
A list of field names that can be used as an ID field when performing a write operation.
", "refs": { "CustomConnectorDestinationProperties$idFieldNames": "The name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert.
", + "SAPODataDestinationProperties$idFieldNames": null, "SalesforceDestinationProperties$idFieldNames": "The name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update or delete.
", "ZendeskDestinationProperties$idFieldNames": null } @@ -1548,6 +1552,7 @@ "InforNexusSourceProperties$object": "The object specified in the Infor Nexus flow source.
", "MarketoSourceProperties$object": "The object specified in the Marketo flow source.
", "RedshiftDestinationProperties$object": "The object specified in the Amazon Redshift flow destination.
", + "SAPODataDestinationProperties$objectPath": "The object path specified in the SAPOData flow destination.
", "SAPODataSourceProperties$objectPath": "The object path specified in the SAPOData flow source.
", "SalesforceDestinationProperties$object": "The object specified in the Salesforce flow destination.
", "SalesforceSourceProperties$object": "The object specified in the Salesforce flow source.
", @@ -1824,6 +1829,12 @@ "ConnectorProfileProperties$SAPOData": null } }, + "SAPODataDestinationProperties": { + "base": "The properties that are applied when using SAPOData as a flow destination
", + "refs": { + "DestinationConnectorProperties$SAPOData": "The properties required to query SAPOData.
" + } + }, "SAPODataMetadata": { "base": "The connector metadata specific to SAPOData.
", "refs": { @@ -2088,6 +2099,12 @@ "SourceFields$member": null } }, + "SuccessResponseHandlingConfig": { + "base": "Determines how Amazon AppFlow handles the success response that it gets from the connector after placing data.
For example, this setting would determine where to write the response from the destination connector upon a successful insert operation.
", + "refs": { + "SAPODataDestinationProperties$successResponseHandlingConfig": "Determines how Amazon AppFlow handles the success response that it gets from the connector after placing data.
For example, this setting would determine where to write the response from a destination connector upon a successful insert operation.
" + } + }, "SupportedApiVersion": { "base": null, "refs": { @@ -2411,6 +2428,7 @@ "base": " The possible write operations in the destination connector. When this value is not provided, this defaults to the INSERT
operation.
Specifies the type of write operation to be performed in the custom connector when it's used as destination.
", + "SAPODataDestinationProperties$writeOperationType": null, "SalesforceDestinationProperties$writeOperationType": " This specifies the type of write operation to be performed in Salesforce. When the value is UPSERT
, then idFieldNames
is required.
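The AppFlow changes above add SAPOData as a flow destination, with S3-based handling for both success responses and failed records. Below is a minimal sketch of how the new fields surface through the Ruby SDK's `create_flow` request, mirroring the request syntax shown later in this diff; the region, flow name, connector profile, bucket names, and OData object path are hypothetical placeholders, not values taken from this change.

```ruby
require 'aws-sdk-appflow'

# Hypothetical on-demand flow that copies records from S3 into an SAP OData
# entity set. Assumes AWS credentials are already configured; every name
# below is a placeholder.
client = Aws::Appflow::Client.new(region: 'us-east-1')

client.create_flow(
  flow_name: 'example-sap-destination-flow',
  trigger_config: { trigger_type: 'OnDemand' },
  source_flow_config: {
    connector_type: 'S3',
    source_connector_properties: {
      s3: { bucket_name: 'example-source-bucket', bucket_prefix: 'input' }
    }
  },
  destination_flow_config_list: [{
    connector_type: 'SAPOData',
    connector_profile_name: 'example-sap-profile',
    destination_connector_properties: {
      sapo_data: {
        object_path: 'EXAMPLE_SRV/Products', # required; hypothetical path
        write_operation_type: 'INSERT',
        # Where AppFlow writes the connector's response on success.
        success_response_handling_config: {
          bucket_name: 'example-response-bucket',
          bucket_prefix: 'sap-success'
        },
        # Where AppFlow writes failed records instead of failing the flow.
        error_handling_config: {
          fail_on_first_destination_error: false,
          bucket_name: 'example-response-bucket',
          bucket_prefix: 'sap-errors'
        }
      }
    }
  }],
  tasks: [{
    source_fields: ['ProductID'],
    destination_field: 'ProductID',
    task_type: 'Map',
    connector_operator: { s3: 'NO_OP' }
  }]
)
```

Per the Salesforce documentation above, UPSERT or UPDATE writes would presumably also set `id_field_names` to identify the target records.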
diff --git a/apis/athena/2017-05-18/docs-2.json b/apis/athena/2017-05-18/docs-2.json
    "GetNamedQuery": "Returns information about a single query. Requires that you have access to the workgroup in which the query was saved.
", "GetPreparedStatement": "Retrieves the prepared statement with the specified name from the specified workgroup.
", "GetQueryExecution": "Returns information about a single execution of a query if you have access to the workgroup in which the query ran. Each time a query executes, information about the query execution is saved with a unique ID.
", - "GetQueryResults": "Streams the results of a single query execution specified by QueryExecutionId
from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.
If the original query execution ran using an ResultConfiguration$ExpectedBucketOwner setting, the setting also applies to Amazon S3 read operations when GetQueryResults
is called. If an expected bucket owner has been specified and the query results are in an Amazon S3 bucket whose owner account ID is different from the expected bucket owner, the GetQueryResults
call fails with an Amazon S3 permissions error.
To stream query results successfully, the IAM principal with permission to call GetQueryResults
also must have permissions to the Amazon S3 GetObject
action for the Athena query results location.
IAM principals with permission to the Amazon S3 GetObject
action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults
action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.
Streams the results of a single query execution specified by QueryExecutionId
from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.
To stream query results successfully, the IAM principal with permission to call GetQueryResults
also must have permissions to the Amazon S3 GetObject
action for the Athena query results location.
IAM principals with permission to the Amazon S3 GetObject
action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults
action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.
Returns table metadata for the specified catalog, database, and table.
", "GetWorkGroup": "Returns information about the workgroup with the specified name.
", "ListDataCatalogs": "Lists the data catalogs in the current Amazon Web Services account.
", @@ -83,8 +83,8 @@ "base": null, "refs": { "DeleteWorkGroupInput$RecursiveDeleteOption": "The option to delete the workgroup and its contents even if the workgroup contains any named queries or query executions.
", - "ResultConfigurationUpdates$RemoveOutputLocation": "If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation
in ResultConfigurationUpdates
(the client-side setting), the OutputLocation
in the workgroup's ResultConfiguration
is updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.
If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration
in ResultConfigurationUpdates
(the client-side setting), the EncryptionConfiguration
in the workgroup's ResultConfiguration
is updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.
If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation
in ResultConfigurationUpdates
(the client-side setting), the OutputLocation
in the workgroup's ResultConfiguration
will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.
If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration
in ResultConfigurationUpdates
(the client-side setting), the EncryptionConfiguration
in the workgroup's ResultConfiguration
will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.
If set to \"true\", removes the Amazon Web Services account ID previously specified for ResultConfiguration$ExpectedBucketOwner. If set to \"false\" or not set, and a value is present in the ExpectedBucketOwner
in ResultConfigurationUpdates
(the client-side setting), the ExpectedBucketOwner
in the workgroup's ResultConfiguration
is updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.
If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see Workgroup Settings Override Client-Side Settings.
", "WorkGroupConfiguration$PublishCloudWatchMetricsEnabled": "Indicates that the Amazon CloudWatch metrics are enabled for the workgroup.
", @@ -369,6 +369,12 @@ "UnprocessedQueryExecutionId$ErrorMessage": "The error message returned when the query execution failed to process, if applicable.
" } }, + "ErrorType": { + "base": null, + "refs": { + "AthenaError$ErrorType": "An integer value that provides specific information about an Athena query error. For the meaning of specific values, see the Error Type Reference in the Amazon Athena User Guide.
" + } + }, "ExpressionString": { "base": null, "refs": { diff --git a/apis/rds/2014-10-31/api-2.json b/apis/rds/2014-10-31/api-2.json index 4526d52b17f..7f64b477e30 100644 --- a/apis/rds/2014-10-31/api-2.json +++ b/apis/rds/2014-10-31/api-2.json @@ -3969,7 +3969,8 @@ "DBEngineVersionArn":{"shape":"String"}, "KMSKeyId":{"shape":"String"}, "CreateTime":{"shape":"TStamp"}, - "TagList":{"shape":"TagList"} + "TagList":{"shape":"TagList"}, + "SupportsBabelfish":{"shape":"Boolean"} } }, "DBEngineVersionList":{ @@ -8388,7 +8389,8 @@ "IsMajorVersionUpgrade":{"shape":"Boolean"}, "SupportedEngineModes":{"shape":"EngineModeList"}, "SupportsParallelQuery":{"shape":"BooleanOptional"}, - "SupportsGlobalDatabases":{"shape":"BooleanOptional"} + "SupportsGlobalDatabases":{"shape":"BooleanOptional"}, + "SupportsBabelfish":{"shape":"BooleanOptional"} } }, "UserAuthConfig":{ diff --git a/apis/rds/2014-10-31/docs-2.json b/apis/rds/2014-10-31/docs-2.json index 7ee4ca80f85..316d880618a 100644 --- a/apis/rds/2014-10-31/docs-2.json +++ b/apis/rds/2014-10-31/docs-2.json @@ -344,6 +344,7 @@ "DBEngineVersion$SupportsReadReplica": "Indicates whether the database engine version supports read replicas.
", "DBEngineVersion$SupportsParallelQuery": "A value that indicates whether you can use Aurora parallel query with a specific DB engine version.
", "DBEngineVersion$SupportsGlobalDatabases": "A value that indicates whether you can use Aurora global databases with a specific DB engine version.
", + "DBEngineVersion$SupportsBabelfish": "A value that indicates whether the engine version supports Babelfish for Aurora PostgreSQL.
", "DBInstance$MultiAZ": "Specifies if the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom.
", "DBInstance$AutoMinorVersionUpgrade": "A value that indicates that minor version patches are applied automatically.
", "DBInstance$PubliclyAccessible": "Specifies the accessibility options for the DB instance.
When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.
When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.
For more information, see CreateDBInstance.
", @@ -546,7 +547,8 @@ "StartActivityStreamResponse$EngineNativeAuditFieldsIncluded": "Indicates whether engine-native audit fields are included in the database activity stream.
", "StopActivityStreamRequest$ApplyImmediately": "Specifies whether or not the database activity stream is to stop as soon as possible, regardless of the maintenance window for the database.
", "UpgradeTarget$SupportsParallelQuery": "A value that indicates whether you can use Aurora parallel query with the target engine version.
", - "UpgradeTarget$SupportsGlobalDatabases": "A value that indicates whether you can use Aurora global databases with the target engine version.
" + "UpgradeTarget$SupportsGlobalDatabases": "A value that indicates whether you can use Aurora global databases with the target engine version.
", + "UpgradeTarget$SupportsBabelfish": "A value that indicates whether you can use Babelfish for Aurora PostgreSQL with the target engine version.
" } }, "BucketName": { @@ -2305,10 +2307,10 @@ "DescribeDBClusterParameterGroupsMessage$Filters": "This parameter isn't currently supported.
", "DescribeDBClusterParametersMessage$Filters": "This parameter isn't currently supported.
", "DescribeDBClusterSnapshotsMessage$Filters": "A filter that specifies one or more DB cluster snapshots to describe.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs).
db-cluster-snapshot-id
- Accepts DB cluster snapshot identifiers.
snapshot-type
- Accepts types of DB cluster snapshots.
engine
- Accepts names of database engines.
A filter that specifies one or more DB clusters to describe.
Supported filters:
clone-group-id
- Accepts clone group identifiers. The results list will only include information about the DB clusters associated with these clone groups.
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.
domain
- Accepts Active Directory directory IDs. The results list will only include information about the DB clusters associated with these domains.
engine
- Accepts engine names. The results list will only include information about the DB clusters for these engines.
This parameter isn't currently supported.
", + "DescribeDBClustersMessage$Filters": "A filter that specifies one or more DB clusters to describe.
Supported filters:
clone-group-id
- Accepts clone group identifiers. The results list only includes information about the DB clusters associated with these clone groups.
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB clusters identified by these ARNs.
domain
- Accepts Active Directory directory IDs. The results list only includes information about the DB clusters associated with these domains.
engine
- Accepts engine names. The results list only includes information about the DB clusters for these engines.
A filter that specifies one or more DB engine versions to describe.
Supported filters:
db-parameter-group-family
- Accepts parameter groups family names. The results list only includes information about the DB engine versions for these parameter group families.
engine
- Accepts engine names. The results list only includes information about the DB engine versions for these engines.
engine-mode
- Accepts DB engine modes. The results list only includes information about the DB engine versions for these engine modes. Valid DB engine modes are the following:
global
multimaster
parallelquery
provisioned
serverless
engine-version
- Accepts engine versions. The results list only includes information about the DB engine versions for these engine versions.
status
- Accepts engine version statuses. The results list only includes information about the DB engine versions for these statuses. Valid statuses are the following:
available
deprecated
A filter that specifies which resources to return based on status.
Supported filters are the following:
status
active
- automated backups for current instances
retained
- automated backups for deleted instances and after backup replication is stopped
creating
- automated backups that are waiting for the first automated snapshot to be available
db-instance-id
- Accepts DB instance identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance automated backups identified by these ARNs.
dbi-resource-id
- Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance resources identified by these ARNs.
Returns all resources by default. The status for each resource is specified in the response.
", - "DescribeDBInstancesMessage$Filters": "A filter that specifies one or more DB instances to describe.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB instances associated with the DB clusters identified by these ARNs.
db-instance-id
- Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include information about the DB instances identified by these ARNs.
dbi-resource-id
- Accepts DB instance resource identifiers. The results list will only include information about the DB instances identified by these DB instance resource identifiers.
domain
- Accepts Active Directory directory IDs. The results list will only include information about the DB instances associated with these domains.
engine
- Accepts engine names. The results list will only include information about the DB instances for these engines.
A filter that specifies one or more DB instances to describe.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB instances associated with the DB clusters identified by these ARNs.
db-instance-id
- Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list only includes information about the DB instances identified by these ARNs.
dbi-resource-id
- Accepts DB instance resource identifiers. The results list only includes information about the DB instances identified by these DB instance resource identifiers.
domain
- Accepts Active Directory directory IDs. The results list only includes information about the DB instances associated with these domains.
engine
- Accepts engine names. The results list only includes information about the DB instances for these engines.
This parameter isn't currently supported.
", "DescribeDBParameterGroupsMessage$Filters": "This parameter isn't currently supported.
", "DescribeDBParametersMessage$Filters": "This parameter isn't currently supported.
", @@ -2330,7 +2332,7 @@ "DescribeOptionGroupOptionsMessage$Filters": "This parameter isn't currently supported.
", "DescribeOptionGroupsMessage$Filters": "This parameter isn't currently supported.
", "DescribeOrderableDBInstanceOptionsMessage$Filters": "This parameter isn't currently supported.
", - "DescribePendingMaintenanceActionsMessage$Filters": "A filter that specifies one or more resources to return pending maintenance actions for.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include pending maintenance actions for the DB clusters identified by these ARNs.
db-instance-id
- Accepts DB instance identifiers and DB instance ARNs. The results list will only include pending maintenance actions for the DB instances identified by these ARNs.
A filter that specifies one or more resources to return pending maintenance actions for.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes pending maintenance actions for the DB clusters identified by these ARNs.
db-instance-id
- Accepts DB instance identifiers and DB instance ARNs. The results list only includes pending maintenance actions for the DB instances identified by these ARNs.
This parameter isn't currently supported.
", "DescribeReservedDBInstancesOfferingsMessage$Filters": "This parameter isn't currently supported.
", "DescribeSourceRegionsMessage$Filters": "This parameter isn't currently supported.
", @@ -4359,7 +4361,7 @@ "ModifyDBClusterMessage$PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod
parameter.
The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide.
Constraints:
Must be in the format hh24:mi-hh24:mi
.
Must be in Universal Coordinated Time (UTC).
Must not conflict with the preferred maintenance window.
Must be at least 30 minutes.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
", "ModifyDBClusterMessage$PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
Format: ddd:hh24:mi-ddd:hh24:mi
The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.
Constraints: Minimum 30-minute window.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
", "ModifyDBClusterMessage$EngineVersion": "The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately
is enabled.
To list all of the available engine versions for MySQL 5.6-compatible Aurora, use the following command:
aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"
To list all of the available engine versions for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora, use the following command:
aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"
To list all of the available engine versions for Aurora PostgreSQL, use the following command:
aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"
To list all of the available engine versions for RDS for MySQL, use the following command:
aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"
To list all of the available engine versions for RDS for PostgreSQL, use the following command:
aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"
Valid for: Aurora DB clusters and Multi-AZ DB clusters
", - "ModifyDBClusterMessage$DBInstanceParameterGroupName": "The name of the DB parameter group to apply to all instances of the DB cluster.
When you apply a parameter group using the DBInstanceParameterGroupName
parameter, the DB cluster isn't rebooted automatically. Also, parameter changes aren't applied during the next maintenance window but instead are applied immediately.
Default: The existing name setting
Constraints:
The DB parameter group must be in the same DB parameter group family as this DB cluster.
The DBInstanceParameterGroupName
parameter is only valid in combination with the AllowMajorVersionUpgrade
parameter.
Valid for: Aurora DB clusters only
", + "ModifyDBClusterMessage$DBInstanceParameterGroupName": "The name of the DB parameter group to apply to all instances of the DB cluster.
When you apply a parameter group using the DBInstanceParameterGroupName
parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window.
Default: The existing name setting
Constraints:
The DB parameter group must be in the same DB parameter group family as this DB cluster.
The DBInstanceParameterGroupName
parameter is valid in combination with the AllowMajorVersionUpgrade
parameter for a major version upgrade only.
Valid for: Aurora DB clusters only
", "ModifyDBClusterMessage$Domain": "The Active Directory directory ID to move the DB cluster to. Specify none
to remove the cluster from its current domain. The domain must be created prior to this operation.
For more information, see Kerberos Authentication in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters only
", "ModifyDBClusterMessage$DomainIAMRoleName": "Specify the name of the IAM role to be used when making API calls to the Directory Service.
Valid for: Aurora DB clusters only
", "ModifyDBClusterMessage$DBClusterInstanceClass": "The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.
For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
Valid for: Multi-AZ DB clusters only
", @@ -4551,7 +4553,7 @@ "RestoreDBClusterFromSnapshotMessage$DBClusterParameterGroupName": "The name of the DB cluster parameter group to associate with this DB cluster. If this argument is omitted, the default DB cluster parameter group for the specified engine is used.
Constraints:
If supplied, must match the name of an existing default DB cluster parameter group.
Must be 1 to 255 letters, numbers, or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
", "RestoreDBClusterFromSnapshotMessage$Domain": "Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.
For more information, see Kerberos Authentication in the Amazon RDS User Guide.
Valid for: Aurora DB clusters only
", "RestoreDBClusterFromSnapshotMessage$DomainIAMRoleName": "Specify the name of the IAM role to be used when making API calls to the Directory Service.
Valid for: Aurora DB clusters only
", - "RestoreDBClusterFromSnapshotMessage$DBClusterInstanceClass": "The compute and memory capacity of the each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.
For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
", + "RestoreDBClusterFromSnapshotMessage$DBClusterInstanceClass": "The compute and memory capacity of the each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.
For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
Valid for: Multi-AZ DB clusters only
", "RestoreDBClusterFromSnapshotMessage$StorageType": "Specifies the storage type to be associated with the each DB instance in the Multi-AZ DB cluster.
Valid values: io1
When specified, a value for the Iops
parameter is required.
Default: io1
Valid for: Aurora DB clusters and Multi-AZ DB clusters
", "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "The name of the new DB cluster to be created.
Constraints:
Must contain from 1 to 63 letters, numbers, or hyphens
First character must be a letter
Can't end with a hyphen or contain two consecutive hyphens
Valid for: Aurora DB clusters and Multi-AZ DB clusters
", "RestoreDBClusterToPointInTimeMessage$RestoreType": "The type of restore to be performed. You can specify one of the following values:
full-copy
- The new DB cluster is restored as a full copy of the source DB cluster.
copy-on-write
- The new DB cluster is restored as a clone of the source DB cluster.
Constraints: You can't specify copy-on-write
if the engine version of the source DB cluster is earlier than 1.11.
If you don't specify a RestoreType
value, then the new DB cluster is restored as a full copy of the source DB cluster.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
", diff --git a/gems/aws-sdk-appflow/CHANGELOG.md b/gems/aws-sdk-appflow/CHANGELOG.md index 114727ce1f1..05354c7400f 100644 --- a/gems/aws-sdk-appflow/CHANGELOG.md +++ b/gems/aws-sdk-appflow/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.23.0 (2022-02-14) +------------------ + +* Feature - Launching Amazon AppFlow SAP as a destination connector SDK. + 1.22.0 (2022-02-03) ------------------ diff --git a/gems/aws-sdk-appflow/VERSION b/gems/aws-sdk-appflow/VERSION index 57807d6d0d0..a6c2798a482 100644 --- a/gems/aws-sdk-appflow/VERSION +++ b/gems/aws-sdk-appflow/VERSION @@ -1 +1 @@ -1.22.0 +1.23.0 diff --git a/gems/aws-sdk-appflow/lib/aws-sdk-appflow.rb b/gems/aws-sdk-appflow/lib/aws-sdk-appflow.rb index 5b51c65fdf3..fae8bd30d46 100644 --- a/gems/aws-sdk-appflow/lib/aws-sdk-appflow.rb +++ b/gems/aws-sdk-appflow/lib/aws-sdk-appflow.rb @@ -48,6 +48,6 @@ # @!group service module Aws::Appflow - GEM_VERSION = '1.22.0' + GEM_VERSION = '1.23.0' end diff --git a/gems/aws-sdk-appflow/lib/aws-sdk-appflow/client.rb b/gems/aws-sdk-appflow/lib/aws-sdk-appflow/client.rb index da443220b45..66e25f1a1d5 100644 --- a/gems/aws-sdk-appflow/lib/aws-sdk-appflow/client.rb +++ b/gems/aws-sdk-appflow/lib/aws-sdk-appflow/client.rb @@ -869,6 +869,20 @@ def create_connector_profile(params = {}, options = {}) # "CustomPropertyKey" => "CustomPropertyValue", # }, # }, + # sapo_data: { + # object_path: "Object", # required + # success_response_handling_config: { + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # id_field_names: ["Name"], + # error_handling_config: { + # fail_on_first_destination_error: false, + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # write_operation_type: "INSERT", # accepts INSERT, UPSERT, UPDATE, DELETE + # }, # }, # }, # ], @@ -894,7 +908,7 @@ def create_connector_profile(params = {}, options = {}) # custom_connector: "PROJECTION", # accepts PROJECTION, LESS_THAN, GREATER_THAN, CONTAINS, BETWEEN, LESS_THAN_OR_EQUAL_TO, GREATER_THAN_OR_EQUAL_TO, EQUAL_TO, NOT_EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, NO_OP # }, # destination_field: "DestinationField", - # task_type: "Arithmetic", # required, accepts Arithmetic, Filter, Map, Map_all, Mask, Merge, Truncate, Validate + # task_type: "Arithmetic", # required, accepts Arithmetic, Filter, Map, Map_all, Mask, Merge, Passthrough, Truncate, Validate # task_properties: { # "VALUE" => "Property", # }, @@ -1528,6 +1542,15 @@ def describe_connectors(params = {}, options = {}) # resp.destination_flow_config_list[0].destination_connector_properties.custom_connector.id_field_names[0] #=> String # resp.destination_flow_config_list[0].destination_connector_properties.custom_connector.custom_properties #=> Hash # resp.destination_flow_config_list[0].destination_connector_properties.custom_connector.custom_properties["CustomPropertyKey"] #=> String + # resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.object_path #=> String + # resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.success_response_handling_config.bucket_prefix #=> String + # resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.success_response_handling_config.bucket_name #=> String + # resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.id_field_names #=> Array + # 
resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.id_field_names[0] #=> String + # resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.error_handling_config.fail_on_first_destination_error #=> Boolean + # resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.error_handling_config.bucket_prefix #=> String + # resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.error_handling_config.bucket_name #=> String + # resp.destination_flow_config_list[0].destination_connector_properties.sapo_data.write_operation_type #=> String, one of "INSERT", "UPSERT", "UPDATE", "DELETE" # resp.last_run_execution_details.most_recent_execution_message #=> String # resp.last_run_execution_details.most_recent_execution_time #=> Time # resp.last_run_execution_details.most_recent_execution_status #=> String, one of "InProgress", "Successful", "Error" @@ -1559,7 +1582,7 @@ def describe_connectors(params = {}, options = {}) # resp.tasks[0].connector_operator.sapo_data #=> String, one of "PROJECTION", "LESS_THAN", "CONTAINS", "GREATER_THAN", "BETWEEN", "LESS_THAN_OR_EQUAL_TO", "GREATER_THAN_OR_EQUAL_TO", "EQUAL_TO", "NOT_EQUAL_TO", "ADDITION", "MULTIPLICATION", "DIVISION", "SUBTRACTION", "MASK_ALL", "MASK_FIRST_N", "MASK_LAST_N", "VALIDATE_NON_NULL", "VALIDATE_NON_ZERO", "VALIDATE_NON_NEGATIVE", "VALIDATE_NUMERIC", "NO_OP" # resp.tasks[0].connector_operator.custom_connector #=> String, one of "PROJECTION", "LESS_THAN", "GREATER_THAN", "CONTAINS", "BETWEEN", "LESS_THAN_OR_EQUAL_TO", "GREATER_THAN_OR_EQUAL_TO", "EQUAL_TO", "NOT_EQUAL_TO", "ADDITION", "MULTIPLICATION", "DIVISION", "SUBTRACTION", "MASK_ALL", "MASK_FIRST_N", "MASK_LAST_N", "VALIDATE_NON_NULL", "VALIDATE_NON_ZERO", "VALIDATE_NON_NEGATIVE", "VALIDATE_NUMERIC", "NO_OP" # resp.tasks[0].destination_field #=> String - # resp.tasks[0].task_type #=> String, one of "Arithmetic", "Filter", "Map", "Map_all", "Mask", "Merge", "Truncate", "Validate" + # resp.tasks[0].task_type #=> String, one of "Arithmetic", "Filter", "Map", "Map_all", "Mask", "Merge", "Passthrough", "Truncate", "Validate" # resp.tasks[0].task_properties #=> Hash # resp.tasks[0].task_properties["OperatorPropertiesKeys"] #=> String # resp.created_at #=> Time @@ -2504,6 +2527,20 @@ def update_connector_profile(params = {}, options = {}) # "CustomPropertyKey" => "CustomPropertyValue", # }, # }, + # sapo_data: { + # object_path: "Object", # required + # success_response_handling_config: { + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # id_field_names: ["Name"], + # error_handling_config: { + # fail_on_first_destination_error: false, + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # write_operation_type: "INSERT", # accepts INSERT, UPSERT, UPDATE, DELETE + # }, # }, # }, # ], @@ -2529,7 +2566,7 @@ def update_connector_profile(params = {}, options = {}) # custom_connector: "PROJECTION", # accepts PROJECTION, LESS_THAN, GREATER_THAN, CONTAINS, BETWEEN, LESS_THAN_OR_EQUAL_TO, GREATER_THAN_OR_EQUAL_TO, EQUAL_TO, NOT_EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, NO_OP # }, # destination_field: "DestinationField", - # task_type: "Arithmetic", # required, accepts Arithmetic, Filter, Map, Map_all, Mask, Merge, Truncate, Validate + # task_type: "Arithmetic", # required, accepts Arithmetic, Filter, Map, Map_all, Mask, Merge, 
Passthrough, Truncate, Validate # task_properties: { # "VALUE" => "Property", # }, @@ -2563,7 +2600,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-appflow' - context[:gem_version] = '1.22.0' + context[:gem_version] = '1.23.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-appflow/lib/aws-sdk-appflow/client_api.rb b/gems/aws-sdk-appflow/lib/aws-sdk-appflow/client_api.rb index 8f60bb43720..9b554458172 100644 --- a/gems/aws-sdk-appflow/lib/aws-sdk-appflow/client_api.rb +++ b/gems/aws-sdk-appflow/lib/aws-sdk-appflow/client_api.rb @@ -273,6 +273,7 @@ module ClientApi SAPODataConnectorOperator = Shapes::StringShape.new(name: 'SAPODataConnectorOperator') SAPODataConnectorProfileCredentials = Shapes::StructureShape.new(name: 'SAPODataConnectorProfileCredentials') SAPODataConnectorProfileProperties = Shapes::StructureShape.new(name: 'SAPODataConnectorProfileProperties') + SAPODataDestinationProperties = Shapes::StructureShape.new(name: 'SAPODataDestinationProperties') SAPODataMetadata = Shapes::StructureShape.new(name: 'SAPODataMetadata') SAPODataSourceProperties = Shapes::StructureShape.new(name: 'SAPODataSourceProperties') SalesforceConnectorOperator = Shapes::StringShape.new(name: 'SalesforceConnectorOperator') @@ -317,6 +318,7 @@ module ClientApi StopFlowRequest = Shapes::StructureShape.new(name: 'StopFlowRequest') StopFlowResponse = Shapes::StructureShape.new(name: 'StopFlowResponse') String = Shapes::StringShape.new(name: 'String') + SuccessResponseHandlingConfig = Shapes::StructureShape.new(name: 'SuccessResponseHandlingConfig') SupportedApiVersion = Shapes::StringShape.new(name: 'SupportedApiVersion') SupportedApiVersionList = Shapes::ListShape.new(name: 'SupportedApiVersionList') SupportedFieldTypeDetails = Shapes::StructureShape.new(name: 'SupportedFieldTypeDetails') @@ -800,6 +802,7 @@ module ClientApi DestinationConnectorProperties.add_member(:customer_profiles, Shapes::ShapeRef.new(shape: CustomerProfilesDestinationProperties, location_name: "CustomerProfiles")) DestinationConnectorProperties.add_member(:zendesk, Shapes::ShapeRef.new(shape: ZendeskDestinationProperties, location_name: "Zendesk")) DestinationConnectorProperties.add_member(:custom_connector, Shapes::ShapeRef.new(shape: CustomConnectorDestinationProperties, location_name: "CustomConnector")) + DestinationConnectorProperties.add_member(:sapo_data, Shapes::ShapeRef.new(shape: SAPODataDestinationProperties, location_name: "SAPOData")) DestinationConnectorProperties.struct_class = Types::DestinationConnectorProperties DestinationFieldProperties.add_member(:is_creatable, Shapes::ShapeRef.new(shape: Boolean, location_name: "isCreatable")) @@ -1111,6 +1114,13 @@ module ClientApi SAPODataConnectorProfileProperties.add_member(:o_auth_properties, Shapes::ShapeRef.new(shape: OAuthProperties, location_name: "oAuthProperties")) SAPODataConnectorProfileProperties.struct_class = Types::SAPODataConnectorProfileProperties + SAPODataDestinationProperties.add_member(:object_path, Shapes::ShapeRef.new(shape: Object, required: true, location_name: "objectPath")) + SAPODataDestinationProperties.add_member(:success_response_handling_config, Shapes::ShapeRef.new(shape: SuccessResponseHandlingConfig, location_name: "successResponseHandlingConfig")) + SAPODataDestinationProperties.add_member(:id_field_names, Shapes::ShapeRef.new(shape: IdFieldNameList, location_name: "idFieldNames")) + SAPODataDestinationProperties.add_member(:error_handling_config, 
Shapes::ShapeRef.new(shape: ErrorHandlingConfig, location_name: "errorHandlingConfig")) + SAPODataDestinationProperties.add_member(:write_operation_type, Shapes::ShapeRef.new(shape: WriteOperationType, location_name: "writeOperationType")) + SAPODataDestinationProperties.struct_class = Types::SAPODataDestinationProperties + SAPODataMetadata.struct_class = Types::SAPODataMetadata SAPODataSourceProperties.add_member(:object_path, Shapes::ShapeRef.new(shape: Object, location_name: "objectPath")) @@ -1260,6 +1270,10 @@ module ClientApi StopFlowResponse.add_member(:flow_status, Shapes::ShapeRef.new(shape: FlowStatus, location_name: "flowStatus")) StopFlowResponse.struct_class = Types::StopFlowResponse + SuccessResponseHandlingConfig.add_member(:bucket_prefix, Shapes::ShapeRef.new(shape: BucketPrefix, location_name: "bucketPrefix")) + SuccessResponseHandlingConfig.add_member(:bucket_name, Shapes::ShapeRef.new(shape: BucketName, location_name: "bucketName")) + SuccessResponseHandlingConfig.struct_class = Types::SuccessResponseHandlingConfig + SupportedApiVersionList.member = Shapes::ShapeRef.new(shape: SupportedApiVersion) SupportedFieldTypeDetails.add_member(:v1, Shapes::ShapeRef.new(shape: FieldTypeDetails, required: true, location_name: "v1")) diff --git a/gems/aws-sdk-appflow/lib/aws-sdk-appflow/types.rb b/gems/aws-sdk-appflow/lib/aws-sdk-appflow/types.rb index 4d2434c9271..e17f1511ce8 100644 --- a/gems/aws-sdk-appflow/lib/aws-sdk-appflow/types.rb +++ b/gems/aws-sdk-appflow/lib/aws-sdk-appflow/types.rb @@ -2156,6 +2156,20 @@ class CreateConnectorProfileResponse < Struct.new( # "CustomPropertyKey" => "CustomPropertyValue", # }, # }, + # sapo_data: { + # object_path: "Object", # required + # success_response_handling_config: { + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # id_field_names: ["Name"], + # error_handling_config: { + # fail_on_first_destination_error: false, + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # write_operation_type: "INSERT", # accepts INSERT, UPSERT, UPDATE, DELETE + # }, # }, # }, # ], @@ -2181,7 +2195,7 @@ class CreateConnectorProfileResponse < Struct.new( # custom_connector: "PROJECTION", # accepts PROJECTION, LESS_THAN, GREATER_THAN, CONTAINS, BETWEEN, LESS_THAN_OR_EQUAL_TO, GREATER_THAN_OR_EQUAL_TO, EQUAL_TO, NOT_EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, NO_OP # }, # destination_field: "DestinationField", - # task_type: "Arithmetic", # required, accepts Arithmetic, Filter, Map, Map_all, Mask, Merge, Truncate, Validate + # task_type: "Arithmetic", # required, accepts Arithmetic, Filter, Map, Map_all, Mask, Merge, Passthrough, Truncate, Validate # task_properties: { # "VALUE" => "Property", # }, @@ -3176,6 +3190,20 @@ class DescribeFlowResponse < Struct.new( # "CustomPropertyKey" => "CustomPropertyValue", # }, # }, + # sapo_data: { + # object_path: "Object", # required + # success_response_handling_config: { + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # id_field_names: ["Name"], + # error_handling_config: { + # fail_on_first_destination_error: false, + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # write_operation_type: "INSERT", # accepts INSERT, UPSERT, UPDATE, DELETE + # }, # } # # @!attribute [rw] redshift @@ -3222,6 +3250,10 @@ class DescribeFlowResponse < Struct.new( # The properties that are required to query the 
custom Connector. # @return [Types::CustomConnectorDestinationProperties] # + # @!attribute [rw] sapo_data + # The properties required to query SAPOData. + # @return [Types::SAPODataDestinationProperties] + # # @see http://docs.aws.amazon.com/goto/WebAPI/appflow-2020-08-23/DestinationConnectorProperties AWS API Documentation # class DestinationConnectorProperties < Struct.new( @@ -3235,7 +3267,8 @@ class DestinationConnectorProperties < Struct.new( :honeycode, :customer_profiles, :zendesk, - :custom_connector) + :custom_connector, + :sapo_data) SENSITIVE = [] include Aws::Structure end @@ -3401,6 +3434,20 @@ class DestinationFieldProperties < Struct.new( # "CustomPropertyKey" => "CustomPropertyValue", # }, # }, + # sapo_data: { + # object_path: "Object", # required + # success_response_handling_config: { + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # id_field_names: ["Name"], + # error_handling_config: { + # fail_on_first_destination_error: false, + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # write_operation_type: "INSERT", # accepts INSERT, UPSERT, UPDATE, DELETE + # }, # }, # } # @@ -5187,6 +5234,71 @@ class SAPODataConnectorProfileProperties < Struct.new( include Aws::Structure end + # The properties that are applied when using SAPOData as a flow + # destination + # + # @note When making an API call, you may pass SAPODataDestinationProperties + # data as a hash: + # + # { + # object_path: "Object", # required + # success_response_handling_config: { + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # id_field_names: ["Name"], + # error_handling_config: { + # fail_on_first_destination_error: false, + # bucket_prefix: "BucketPrefix", + # bucket_name: "BucketName", + # }, + # write_operation_type: "INSERT", # accepts INSERT, UPSERT, UPDATE, DELETE + # } + # + # @!attribute [rw] object_path + # The object path specified in the SAPOData flow destination. + # @return [String] + # + # @!attribute [rw] success_response_handling_config + # Determines how Amazon AppFlow handles the success response that it + # gets from the connector after placing data. + # + # For example, this setting would determine where to write the + # response from a destination connector upon a successful insert + # operation. + # @return [Types::SuccessResponseHandlingConfig] + # + # @!attribute [rw] id_field_names + # A list of field names that can be used as an ID field when + # performing a write operation. + # @return [Array
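On the read side, the new members appear on `describe_flow` responses exactly as listed in the client changes earlier in this diff. A short sketch follows; the flow name is again a hypothetical placeholder, and optional members come back as nil when unset.

```ruby
require 'aws-sdk-appflow'

client = Aws::Appflow::Client.new(region: 'us-east-1')

# Read back the SAP OData destination settings of a hypothetical flow.
resp = client.describe_flow(flow_name: 'example-sap-destination-flow')
sap  = resp.destination_flow_config_list[0]
           .destination_connector_properties.sapo_data

puts sap.object_path          # e.g. "EXAMPLE_SRV/Products"
puts sap.write_operation_type # one of "INSERT", "UPSERT", "UPDATE", "DELETE"
# Guard optional members, which are nil when not configured.
puts sap.success_response_handling_config&.bucket_name
puts sap.id_field_names.inspect # e.g. ["ProductID"] for an UPSERT flow
```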