diff --git a/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts index 0ef46e3dade9..c5c629987a70 100644 --- a/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts @@ -118,7 +118,7 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * const client = new FirehoseClient(config); * const input = { // CreateDeliveryStreamInput * DeliveryStreamName: "STRING_VALUE", // required - * DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource", + * DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource", * KinesisStreamSourceConfiguration: { // KinesisStreamSourceConfiguration * KinesisStreamARN: "STRING_VALUE", // required * RoleARN: "STRING_VALUE", // required @@ -174,10 +174,10 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * Enabled: true || false, * Processors: [ // ProcessorList * { // Processor - * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * Parameters: [ // ProcessorParameterList * { // ProcessorParameter - * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * ParameterValue: "STRING_VALUE", // required * }, * ], @@ -306,10 +306,10 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * Enabled: true || false, * Processors: [ * { - * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * Parameters: [ * { - * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * ParameterValue: "STRING_VALUE", // required * }, * ], @@ -370,10 +370,10 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * Enabled: true || false, * Processors: [ * { - * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * Parameters: [ * { - * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || 
"BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * ParameterValue: "STRING_VALUE", // required * }, * ], @@ -414,10 +414,10 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * Enabled: true || false, * Processors: [ * { - * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * Parameters: [ * { - * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * ParameterValue: "STRING_VALUE", // required * }, * ], @@ -452,10 +452,10 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * Enabled: true || false, * Processors: [ * { - * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * Parameters: [ * { - * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * ParameterValue: "STRING_VALUE", // required * }, * ], @@ -523,6 +523,14 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * ], * }, * }, + * MSKSourceConfiguration: { // MSKSourceConfiguration + * MSKClusterARN: "STRING_VALUE", // required + * TopicName: "STRING_VALUE", // required + * AuthenticationConfiguration: { // AuthenticationConfiguration + * RoleARN: "STRING_VALUE", // required + * Connectivity: "PUBLIC" || "PRIVATE", // required + * }, + * }, * }; * const command = new CreateDeliveryStreamCommand(input); * const response = await client.send(command); diff --git a/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts index b09e1b126a1d..4af162d94ca1 100644 --- a/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts @@ -79,7 +79,7 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // Details: "STRING_VALUE", // required * // }, * // }, - * // DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource", // required + * // DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource", // required * // VersionId: "STRING_VALUE", // required * // CreateTimestamp: new Date("TIMESTAMP"), * // LastUpdateTimestamp: new Date("TIMESTAMP"), @@ -89,6 +89,15 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // RoleARN: "STRING_VALUE", * // DeliveryStartTimestamp: new Date("TIMESTAMP"), * // 
}, + * // MSKSourceDescription: { // MSKSourceDescription + * // MSKClusterARN: "STRING_VALUE", + * // TopicName: "STRING_VALUE", + * // AuthenticationConfiguration: { // AuthenticationConfiguration + * // RoleARN: "STRING_VALUE", // required + * // Connectivity: "PUBLIC" || "PRIVATE", // required + * // }, + * // DeliveryStartTimestamp: new Date("TIMESTAMP"), + * // }, * // }, * // Destinations: [ // DestinationDescriptionList // required * // { // DestinationDescription @@ -140,10 +149,10 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // Enabled: true || false, * // Processors: [ // ProcessorList * // { // Processor - * // Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * // Parameters: [ // ProcessorParameterList * // { // ProcessorParameter - * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * // ParameterValue: "STRING_VALUE", // required * // }, * // ], @@ -271,10 +280,10 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // Enabled: true || false, * // Processors: [ * // { - * // Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * // Parameters: [ * // { - * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * // ParameterValue: "STRING_VALUE", // required * // }, * // ], @@ -335,10 +344,10 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // Enabled: true || false, * // Processors: [ * // { - * // Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * // Parameters: [ * // { - * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * // ParameterValue: "STRING_VALUE", // required * // }, * // ], @@ -380,10 +389,10 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // Enabled: true || false, * // 
Processors: [ * // { - * // Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * // Parameters: [ * // { - * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * // ParameterValue: "STRING_VALUE", // required * // }, * // ], @@ -419,10 +428,10 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // Enabled: true || false, * // Processors: [ * // { - * // Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required + * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required * // Parameters: [ * // { - * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required + * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required * // ParameterValue: "STRING_VALUE", // required * // }, * // ], diff --git a/clients/client-firehose/src/commands/ListDeliveryStreamsCommand.ts b/clients/client-firehose/src/commands/ListDeliveryStreamsCommand.ts index 786178ab7cbd..0dfddc872d6a 100644 --- a/clients/client-firehose/src/commands/ListDeliveryStreamsCommand.ts +++ b/clients/client-firehose/src/commands/ListDeliveryStreamsCommand.ts @@ -53,7 +53,7 @@ export interface ListDeliveryStreamsCommandOutput extends ListDeliveryStreamsOut * const client = new FirehoseClient(config); * const input = { // ListDeliveryStreamsInput * Limit: Number("int"), - * DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource", + * DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource", * ExclusiveStartDeliveryStreamName: "STRING_VALUE", * }; * const command = new ListDeliveryStreamsCommand(input); diff --git a/clients/client-firehose/src/commands/PutRecordBatchCommand.ts b/clients/client-firehose/src/commands/PutRecordBatchCommand.ts index 4bf0a7cb057e..054efbf17f01 100644 --- a/clients/client-firehose/src/commands/PutRecordBatchCommand.ts +++ b/clients/client-firehose/src/commands/PutRecordBatchCommand.ts @@ -41,6 +41,7 @@ export interface PutRecordBatchCommandOutput extends PutRecordBatchOutput, __Met * achieve higher throughput per producer than when writing single records. To write single * data records into a delivery stream, use PutRecord. Applications using * these operations are referred to as producers.
+ *Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.
*For information about service quota, see Amazon Kinesis Data Firehose * Quota.
*Each PutRecordBatch request supports up to 500 records. Each record @@ -77,8 +78,11 @@ export interface PutRecordBatchCommandOutput extends PutRecordBatchOutput, __Met * duplicate records and also reduces the total bytes sent (and corresponding charges). We * recommend that you handle any duplicates at the destination.
 *If PutRecordBatch throws ServiceUnavailableException,
- * back off and retry. If the exception persists, it is possible that the throughput limits
- * have been exceeded for the delivery stream.
+ * the API is automatically reinvoked (retried) 3 times. If the exception persists, it is
+ * possible that the throughput limits have been exceeded for the delivery stream.
+ *Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can
+ * result in data duplicates. For larger data assets, allow for a longer time out before
+ * retrying Put API operations.
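The replacement text above tightens the retry contract: the SDK retries ServiceUnavailableException automatically, and blind re-invocation can duplicate records. The earlier hunk's FailedPutCount guidance describes the safer pattern — resend only the failed entries. A minimal sketch of that loop (not part of the patch; the stream name, payloads, and backoff policy are placeholders):

```ts
import { FirehoseClient, PutRecordBatchCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Resend only the records whose per-record response carries an ErrorCode,
// backing off between attempts. Duplicates must still be handled downstream.
async function putBatchWithRetry(streamName: string, payloads: string[], maxAttempts = 3) {
  let pending = payloads.map((p) => ({ Data: new TextEncoder().encode(p) }));
  for (let attempt = 1; pending.length > 0 && attempt <= maxAttempts; attempt++) {
    const response = await client.send(
      new PutRecordBatchCommand({ DeliveryStreamName: streamName, Records: pending })
    );
    if (!response.FailedPutCount) return; // every record was accepted
    // RequestResponses is index-aligned with the Records just sent.
    pending = pending.filter((_, i) => response.RequestResponses?.[i]?.ErrorCode);
    await new Promise((r) => setTimeout(r, 2 ** attempt * 100)); // exponential backoff
  }
  if (pending.length > 0) throw new Error(`${pending.length} records were not delivered`);
}
```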
*Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they * are added to a delivery stream as it attempts to send the records to the destination. If * the destination is unreachable for more than 24 hours, the data is no longer diff --git a/clients/client-firehose/src/commands/PutRecordCommand.ts b/clients/client-firehose/src/commands/PutRecordCommand.ts index 15c329a518b2..da99cc193038 100644 --- a/clients/client-firehose/src/commands/PutRecordCommand.ts +++ b/clients/client-firehose/src/commands/PutRecordCommand.ts @@ -46,6 +46,7 @@ export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBeare * operations for each delivery stream. For more information about limits and how to request * an increase, see Amazon * Kinesis Data Firehose Limits.
+ *Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.
*You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 * KiB in size, and any kind of data. For example, it can be a segment from a log file, * geographic location data, website clickstream data, and so on.
@@ -58,8 +59,12 @@ export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBeare
 * unique string assigned to each record. Producer applications can use this ID for purposes
 * such as auditability and investigation.
 *If the PutRecord operation throws a
- * ServiceUnavailableException, back off and retry. If the exception persists,
- * it is possible that the throughput limits have been exceeded for the delivery stream.
+ * ServiceUnavailableException, the API is automatically reinvoked (retried) 3
+ * times. If the exception persists, it is possible that the throughput limits have been
+ * exceeded for the delivery stream.
+ *Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can
+ * result in data duplicates. For larger data assets, allow for a longer time out before
+ * retrying Put API operations.
*Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they
* are added to a delivery stream as it tries to send the records to the destination. If the
* destination is unreachable for more than 24 hours, the data is no longer
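The same retry note applies to single-record writes. A hedged sketch of backing off when ServiceUnavailableException outlives the SDK's three automatic retries (the stream name is a placeholder; duplicates still need handling at the destination):

```ts
import {
  FirehoseClient,
  PutRecordCommand,
  ServiceUnavailableException,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Back off and retry a single PutRecord while the service reports overload.
async function putWithBackoff(data: Uint8Array, maxAttempts = 5): Promise<string | undefined> {
  for (let attempt = 1; ; attempt++) {
    try {
      const { RecordId } = await client.send(
        new PutRecordCommand({ DeliveryStreamName: "my-delivery-stream", Record: { Data: data } })
      );
      return RecordId; // unique per record; useful for auditing
    } catch (err) {
      if (!(err instanceof ServiceUnavailableException) || attempt >= maxAttempts) throw err;
      await new Promise((r) => setTimeout(r, 2 ** attempt * 200));
    }
  }
}
```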
diff --git a/clients/client-firehose/src/commands/StartDeliveryStreamEncryptionCommand.ts b/clients/client-firehose/src/commands/StartDeliveryStreamEncryptionCommand.ts
index 7cdb7a9754c1..eb6dc0d8b4f5 100644
--- a/clients/client-firehose/src/commands/StartDeliveryStreamEncryptionCommand.ts
+++ b/clients/client-firehose/src/commands/StartDeliveryStreamEncryptionCommand.ts
@@ -62,6 +62,7 @@ export interface StartDeliveryStreamEncryptionCommandOutput
 * CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant
 * that enables it to use the new CMK to encrypt and decrypt data and to manage the
 * grant.
+ *For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.
 *If a delivery stream already has encryption enabled and then you invoke this operation
 * to change the ARN of the CMK or both its type and ARN and you get
 * ENABLING_FAILED, this only means that the attempt to change the CMK failed.
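Since the operation is asynchronous and the new six-hour credential caveat applies at grant-creation time, callers typically start encryption and then poll for a terminal status. An illustrative sequence (not from the patch; ARNs and the polling interval are placeholders, assuming an ES-module context where top-level await is allowed):

```ts
import {
  FirehoseClient,
  StartDeliveryStreamEncryptionCommand,
  DescribeDeliveryStreamCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});
const DeliveryStreamName = "my-delivery-stream";

await client.send(
  new StartDeliveryStreamEncryptionCommand({
    DeliveryStreamName,
    DeliveryStreamEncryptionConfigurationInput: {
      KeyType: "CUSTOMER_MANAGED_CMK",
      KeyARN: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE",
    },
  })
);

// Poll until the stream leaves ENABLING; ENABLING_FAILED means the CMK change
// failed and encryption stays on the old key, as the doc text above explains.
let status: string | undefined = "ENABLING";
while (status === "ENABLING") {
  await new Promise((r) => setTimeout(r, 5_000));
  const { DeliveryStreamDescription } = await client.send(
    new DescribeDeliveryStreamCommand({ DeliveryStreamName })
  );
  status = DeliveryStreamDescription?.DeliveryStreamEncryptionConfiguration?.Status;
}
```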
diff --git a/clients/client-firehose/src/commands/UpdateDestinationCommand.ts b/clients/client-firehose/src/commands/UpdateDestinationCommand.ts
index fbbd51ee22ec..04d0dd73dd9f 100644
--- a/clients/client-firehose/src/commands/UpdateDestinationCommand.ts
+++ b/clients/client-firehose/src/commands/UpdateDestinationCommand.ts
@@ -121,10 +121,10 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput,
* Enabled: true || false,
* Processors: [ // ProcessorList
* { // Processor
- * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
* Parameters: [ // ProcessorParameterList
* { // ProcessorParameter
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
* ParameterValue: "STRING_VALUE", // required
* },
* ],
@@ -253,10 +253,10 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput,
* Enabled: true || false,
* Processors: [
* {
- * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
* Parameters: [
* {
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
* ParameterValue: "STRING_VALUE", // required
* },
* ],
@@ -316,10 +316,10 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput,
* Enabled: true || false,
* Processors: [
* {
- * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
* Parameters: [
* {
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
* ParameterValue: "STRING_VALUE", // required
* },
* ],
@@ -350,10 +350,10 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput,
* Enabled: true || false,
* Processors: [
* {
- * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
* Parameters: [
* {
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
* ParameterValue: "STRING_VALUE", // required
* },
* ],
@@ -379,10 +379,10 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput,
* Enabled: true || false,
* Processors: [
* {
- * Type: "RecordDeAggregation" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
* Parameters: [
* {
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
* ParameterValue: "STRING_VALUE", // required
* },
* ],
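Every hunk in this file adds the same two enum members: a Decompression processor type and a CompressionFormat processor parameter. A hedged sketch of what a caller might send once the change lands — the version ID, destination ID, and GZIP value are illustrative, not taken from the patch:

```ts
import { FirehoseClient, UpdateDestinationCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Attach a Decompression processor so records are decompressed before delivery.
await client.send(
  new UpdateDestinationCommand({
    DeliveryStreamName: "my-delivery-stream",
    CurrentDeliveryStreamVersionId: "1", // obtain via DescribeDeliveryStream
    DestinationId: "destinationId-000000000001",
    ExtendedS3DestinationUpdate: {
      ProcessingConfiguration: {
        Enabled: true,
        Processors: [
          {
            Type: "Decompression", // new in this change
            Parameters: [{ ParameterName: "CompressionFormat", ParameterValue: "GZIP" }],
          },
        ],
      },
    },
  })
);
```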
diff --git a/clients/client-firehose/src/models/models_0.ts b/clients/client-firehose/src/models/models_0.ts
index cc6341b6e00e..5fbcc0bf03a5 100644
--- a/clients/client-firehose/src/models/models_0.ts
+++ b/clients/client-firehose/src/models/models_0.ts
@@ -60,6 +60,7 @@ export interface CloudWatchLoggingOptions {
export const ProcessorParameterName = {
BUFFER_INTERVAL_IN_SECONDS: "BufferIntervalInSeconds",
BUFFER_SIZE_IN_MB: "BufferSizeInMBs",
+ COMPRESSION_FORMAT: "CompressionFormat",
Delimiter: "Delimiter",
JSON_PARSING_ENGINE: "JsonParsingEngine",
LAMBDA_ARN: "LambdaArn",
@@ -102,6 +103,7 @@ export interface ProcessorParameter {
*/
export const ProcessorType = {
AppendDelimiterToRecord: "AppendDelimiterToRecord",
+ Decompression: "Decompression",
Lambda: "Lambda",
MetadataExtraction: "MetadataExtraction",
RecordDeAggregation: "RecordDeAggregation",
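These generated const objects double as runtime values and types, so the new members can be referenced without raw string literals. A small sketch (the imported names exist in the package; the processor object itself is illustrative):

```ts
import { ProcessorParameterName, ProcessorType } from "@aws-sdk/client-firehose";

// Equivalent to the string-literal form, but checked against the enum objects.
const decompressionProcessor = {
  Type: ProcessorType.Decompression,
  Parameters: [
    { ParameterName: ProcessorParameterName.COMPRESSION_FORMAT, ParameterValue: "GZIP" },
  ],
};
```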
@@ -1277,6 +1279,38 @@ export interface AmazonopensearchserviceDestinationUpdate {
DocumentIdOptions?: DocumentIdOptions;
}
+/**
+ * @public
+ * @enum
+ */
+export const Connectivity = {
+ PRIVATE: "PRIVATE",
+ PUBLIC: "PUBLIC",
+} as const;
+
+/**
+ * @public
+ */
+export type Connectivity = (typeof Connectivity)[keyof typeof Connectivity];
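The const-object/type-alias pair resolves to the closed union "PUBLIC" | "PRIVATE". A quick illustration (note that AuthenticationConfiguration below types the member as Connectivity | string, so arbitrary strings are still accepted there for forward compatibility):

```ts
import { Connectivity } from "@aws-sdk/client-firehose";

const privateAccess: Connectivity = Connectivity.PRIVATE; // "PRIVATE"
const publicAccess: Connectivity = "PUBLIC"; // literals are accepted too
// const bad: Connectivity = "VPN"; // rejected at compile time
```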
+
+/**
+ * @public
+ * The authentication configuration of the Amazon MSK cluster.
+ */
+export interface AuthenticationConfiguration {
+  /**
+   * @public
+   * The ARN of the role used to access the Amazon MSK cluster.
+   */
+  RoleARN: string | undefined;
+
+  /**
+   * @public
+   * The type of connectivity used to access the Amazon MSK cluster.
+   */
+  Connectivity: Connectivity | string | undefined;
+}
+
 /**
  * @public
  * Another modification has already happened. Fetch VersionId again and use
@@ -1415,6 +1449,7 @@ export interface DeliveryStreamEncryptionConfigurationInput {
export const DeliveryStreamType = {
DirectPut: "DirectPut",
KinesisStreamAsSource: "KinesisStreamAsSource",
+ MSKAsSource: "MSKAsSource",
} as const;
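Putting the new pieces together — the MSKAsSource stream type above plus the MSKSourceConfiguration and AuthenticationConfiguration shapes added in this file. A sketch only: every ARN and name is a placeholder, and a real call also needs a reachable destination (a minimal S3 destination is shown):

```ts
import { FirehoseClient, CreateDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

await client.send(
  new CreateDeliveryStreamCommand({
    DeliveryStreamName: "msk-to-s3",
    DeliveryStreamType: "MSKAsSource", // new enum value
    MSKSourceConfiguration: {
      MSKClusterARN: "arn:aws:kafka:us-east-1:111122223333:cluster/example/1-2-3",
      TopicName: "clickstream",
      AuthenticationConfiguration: {
        RoleARN: "arn:aws:iam::111122223333:role/firehose-msk-access",
        Connectivity: "PRIVATE",
      },
    },
    ExtendedS3DestinationConfiguration: {
      RoleARN: "arn:aws:iam::111122223333:role/firehose-s3-delivery",
      BucketARN: "arn:aws:s3:::example-destination-bucket",
    },
  })
);
```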
/**
@@ -2423,6 +2458,31 @@ export interface KinesisStreamSourceConfiguration {
RoleARN: string | undefined;
}
+/**
+ * @public
+ * The configuration for the Amazon MSK cluster to be used as the source for a delivery
+ * stream.
+ */
+export interface MSKSourceConfiguration {
+  /**
+   * @public
+   * The ARN of the Amazon MSK cluster.
+   */
+  MSKClusterARN: string | undefined;
+
+  /**
+   * @public
+   * The topic name within the Amazon MSK cluster.
+   */
+  TopicName: string | undefined;
+
+  /**
+   * @public
+   * The authentication configuration of the Amazon MSK cluster.
+   */
+  AuthenticationConfiguration: AuthenticationConfiguration | undefined;
+}
+
 /**
  * @public
  * Configures retry behavior in case Kinesis Data Firehose is unable to deliver
@@ -2786,6 +2846,13 @@ export interface CreateDeliveryStreamInput {
  *          specify only one destination.
  */
 AmazonOpenSearchServerlessDestinationConfiguration?: AmazonOpenSearchServerlessDestinationConfiguration;
+
+  /**
+   * @public
+   * The configuration for the Amazon MSK cluster to be used as the source for a delivery
+   * stream.
+   */
+  MSKSourceConfiguration?: MSKSourceConfiguration;
 }

 /**
@@ -3575,6 +3642,38 @@ export interface KinesisStreamSourceDescription {
   DeliveryStartTimestamp?: Date;
 }

+/**
+ * @public
+ * Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose
+ * delivery stream.
+ */
+export interface MSKSourceDescription {
+  /**
+   * @public
+   * The ARN of the Amazon MSK cluster.
+   */
+  MSKClusterARN?: string;
+
+  /**
+   * @public
+   * The topic name within the Amazon MSK cluster.
+   */
+  TopicName?: string;
+
+  /**
+   * @public
+   * The authentication configuration of the Amazon MSK cluster.
+   */
+  AuthenticationConfiguration?: AuthenticationConfiguration;
+
+  /**
+   * @public
+   * Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK
+   * cluster starting with this timestamp.
+   */
+  DeliveryStartTimestamp?: Date;
+}
+
 /**
  * @public
  * Details about a Kinesis data stream used as the source for a Kinesis Data Firehose
@@ -3587,6 +3686,13 @@ export interface SourceDescription {
  *          data stream.
  */
 KinesisStreamSourceDescription?: KinesisStreamSourceDescription;
+
+  /**
+   * @public
+   * The configuration description for the Amazon MSK cluster to be used as the source for a delivery
+   * stream.
+ */ + MSKSourceDescription?: MSKSourceDescription; } /** diff --git a/clients/client-firehose/src/protocols/Aws_json1_1.ts b/clients/client-firehose/src/protocols/Aws_json1_1.ts index ef8a0c457155..eb4356185173 100644 --- a/clients/client-firehose/src/protocols/Aws_json1_1.ts +++ b/clients/client-firehose/src/protocols/Aws_json1_1.ts @@ -69,6 +69,7 @@ import { AmazonopensearchserviceDestinationConfiguration, AmazonopensearchserviceDestinationUpdate, AmazonopensearchserviceRetryOptions, + AuthenticationConfiguration, BufferingHints, CloudWatchLoggingOptions, ConcurrentModificationException, @@ -109,6 +110,8 @@ import { LimitExceededException, ListDeliveryStreamsInput, ListTagsForDeliveryStreamInput, + MSKSourceConfiguration, + MSKSourceDescription, OpenXJsonSerDe, OrcSerDe, OutputFormatConfiguration, @@ -1056,6 +1059,8 @@ const de_ServiceUnavailableExceptionRes = async ( // se_AmazonopensearchserviceRetryOptions omitted. +// se_AuthenticationConfiguration omitted. + // se_BufferingHints omitted. // se_CloudWatchLoggingOptions omitted. @@ -1078,6 +1083,7 @@ const se_CreateDeliveryStreamInput = (input: CreateDeliveryStreamInput, context: ExtendedS3DestinationConfiguration: (_) => se_ExtendedS3DestinationConfiguration(_, context), HttpEndpointDestinationConfiguration: _json, KinesisStreamSourceConfiguration: _json, + MSKSourceConfiguration: _json, RedshiftDestinationConfiguration: _json, S3DestinationConfiguration: _json, SplunkDestinationConfiguration: _json, @@ -1199,6 +1205,8 @@ const se_ExtendedS3DestinationUpdate = (input: ExtendedS3DestinationUpdate, cont // se_ListTagsForDeliveryStreamInput omitted. +// se_MSKSourceConfiguration omitted. + // se_OpenXJsonSerDe omitted. /** @@ -1361,6 +1369,8 @@ const se_UpdateDestinationInput = (input: UpdateDestinationInput, context: __Ser // de_AmazonopensearchserviceRetryOptions omitted. +// de_AuthenticationConfiguration omitted. + // de_BufferingHints omitted. // de_CloudWatchLoggingOptions omitted. @@ -1539,6 +1549,18 @@ const de_KinesisStreamSourceDescription = (output: any, context: __SerdeContext) // de_ListTagsForDeliveryStreamOutputTagList omitted. +/** + * deserializeAws_json1_1MSKSourceDescription + */ +const de_MSKSourceDescription = (output: any, context: __SerdeContext): MSKSourceDescription => { + return take(output, { + AuthenticationConfiguration: _json, + DeliveryStartTimestamp: (_: any) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))), + MSKClusterARN: __expectString, + TopicName: __expectString, + }) as any; +}; + // de_OpenXJsonSerDe omitted. /** @@ -1622,6 +1644,7 @@ const de_Serializer = (output: any, context: __SerdeContext): Serializer => { const de_SourceDescription = (output: any, context: __SerdeContext): SourceDescription => { return take(output, { KinesisStreamSourceDescription: (_: any) => de_KinesisStreamSourceDescription(_, context), + MSKSourceDescription: (_: any) => de_MSKSourceDescription(_, context), }) as any; }; diff --git a/codegen/sdk-codegen/aws-models/firehose.json b/codegen/sdk-codegen/aws-models/firehose.json index ebe91f1d95bd..d8087699f80f 100644 --- a/codegen/sdk-codegen/aws-models/firehose.json +++ b/codegen/sdk-codegen/aws-models/firehose.json @@ -690,6 +690,28 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.firehose#AuthenticationConfiguration": { + "type": "structure", + "members": { + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "The ARN of the role used to access the Amazon MSK cluster.
", + "smithy.api#required": {} + } + }, + "Connectivity": { + "target": "com.amazonaws.firehose#Connectivity", + "traits": { + "smithy.api#documentation": "The type of connectivity used to access the Amazon MSK cluster.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The authentication configuration of the Amazon MSK cluster.
" + } + }, "com.amazonaws.firehose#BlockSizeBytes": { "type": "integer", "traits": { @@ -764,7 +786,7 @@ "min": 1, "max": 512 }, - "smithy.api#pattern": "^jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?The destination in the Serverless offering for Amazon OpenSearch Service. You can\n specify only one destination." } + }, + "MSKSourceConfiguration": { + "target": "com.amazonaws.firehose#MSKSourceConfiguration" } }, "traits": { @@ -1488,6 +1530,12 @@ "traits": { "smithy.api#enumValue": "KinesisStreamAsSource" } + }, + "MSKAsSource": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MSKAsSource" + } } } }, @@ -4293,6 +4341,77 @@ "smithy.api#pattern": "^[^:*]*$" } }, + "com.amazonaws.firehose#MSKClusterARN": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": "^arn:" + } + }, + "com.amazonaws.firehose#MSKSourceConfiguration": { + "type": "structure", + "members": { + "MSKClusterARN": { + "target": "com.amazonaws.firehose#MSKClusterARN", + "traits": { + "smithy.api#documentation": "The ARN of the Amazon MSK cluster.
", + "smithy.api#required": {} + } + }, + "TopicName": { + "target": "com.amazonaws.firehose#TopicName", + "traits": { + "smithy.api#documentation": "The topic name within the Amazon MSK cluster.
", + "smithy.api#required": {} + } + }, + "AuthenticationConfiguration": { + "target": "com.amazonaws.firehose#AuthenticationConfiguration", + "traits": { + "smithy.api#documentation": "The authentication configuration of the Amazon MSK cluster.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration for the Amazon MSK cluster to be used as the source for a delivery\n stream.
" + } + }, + "com.amazonaws.firehose#MSKSourceDescription": { + "type": "structure", + "members": { + "MSKClusterARN": { + "target": "com.amazonaws.firehose#MSKClusterARN", + "traits": { + "smithy.api#documentation": "The ARN of the Amazon MSK cluster.
" + } + }, + "TopicName": { + "target": "com.amazonaws.firehose#TopicName", + "traits": { + "smithy.api#documentation": "The topic name within the Amazon MSK cluster.
" + } + }, + "AuthenticationConfiguration": { + "target": "com.amazonaws.firehose#AuthenticationConfiguration", + "traits": { + "smithy.api#documentation": "The authentication configuration of the Amazon MSK cluster.
" + } + }, + "DeliveryStartTimestamp": { + "target": "com.amazonaws.firehose#DeliveryStartTimestamp", + "traits": { + "smithy.api#documentation": "Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK\n cluster starting with this timestamp.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose\n delivery stream.
" + } + }, "com.amazonaws.firehose#NoEncryptionConfig": { "type": "enum", "members": { @@ -4740,6 +4859,12 @@ "traits": { "smithy.api#enumValue": "Delimiter" } + }, + "COMPRESSION_FORMAT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CompressionFormat" + } } } }, @@ -4762,6 +4887,12 @@ "smithy.api#enumValue": "RecordDeAggregation" } }, + "Decompression": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Decompression" + } + }, "Lambda": { "target": "smithy.api#Unit", "traits": { @@ -4814,7 +4945,7 @@ } ], "traits": { - "smithy.api#documentation": "Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To\n write multiple data records into a delivery stream, use PutRecordBatch.\n Applications using these operations are referred to as producers.
\nBy default, each delivery stream can take in up to 2,000 transactions per second,\n 5,000 records per second, or 5 MB per second. If you use PutRecord and\n PutRecordBatch, the limits are an aggregate across these two\n operations for each delivery stream. For more information about limits and how to request\n an increase, see Amazon\n Kinesis Data Firehose Limits.
\nYou must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000\n KiB in size, and any kind of data. For example, it can be a segment from a log file,\n geographic location data, website clickstream data, and so on.
\nKinesis Data Firehose buffers records before delivering them to the destination. To\n disambiguate the data blobs at the destination, a common solution is to use delimiters in\n the data, such as a newline (\\n
) or some other character unique within the\n data. This allows the consumer application to parse individual data items when reading the\n data from the destination.
The PutRecord
operation returns a RecordId
, which is a\n unique string assigned to each record. Producer applications can use this ID for purposes\n such as auditability and investigation.
If the PutRecord
operation throws a\n ServiceUnavailableException
, back off and retry. If the exception persists,\n it is possible that the throughput limits have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they\n are added to a delivery stream as it tries to send the records to the destination. If the\n destination is unreachable for more than 24 hours, the data is no longer\n available.
\nDon't concatenate two or more base64 strings to form the data fields of your records.\n Instead, concatenate the raw data, then perform base64 encoding.
\nWrites a single data record into an Amazon Kinesis Data Firehose delivery stream. To\n write multiple data records into a delivery stream, use PutRecordBatch.\n Applications using these operations are referred to as producers.
\nBy default, each delivery stream can take in up to 2,000 transactions per second,\n 5,000 records per second, or 5 MB per second. If you use PutRecord and\n PutRecordBatch, the limits are an aggregate across these two\n operations for each delivery stream. For more information about limits and how to request\n an increase, see Amazon\n Kinesis Data Firehose Limits.
\nKinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.
\nYou must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000\n KiB in size, and any kind of data. For example, it can be a segment from a log file,\n geographic location data, website clickstream data, and so on.
\nKinesis Data Firehose buffers records before delivering them to the destination. To\n disambiguate the data blobs at the destination, a common solution is to use delimiters in\n the data, such as a newline (\\n
) or some other character unique within the\n data. This allows the consumer application to parse individual data items when reading the\n data from the destination.
The PutRecord
operation returns a RecordId
, which is a\n unique string assigned to each record. Producer applications can use this ID for purposes\n such as auditability and investigation.
If the PutRecord
operation throws a\n ServiceUnavailableException
, the API is automatically reinvoked (retried) 3\n times. If the exception persists, it is possible that the throughput limits have been\n exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can\n result in data duplicates. For larger data assets, allow for a longer time out before\n retrying Put API operations.
\nData records sent to Kinesis Data Firehose are stored for 24 hours from the time they\n are added to a delivery stream as it tries to send the records to the destination. If the\n destination is unreachable for more than 24 hours, the data is no longer\n available.
\nDon't concatenate two or more base64 strings to form the data fields of your records.\n Instead, concatenate the raw data, then perform base64 encoding.
\nWrites multiple data records into a delivery stream in a single call, which can\n achieve higher throughput per producer than when writing single records. To write single\n data records into a delivery stream, use PutRecord. Applications using\n these operations are referred to as producers.
\nFor information about service quota, see Amazon Kinesis Data Firehose\n Quota.
\nEach PutRecordBatch request supports up to 500 records. Each record\n in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB\n for the entire request. These limits cannot be changed.
\nYou must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000\n KB in size, and any kind of data. For example, it could be a segment from a log file,\n geographic location data, website clickstream data, and so on.
\nKinesis Data Firehose buffers records before delivering them to the destination. To\n disambiguate the data blobs at the destination, a common solution is to use delimiters in\n the data, such as a newline (\\n
) or some other character unique within the\n data. This allows the consumer application to parse individual data items when reading the\n data from the destination.
The PutRecordBatch response includes a count of failed records,\n FailedPutCount
, and an array of responses, RequestResponses
.\n Even if the PutRecordBatch call succeeds, the value of\n FailedPutCount
may be greater than 0, indicating that there are records for\n which the operation didn't succeed. Each entry in the RequestResponses
array\n provides additional information about the processed record. It directly correlates with a\n record in the request array using the same ordering, from the top to the bottom. The\n response array always includes the same number of records as the request array.\n RequestResponses
includes both successfully and unsuccessfully processed\n records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing\n of subsequent records.
A successfully processed record includes a RecordId
value, which is\n unique for the record. An unsuccessfully processed record includes ErrorCode
\n and ErrorMessage
values. ErrorCode
reflects the type of error,\n and is one of the following values: ServiceUnavailableException
or\n InternalFailure
. ErrorMessage
provides more detailed\n information about the error.
If there is an internal server error or a timeout, the write might have completed or\n it might have failed. If FailedPutCount
is greater than 0, retry the request,\n resending only those records that might have failed processing. This minimizes the possible\n duplicate records and also reduces the total bytes sent (and corresponding charges). We\n recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
,\n back off and retry. If the exception persists, it is possible that the throughput limits\n have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they\n are added to a delivery stream as it attempts to send the records to the destination. If\n the destination is unreachable for more than 24 hours, the data is no longer\n available.
\nDon't concatenate two or more base64 strings to form the data fields of your records.\n Instead, concatenate the raw data, then perform base64 encoding.
\nWrites multiple data records into a delivery stream in a single call, which can\n achieve higher throughput per producer than when writing single records. To write single\n data records into a delivery stream, use PutRecord. Applications using\n these operations are referred to as producers.
\nKinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.
\nFor information about service quota, see Amazon Kinesis Data Firehose\n Quota.
\nEach PutRecordBatch request supports up to 500 records. Each record\n in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB\n for the entire request. These limits cannot be changed.
\nYou must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000\n KB in size, and any kind of data. For example, it could be a segment from a log file,\n geographic location data, website clickstream data, and so on.
\nKinesis Data Firehose buffers records before delivering them to the destination. To\n disambiguate the data blobs at the destination, a common solution is to use delimiters in\n the data, such as a newline (\\n
) or some other character unique within the\n data. This allows the consumer application to parse individual data items when reading the\n data from the destination.
The PutRecordBatch response includes a count of failed records,\n FailedPutCount
, and an array of responses, RequestResponses
.\n Even if the PutRecordBatch call succeeds, the value of\n FailedPutCount
may be greater than 0, indicating that there are records for\n which the operation didn't succeed. Each entry in the RequestResponses
array\n provides additional information about the processed record. It directly correlates with a\n record in the request array using the same ordering, from the top to the bottom. The\n response array always includes the same number of records as the request array.\n RequestResponses
includes both successfully and unsuccessfully processed\n records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing\n of subsequent records.
A successfully processed record includes a RecordId
value, which is\n unique for the record. An unsuccessfully processed record includes ErrorCode
\n and ErrorMessage
values. ErrorCode
reflects the type of error,\n and is one of the following values: ServiceUnavailableException
or\n InternalFailure
. ErrorMessage
provides more detailed\n information about the error.
If there is an internal server error or a timeout, the write might have completed or\n it might have failed. If FailedPutCount
is greater than 0, retry the request,\n resending only those records that might have failed processing. This minimizes the possible\n duplicate records and also reduces the total bytes sent (and corresponding charges). We\n recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
,\n the API is automatically reinvoked (retried) 3 times. If the exception persists, it is\n possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can\n result in data duplicates. For larger data assets, allow for a longer time out before\n retrying Put API operations.
\nData records sent to Kinesis Data Firehose are stored for 24 hours from the time they\n are added to a delivery stream as it attempts to send the records to the destination. If\n the destination is unreachable for more than 24 hours, the data is no longer\n available.
\nDon't concatenate two or more base64 strings to form the data fields of your records.\n Instead, concatenate the raw data, then perform base64 encoding.
\nThe KinesisStreamSourceDescription value for the source Kinesis\n data stream.
" } + }, + "MSKSourceDescription": { + "target": "com.amazonaws.firehose#MSKSourceDescription", + "traits": { + "smithy.api#documentation": "The configuration description for the Amazon MSK cluster to be used as the source for a delivery\n stream.
" + } } }, "traits": { @@ -5902,7 +6039,7 @@ } ], "traits": { - "smithy.api#documentation": "Enables server-side encryption (SSE) for the delivery stream.
\nThis operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data\n Firehose first sets the encryption status of the stream to ENABLING
, and then\n to ENABLED
. The encryption status of a delivery stream is the\n Status
property in DeliveryStreamEncryptionConfiguration.\n If the operation fails, the encryption status changes to ENABLING_FAILED
. You\n can continue to read and write data to your delivery stream while the encryption status is\n ENABLING
, but the data is not encrypted. It can take up to 5 seconds after\n the encryption status changes to ENABLED
before all records written to the\n delivery stream are encrypted. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
\nEven if encryption is currently enabled for a delivery stream, you can still invoke this\n operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this\n method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK
,\n Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new\n CMK is of type CUSTOMER_MANAGED_CMK
, Kinesis Data Firehose creates a grant\n that enables it to use the new CMK to encrypt and decrypt data and to manage the\n grant.
If a delivery stream already has encryption enabled and then you invoke this operation\n to change the ARN of the CMK or both its type and ARN and you get\n ENABLING_FAILED
, this only means that the attempt to change the CMK failed.\n In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you\n can invoke this operation again with a valid CMK. The CMK must be enabled and the key\n policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS\n encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it's a delivery stream that uses\n DirectPut
as its source.
The StartDeliveryStreamEncryption
and\n StopDeliveryStreamEncryption
operations have a combined limit of 25 calls\n per delivery stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption
13 times and\n StopDeliveryStreamEncryption
12 times for the same delivery stream in a\n 24-hour period.
Enables server-side encryption (SSE) for the delivery stream.
\nThis operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data\n Firehose first sets the encryption status of the stream to ENABLING
, and then\n to ENABLED
. The encryption status of a delivery stream is the\n Status
property in DeliveryStreamEncryptionConfiguration.\n If the operation fails, the encryption status changes to ENABLING_FAILED
. You\n can continue to read and write data to your delivery stream while the encryption status is\n ENABLING
, but the data is not encrypted. It can take up to 5 seconds after\n the encryption status changes to ENABLED
before all records written to the\n delivery stream are encrypted. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
\nEven if encryption is currently enabled for a delivery stream, you can still invoke this\n operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this\n method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK
,\n Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new\n CMK is of type CUSTOMER_MANAGED_CMK
, Kinesis Data Firehose creates a grant\n that enables it to use the new CMK to encrypt and decrypt data and to manage the\n grant.
For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption
and CreateDeliveryStream
should not be called with session credentials that are more than 6 hours old.
If a delivery stream already has encryption enabled and then you invoke this operation\n to change the ARN of the CMK or both its type and ARN and you get\n ENABLING_FAILED
, this only means that the attempt to change the CMK failed.\n In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you\n can invoke this operation again with a valid CMK. The CMK must be enabled and the key\n policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS\n encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it's a delivery stream that uses\n DirectPut
as its source.
The StartDeliveryStreamEncryption
and\n StopDeliveryStreamEncryption
operations have a combined limit of 25 calls\n per delivery stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption
13 times and\n StopDeliveryStreamEncryption
12 times for the same delivery stream in a\n 24-hour period.