diff --git a/packages/@aws-cdk/aws-kinesis/README.md b/packages/@aws-cdk/aws-kinesis/README.md
index 1e7aaf89a5a88..75831cb52b1ed 100644
--- a/packages/@aws-cdk/aws-kinesis/README.md
+++ b/packages/@aws-cdk/aws-kinesis/README.md
@@ -34,8 +34,8 @@ Using the CDK, a new Kinesis stream can be created as part of the stack using th
 your own identifier to the stream. If not, CloudFormation will generate a name.
 
 ```ts
-new Stream(this, "MyFirstStream", {
-  streamName: "my-awesome-stream"
+new kinesis.Stream(this, 'MyFirstStream', {
+  streamName: 'my-awesome-stream',
 });
 ```
 
@@ -44,10 +44,10 @@ to specify how long the data in the shards should remain accessible.
 Read more at [Creating and Managing Streams](https://docs.aws.amazon.com/streams/latest/dev/working-with-streams.html)
 
 ```ts
-new Stream(this, "MyFirstStream", {
-  streamName: "my-awesome-stream",
+new kinesis.Stream(this, 'MyFirstStream', {
+  streamName: 'my-awesome-stream',
   shardCount: 3,
-  retentionPeriod: Duration.hours(48)
+  retentionPeriod: Duration.hours(48),
 });
 ```
 
@@ -59,28 +59,26 @@ server-side encryption using an AWS KMS key for a specified stream.
 Encryption is enabled by default on your stream with the master key owned by Kinesis Data Streams in regions where it is supported.
 
 ```ts
-new Stream(this, 'MyEncryptedStream');
+new kinesis.Stream(this, 'MyEncryptedStream');
 ```
 
 You can enable encryption on your stream with a user-managed key by specifying the `encryption` property.
 A KMS key will be created for you and associated with the stream.
 
 ```ts
-new Stream(this, "MyEncryptedStream", {
-  encryption: StreamEncryption.KMS
+new kinesis.Stream(this, 'MyEncryptedStream', {
+  encryption: kinesis.StreamEncryption.KMS,
 });
 ```
 
 You can also supply your own external KMS key to use for stream encryption by specifying the `encryptionKey` property.
 
 ```ts
-import * as kms from "@aws-cdk/aws-kms";
+const key = new kms.Key(this, 'MyKey');
 
-const key = new kms.Key(this, "MyKey");
-
-new Stream(this, "MyEncryptedStream", {
-  encryption: StreamEncryption.KMS,
-  encryptionKey: key
+new kinesis.Stream(this, 'MyEncryptedStream', {
+  encryption: kinesis.StreamEncryption.KMS,
+  encryptionKey: key,
 });
 ```
 
@@ -91,32 +89,20 @@ Any Kinesis stream that has been created outside the stack can be imported into
 Streams can be imported by their ARN via the `Stream.fromStreamArn()` API
 
 ```ts
-const stack = new Stack(app, "MyStack");
-
-const importedStream = Stream.fromStreamArn(
-  stack,
-  "ImportedStream",
-  "arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j"
+const importedStream = kinesis.Stream.fromStreamArn(this, 'ImportedStream',
+  'arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j',
 );
 ```
 
 Encrypted Streams can also be imported by their attributes via the `Stream.fromStreamAttributes()` API
 
 ```ts
-import { Key } from "@aws-cdk/aws-kms";
-
-const stack = new Stack(app, "MyStack");
-
-const importedStream = Stream.fromStreamAttributes(
-  stack,
-  "ImportedEncryptedStream",
-  {
-    streamArn: "arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j",
-    encryptionKey: kms.Key.fromKeyArn(
-      "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
-    )
-  }
-);
+const importedStream = kinesis.Stream.fromStreamAttributes(this, 'ImportedEncryptedStream', {
+  streamArn: 'arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j',
+  encryptionKey: kms.Key.fromKeyArn(this, 'key',
+    'arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012',
+  ),
+});
 ```
 
 ### Permission Grants
 
@@ -138,10 +124,10 @@ If the stream has an encryption key, read permissions will also be granted to th
 const lambdaRole = new iam.Role(this, 'Role', {
   assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),
   description: 'Example role...',
-}
+});
 
-const stream = new Stream(this, 'MyEncryptedStream', {
-  encryption: StreamEncryption.KMS
+const stream = new kinesis.Stream(this, 'MyEncryptedStream', {
+  encryption: kinesis.StreamEncryption.KMS,
 });
 
 // give lambda permissions to read stream
@@ -165,10 +151,10 @@ If the stream has an encryption key, write permissions will also be granted to t
 const lambdaRole = new iam.Role(this, 'Role', {
   assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),
   description: 'Example role...',
-}
+});
 
-const stream = new Stream(this, 'MyEncryptedStream', {
-  encryption: StreamEncryption.KMS
+const stream = new kinesis.Stream(this, 'MyEncryptedStream', {
+  encryption: kinesis.StreamEncryption.KMS,
 });
 
 // give lambda permissions to write to stream
@@ -186,9 +172,9 @@ The following write permissions are provided to a service principal by the `gran
 You can add any set of permissions to a stream by calling the `grant()` API.
 
 ```ts
-const user = new iam.User(stack, 'MyUser');
+const user = new iam.User(this, 'MyUser');
 
-const stream = new Stream(stack, 'MyStream');
+const stream = new kinesis.Stream(this, 'MyStream');
 
 // give my user permissions to list shards
 stream.grant(user, 'kinesis:ListShards');
@@ -199,7 +185,7 @@ stream.grant(user, 'kinesis:ListShards');
 You can use common metrics from your stream to create alarms and/or dashboards. The `stream.metric('MetricName')` method creates a metric with the stream namespace and dimension. You can also use pre-defined methods like `stream.metricGetRecordsSuccess()`.
 To find out more about Kinesis metrics check [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html).
 
 ```ts
-const stream = new Stream(stack, 'MyStream');
+const stream = new kinesis.Stream(this, 'MyStream');
 
 // Using base metric method passing the metric name
 stream.metric('GetRecords.Success');
@@ -210,4 +196,3 @@ stream.metricGetRecordsSuccess();
 // using pre-defined and overriding the statistic
 stream.metricGetRecordsSuccess({ statistic: 'Maximum' });
 ```
-
diff --git a/packages/@aws-cdk/aws-kinesis/package.json b/packages/@aws-cdk/aws-kinesis/package.json
index 23f1f8684a19c..1641e48f910f7 100644
--- a/packages/@aws-cdk/aws-kinesis/package.json
+++ b/packages/@aws-cdk/aws-kinesis/package.json
@@ -28,7 +28,14 @@
         ]
       }
     },
-    "projectReferences": true
+    "projectReferences": true,
+    "metadata": {
+      "jsii": {
+        "rosetta": {
+          "strict": true
+        }
+      }
+    }
   },
   "repository": {
     "type": "git",
diff --git a/packages/@aws-cdk/aws-kinesis/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-kinesis/rosetta/default.ts-fixture
new file mode 100644
index 0000000000000..742f48088c404
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesis/rosetta/default.ts-fixture
@@ -0,0 +1,14 @@
+// Fixture with packages imported, but nothing else
+import { Construct } from 'constructs';
+import { Duration, Stack } from '@aws-cdk/core';
+import * as kinesis from '@aws-cdk/aws-kinesis';
+import * as kms from '@aws-cdk/aws-kms';
+import * as iam from '@aws-cdk/aws-iam';
+
+class Fixture extends Stack {
+  constructor(scope: Construct, id: string) {
+    super(scope, id);
+
+    /// here
+  }
+}
\ No newline at end of file
diff --git a/packages/@aws-cdk/aws-kinesisanalytics-flink/README.md b/packages/@aws-cdk/aws-kinesisanalytics-flink/README.md
index 830b59a2c7053..2882cc60afc54 100644
--- a/packages/@aws-cdk/aws-kinesisanalytics-flink/README.md
+++ b/packages/@aws-cdk/aws-kinesisanalytics-flink/README.md
@@ -37,16 +37,17 @@ aws-kinesisanalytics-runtime library to
 [retrieve these properties](https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-properties.html#how-properties-access).
 
 ```ts
-import * as flink from '@aws-cdk/aws-kinesisanalytics-flink';
-
+declare const bucket: s3.Bucket;
 const flinkApp = new flink.Application(this, 'Application', {
-  // ...
   propertyGroups: {
     FlinkApplicationProperties: {
       inputStreamName: 'my-input-kinesis-stream',
       outputStreamName: 'my-output-kinesis-stream',
     },
   },
+  // ...
+  runtime: flink.Runtime.FLINK_1_13,
+  code: flink.ApplicationCode.fromBucket(bucket, 'my-app.jar'),
 });
 ```
 
@@ -55,14 +56,13 @@ when the Flink job starts. These include parameters for checkpointing,
 snapshotting, monitoring, and parallelism.
 
 ```ts
-import * as logs from '@aws-cdk/aws-logs';
-
+declare const bucket: s3.Bucket;
 const flinkApp = new flink.Application(this, 'Application', {
   code: flink.ApplicationCode.fromBucket(bucket, 'my-app.jar'),
-  runtime: file.Runtime.FLINK_1_13,
+  runtime: flink.Runtime.FLINK_1_13,
   checkpointingEnabled: true, // default is true
-  checkpointInterval: cdk.Duration.seconds(30), // default is 1 minute
-  minPauseBetweenCheckpoints: cdk.Duration.seconds(10), // default is 5 seconds
+  checkpointInterval: Duration.seconds(30), // default is 1 minute
+  minPauseBetweenCheckpoints: Duration.seconds(10), // default is 5 seconds
   logLevel: flink.LogLevel.ERROR, // default is INFO
   metricsLevel: flink.MetricsLevel.PARALLELISM, // default is APPLICATION
   autoScalingEnabled: false, // default is true
diff --git a/packages/@aws-cdk/aws-kinesisanalytics-flink/package.json b/packages/@aws-cdk/aws-kinesisanalytics-flink/package.json
index f7c50fa0af0a2..1f4a026686416 100644
--- a/packages/@aws-cdk/aws-kinesisanalytics-flink/package.json
+++ b/packages/@aws-cdk/aws-kinesisanalytics-flink/package.json
@@ -28,7 +28,14 @@
         ]
       }
     },
-    "projectReferences": true
+    "projectReferences": true,
+    "metadata": {
+      "jsii": {
+        "rosetta": {
+          "strict": true
+        }
+      }
+    }
   },
   "repository": {
     "type": "git",
diff --git a/packages/@aws-cdk/aws-kinesisanalytics-flink/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-kinesisanalytics-flink/rosetta/default.ts-fixture
new file mode 100644
index 0000000000000..a9f46e29f793b
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesisanalytics-flink/rosetta/default.ts-fixture
@@ -0,0 +1,14 @@
+// Fixture with packages imported, but nothing else
+import { Construct } from 'constructs';
+import { Duration, Stack } from '@aws-cdk/core';
+import * as flink from '@aws-cdk/aws-kinesisanalytics-flink';
+import * as logs from '@aws-cdk/aws-logs';
+import * as s3 from '@aws-cdk/aws-s3';
+
+class Fixture extends Stack {
+  constructor(scope: Construct, id: string) {
+    super(scope, id);
+
+    /// here
+  }
+}
\ No newline at end of file
diff --git a/packages/@aws-cdk/aws-kinesisanalytics/package.json b/packages/@aws-cdk/aws-kinesisanalytics/package.json
index f01223c6c4cbb..36a2b60ede671 100644
--- a/packages/@aws-cdk/aws-kinesisanalytics/package.json
+++ b/packages/@aws-cdk/aws-kinesisanalytics/package.json
@@ -28,7 +28,14 @@
         ]
       }
     },
-    "projectReferences": true
+    "projectReferences": true,
+    "metadata": {
+      "jsii": {
+        "rosetta": {
+          "strict": true
+        }
+      }
+    }
   },
   "repository": {
     "type": "git",
diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/README.md b/packages/@aws-cdk/aws-kinesisfirehose-destinations/README.md
index 03ef4657b3f78..3873a6a493052 100644
--- a/packages/@aws-cdk/aws-kinesisfirehose-destinations/README.md
+++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/README.md
@@ -20,3 +20,7 @@ delivery stream. Destinations can be added by specifying the `destinations` prop
 defining a delivery stream. See [Amazon Kinesis Data Firehose module README](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-kinesisfirehose-readme.html) for usage examples.
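+
+For example, a destination can be wired into a delivery stream as follows (a minimal
+sketch: `destinations` is this module, imported as shown below, while `firehose` and `s3`
+are assumed to come from `@aws-cdk/aws-kinesisfirehose` and `@aws-cdk/aws-s3`, and the
+bucket is assumed to already exist):
+
+```ts
+declare const bucket: s3.Bucket;
+
+new firehose.DeliveryStream(this, 'Delivery Stream', {
+  destinations: [new destinations.S3Bucket(bucket)],
+});
+```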
+
+```ts nofixture
+import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
+```
diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/package.json b/packages/@aws-cdk/aws-kinesisfirehose-destinations/package.json
index a2ba48007718f..a872462d82cf8 100644
--- a/packages/@aws-cdk/aws-kinesisfirehose-destinations/package.json
+++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/package.json
@@ -32,7 +32,7 @@
     "metadata": {
       "jsii": {
         "rosetta": {
-          "strict": false
+          "strict": true
         }
       }
     }
diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-kinesisfirehose-destinations/rosetta/default.ts-fixture
index fe46e06908b34..f48bd7e013c59 100644
--- a/packages/@aws-cdk/aws-kinesisfirehose-destinations/rosetta/default.ts-fixture
+++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/rosetta/default.ts-fixture
@@ -1,8 +1,9 @@
 // Fixture with packages imported, but nothing else
-import { Construct } from '@aws-cdk/core';
-import { S3Bucket } from '@aws-cdk/aws-kinesisfirehose-destinations';
+import { Construct } from 'constructs';
+import { Stack } from '@aws-cdk/core';
+import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
 
-class Fixture extends Construct {
+class Fixture extends Stack {
   constructor(scope: Construct, id: string) {
     super(scope, id);
 
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/README.md b/packages/@aws-cdk/aws-kinesisfirehose/README.md
index 936fcdc881a86..2db9f79e6ba30 100644
--- a/packages/@aws-cdk/aws-kinesisfirehose/README.md
+++ b/packages/@aws-cdk/aws-kinesisfirehose/README.md
@@ -44,11 +44,8 @@ In order to define a Delivery Stream, you must specify a destination. An S3 buck
 used as a destination. More supported destinations are covered [below](#destinations).
 
 ```ts
-import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
-import * as s3 from '@aws-cdk/aws-s3';
-
 const bucket = new s3.Bucket(this, 'Bucket');
-new DeliveryStream(this, 'Delivery Stream', {
+new firehose.DeliveryStream(this, 'Delivery Stream', {
   destinations: [new destinations.S3Bucket(bucket)],
 });
 ```
 
@@ -74,11 +71,10 @@ A delivery stream can read directly from a Kinesis data stream as a consumer of
 stream. Configure this behaviour by providing a data stream in the `sourceStream`
 property when constructing a delivery stream:
 
-```ts fixture=with-destination
-import * as kinesis from '@aws-cdk/aws-kinesis';
-
+```ts
+declare const destination: firehose.IDestination;
 const sourceStream = new kinesis.Stream(this, 'Source Stream');
-new DeliveryStream(this, 'Delivery Stream', {
+new firehose.DeliveryStream(this, 'Delivery Stream', {
   sourceStream: sourceStream,
   destinations: [destination],
 });
@@ -113,14 +109,10 @@ for the implementations of these destinations.
 Defining a delivery stream with an S3 bucket destination:
 
 ```ts
-import * as s3 from '@aws-cdk/aws-s3';
-import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
-
-const bucket = new s3.Bucket(this, 'Bucket');
-
+declare const bucket: s3.Bucket;
 const s3Destination = new destinations.S3Bucket(bucket);
 
-new DeliveryStream(this, 'Delivery Stream', {
+new firehose.DeliveryStream(this, 'Delivery Stream', {
   destinations: [s3Destination],
 });
 ```
 
@@ -129,7 +121,8 @@ The S3 destination also supports custom dynamic prefixes. `prefix` will be used
 successfully delivered to S3. `errorOutputPrefix` will be added to failed records before
 writing them to S3.
 
-```ts fixture=with-bucket
+```ts
+declare const bucket: s3.Bucket;
 const s3Destination = new destinations.S3Bucket(bucket, {
   dataOutputPrefix: 'myFirehose/DeliveredYear=!{timestamp:yyyy}/anyMonth/rand=!{firehose:random-string}',
   errorOutputPrefix: 'myFirehoseFailures/!{firehose:error-output-type}/!{timestamp:yyyy}/anyMonth/!{timestamp:dd}',
@@ -158,22 +151,22 @@ access, rotation, aliases, and deletion for these keys, and you are charged for their use.
 See: [Customer master keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys)
 in the *KMS Developer Guide*.
 
-```ts fixture=with-destination
-import * as kms from '@aws-cdk/aws-kms';
+```ts
+declare const destination: firehose.IDestination;
 
 // SSE with an AWS-owned CMK
-new DeliveryStream(this, 'Delivery Stream AWS Owned', {
-  encryption: StreamEncryption.AWS_OWNED,
+new firehose.DeliveryStream(this, 'Delivery Stream AWS Owned', {
+  encryption: firehose.StreamEncryption.AWS_OWNED,
   destinations: [destination],
 });
 
 // SSE with a customer-managed CMK that is created automatically by the CDK
-new DeliveryStream(this, 'Delivery Stream Implicit Customer Managed', {
-  encryption: StreamEncryption.CUSTOMER_MANAGED,
+new firehose.DeliveryStream(this, 'Delivery Stream Implicit Customer Managed', {
+  encryption: firehose.StreamEncryption.CUSTOMER_MANAGED,
   destinations: [destination],
 });
 
 // SSE with a customer-managed CMK that is explicitly specified
-const key = new kms.Key(this, 'Key');
-new DeliveryStream(this, 'Delivery Stream Explicit Customer Managed', {
+declare const key: kms.Key;
+new firehose.DeliveryStream(this, 'Delivery Stream Explicit Customer Managed', {
   encryptionKey: key,
   destinations: [destination],
 });
@@ -196,28 +189,29 @@ and LogStream for your Delivery Stream.
 You can provide a specific log group to specify where the CDK will create the log streams
 where log events will be sent:
 
-```ts fixture=with-bucket
-import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
+```ts
 import * as logs from '@aws-cdk/aws-logs';
 
 const logGroup = new logs.LogGroup(this, 'Log Group');
+declare const bucket: s3.Bucket;
 const destination = new destinations.S3Bucket(bucket, {
   logGroup: logGroup,
 });
-new DeliveryStream(this, 'Delivery Stream', {
+
+new firehose.DeliveryStream(this, 'Delivery Stream', {
   destinations: [destination],
 });
 ```
 
 Logging can also be disabled:
 
-```ts fixture=with-bucket
-import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
-
+```ts
+declare const bucket: s3.Bucket;
 const destination = new destinations.S3Bucket(bucket, {
   logging: false,
 });
-new DeliveryStream(this, 'Delivery Stream', {
+new firehose.DeliveryStream(this, 'Delivery Stream', {
   destinations: [destination],
 });
 ```
 
@@ -242,8 +236,9 @@ for a full list). CDK also provides a generic `metric` method that can be used t
 metric configurations for any metric provided by Kinesis Data Firehose; the configurations
 are pre-populated with the correct dimensions for the delivery stream.
 
-```ts fixture=with-delivery-stream
+```ts
 import * as cloudwatch from '@aws-cdk/aws-cloudwatch';
+declare const deliveryStream: firehose.DeliveryStream;
 
 // Alarm that triggers when the per-second average of incoming bytes exceeds 90% of the current service limit
 const incomingBytesPercentOfLimit = new cloudwatch.MathExpression({
@@ -253,6 +248,7 @@ const incomingBytesPercentOfLimit = new cloudwatch.MathExpression({
     bytePerSecLimit: deliveryStream.metric('BytesPerSecondLimit'),
   },
 });
+
 new cloudwatch.Alarm(this, 'Alarm', {
   metric: incomingBytesPercentOfLimit,
   threshold: 0.9,
@@ -271,13 +267,14 @@ Hadoop-compatible Snappy, and ZIP, except for Redshift destinations, where Snapp
 (regardless of Hadoop-compatibility) and ZIP are not supported. By default, data is
 delivered to S3 without compression.
 
-```ts fixture=with-bucket
+```ts
 // Compress data delivered to S3 using Snappy
+declare const bucket: s3.Bucket;
 const s3Destination = new destinations.S3Bucket(bucket, {
-  compression: Compression.SNAPPY,
+  compression: destinations.Compression.SNAPPY,
 });
-new DeliveryStream(this, 'Delivery Stream', {
-  destinations: [destination],
+new firehose.DeliveryStream(this, 'Delivery Stream', {
+  destinations: [s3Destination],
 });
 ```
 
@@ -290,15 +287,14 @@ threshold (the "buffer interval"), whichever happens first. You can configure th
 thresholds based on the capabilities of the destination and your use-case. By default, the
 buffer size is 5 MiB and the buffer interval is 5 minutes.
 
-```ts fixture=with-bucket
-import * as cdk from '@aws-cdk/core';
-
+```ts
 // Increase the buffer interval and size to 10 minutes and 8 MiB, respectively
+declare const bucket: s3.Bucket;
 const destination = new destinations.S3Bucket(bucket, {
-  bufferingInterval: cdk.Duration.minutes(10),
-  bufferingSize: cdk.Size.mebibytes(8),
+  bufferingInterval: Duration.minutes(10),
+  bufferingSize: Size.mebibytes(8),
 });
-new DeliveryStream(this, 'Delivery Stream', {
+new firehose.DeliveryStream(this, 'Delivery Stream', {
   destinations: [destination],
 });
 ```
 
@@ -315,14 +311,13 @@ in Amazon S3. You can choose to not encrypt the data or to encrypt with a key fr
 the list of AWS KMS keys that you own. For more information, see [Protecting Data Using
 Server-Side Encryption with AWS KMS–Managed Keys (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
 Data is not encrypted by default.
 
-```ts fixture=with-bucket
-import * as cdk from '@aws-cdk/core';
-import * as kms from '@aws-cdk/aws-kms';
-
+```ts
+declare const bucket: s3.Bucket;
+declare const key: kms.Key;
 const destination = new destinations.S3Bucket(bucket, {
-  encryptionKey: new kms.Key(this, 'MyKey'),
+  encryptionKey: key,
 });
-new DeliveryStream(this, 'Delivery Stream', {
+new firehose.DeliveryStream(this, 'Delivery Stream', {
   destinations: [destination],
 });
 ```
 
@@ -337,37 +332,35 @@ you can provide a bucket where data will be backed up. You can also provide a pre
 which your backed-up data will be placed within the bucket. By default, source data is not backed
 up to S3.
 
-```ts fixture=with-bucket
-import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
-import * as s3 from '@aws-cdk/aws-s3';
-
+```ts
 // Enable backup of all source records (to an S3 bucket created by CDK).
-new DeliveryStream(this, 'Delivery Stream Backup All', {
+declare const bucket: s3.Bucket;
+new firehose.DeliveryStream(this, 'Delivery Stream Backup All', {
   destinations: [
     new destinations.S3Bucket(bucket, {
       s3Backup: {
-        mode: BackupMode.ALL,
-      }
+        mode: destinations.BackupMode.ALL,
+      },
     }),
   ],
 });
 
 // Explicitly provide an S3 bucket to which all source records will be backed up.
-const backupBucket = new s3.Bucket(this, 'Bucket');
-new DeliveryStream(this, 'Delivery Stream Backup All Explicit Bucket', {
+declare const backupBucket: s3.Bucket;
+new firehose.DeliveryStream(this, 'Delivery Stream Backup All Explicit Bucket', {
   destinations: [
     new destinations.S3Bucket(bucket, {
       s3Backup: {
         bucket: backupBucket,
-      }
+      },
    }),
  ],
 });
 
 // Explicitly provide an S3 prefix under which all source records will be backed up.
-new DeliveryStream(this, 'Delivery Stream Backup All Explicit Prefix', {
+new firehose.DeliveryStream(this, 'Delivery Stream Backup All Explicit Prefix', {
   destinations: [
     new destinations.S3Bucket(bucket, {
       s3Backup: {
-        mode: BackupMode.ALL,
+        mode: destinations.BackupMode.ALL,
         dataOutputPrefix: 'mybackup',
       },
     }),
   ],
@@ -405,10 +398,7 @@ configuration (see: [Buffering](#buffering)). If the function invocation fails d
 network timeout or because of hitting an invocation limit, the invocation is retried 3
 times by default, but can be configured using `retries` in the processor configuration.
 
-```ts fixture=with-bucket
-import * as cdk from '@aws-cdk/core';
-import * as lambda from '@aws-cdk/aws-lambda';
-
+```ts
 // Provide a Lambda function that will transform records before delivery, with custom
 // buffering and retry configuration
 const lambdaFunction = new lambda.Function(this, 'Processor', {
@@ -416,16 +406,17 @@ const lambdaFunction = new lambda.Function(this, 'Processor', {
   handler: 'index.handler',
   code: lambda.Code.fromAsset(path.join(__dirname, 'process-records')),
 });
-const lambdaProcessor = new LambdaFunctionProcessor(lambdaFunction, {
-  bufferingInterval: cdk.Duration.minutes(5),
-  bufferingSize: cdk.Size.mebibytes(5),
+const lambdaProcessor = new firehose.LambdaFunctionProcessor(lambdaFunction, {
+  bufferInterval: Duration.minutes(5),
+  bufferSize: Size.mebibytes(5),
   retries: 5,
 });
+declare const bucket: s3.Bucket;
 const s3Destination = new destinations.S3Bucket(bucket, {
   processor: lambdaProcessor,
 });
-new DeliveryStream(this, 'Delivery Stream', {
-  destinations: [destination],
+new firehose.DeliveryStream(this, 'Delivery Stream', {
+  destinations: [s3Destination],
 });
 ```
 
@@ -449,10 +440,7 @@ allow Kinesis Data Firehose to assume it) or delivery stream creation or data de
 will fail. Other required permissions to destination resources, encryption keys, etc.,
 will be provided automatically.
 
-```ts fixture=with-bucket
-import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
-import * as iam from '@aws-cdk/aws-iam';
-
+```ts
 // Create service roles for the delivery stream and destination.
 // These can be used for other purposes and granted access to different resources.
 // They must include the Kinesis Data Firehose service principal in their trust policies.
 const deliveryStreamRole = new iam.Role(this, 'Delivery Stream Role', {
   assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'),
 });
+const destinationRole = new iam.Role(this, 'Destination Role', {
   assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'),
 });
 
 // Specify the roles created above when defining the destination and delivery stream.
+declare const bucket: s3.Bucket;
 const destination = new destinations.S3Bucket(bucket, { role: destinationRole });
-new DeliveryStream(this, 'Delivery Stream', {
+new firehose.DeliveryStream(this, 'Delivery Stream', {
   destinations: [destination],
   role: deliveryStreamRole,
 });
@@ -488,14 +477,13 @@ can be granted permissions to a delivery stream by calling:
 - `grant(principal, ...actions)` - grants the principal permission to a custom set of actions
 
-```ts fixture=with-delivery-stream
-import * as iam from '@aws-cdk/aws-iam';
-
+```ts
 const lambdaRole = new iam.Role(this, 'Role', {
   assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),
 });
 
 // Give the role permissions to write data to the delivery stream
+declare const deliveryStream: firehose.DeliveryStream;
 deliveryStream.grantPutRecords(lambdaRole);
@@ -514,15 +502,14 @@ found in the `@aws-cdk/aws-kinesisfirehose-destinations` module, the CDK grants
 permissions automatically. However, custom or third-party destinations may require custom
 permissions. In this case, use the delivery stream as an `IGrantable`, as follows:
 
-```ts fixture=with-delivery-stream
-import * as lambda from '@aws-cdk/aws-lambda';
-
+```ts
 const fn = new lambda.Function(this, 'Function', {
   code: lambda.Code.fromInline('exports.handler = (event) => {}'),
   runtime: lambda.Runtime.NODEJS_14_X,
   handler: 'index.handler',
 });
 
+declare const deliveryStream: firehose.DeliveryStream;
 fn.grantInvoke(deliveryStream);
 ```
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/package.json b/packages/@aws-cdk/aws-kinesisfirehose/package.json
index c0843bf1d6829..64c30be1210e3 100644
--- a/packages/@aws-cdk/aws-kinesisfirehose/package.json
+++ b/packages/@aws-cdk/aws-kinesisfirehose/package.json
@@ -28,7 +28,14 @@
         ]
       }
     },
-    "projectReferences": true
+    "projectReferences": true,
+    "metadata": {
+      "jsii": {
+        "rosetta": {
+          "strict": true
+        }
+      }
+    }
   },
   "repository": {
     "type": "git",
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-kinesisfirehose/rosetta/default.ts-fixture
index 8a68efc25aa8e..9585eb9368d19 100644
--- a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/default.ts-fixture
+++ b/packages/@aws-cdk/aws-kinesisfirehose/rosetta/default.ts-fixture
@@ -1,6 +1,14 @@
 // Fixture with packages imported, but nothing else
-import { Construct, Stack } from '@aws-cdk/core';
-import { DeliveryStream, DestinationBindOptions, DestinationConfig, IDestination } from '@aws-cdk/aws-kinesisfirehose';
+import { Construct } from 'constructs';
+import { Duration, Size, Stack } from '@aws-cdk/core';
+import * as firehose from '@aws-cdk/aws-kinesisfirehose';
+import * as kinesis from '@aws-cdk/aws-kinesis';
+import * as s3 from '@aws-cdk/aws-s3';
+import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
+import * as kms from '@aws-cdk/aws-kms';
+import * as iam from '@aws-cdk/aws-iam';
+import * as lambda from '@aws-cdk/aws-lambda';
+import * as path from 'path';
 
 class Fixture extends Stack {
   constructor(scope: Construct, id: string) {
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-bucket.ts-fixture b/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-bucket.ts-fixture
deleted file mode 100644
index d0851cff49639..0000000000000
--- a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-bucket.ts-fixture
+++ /dev/null
@@ -1,13 +0,0 @@
-// Fixture with a bucket already created
-import { Construct, Stack } from '@aws-cdk/core';
-import { DeliveryStream, DestinationBindOptions, DestinationConfig, IDestination } from '@aws-cdk/aws-kinesisfirehose';
-import * as s3 from '@aws-cdk/aws-s3';
-declare const bucket: s3.Bucket;
-
-class Fixture extends Stack {
-  constructor(scope: Construct, id: string) {
-    super(scope, id);
-
-    /// here
-  }
-}
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-delivery-stream.ts-fixture b/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-delivery-stream.ts-fixture
deleted file mode 100644
index c7b75b20d2c1b..0000000000000
--- a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-delivery-stream.ts-fixture
+++ /dev/null
@@ -1,12 +0,0 @@
-// Fixture with a delivery stream already created
-import { Construct, Stack } from '@aws-cdk/core';
-import { DeliveryStream, DestinationBindOptions, DestinationConfig, IDestination } from '@aws-cdk/aws-kinesisfirehose';
-declare const deliveryStream: DeliveryStream;
-
-class Fixture extends Stack {
-  constructor(scope: Construct, id: string) {
-    super(scope, id);
-
-    /// here
-  }
-}
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-destination.ts-fixture b/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-destination.ts-fixture
deleted file mode 100644
index 37d78bf7a43d3..0000000000000
--- a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/with-destination.ts-fixture
+++ /dev/null
@@ -1,12 +0,0 @@
-// Fixture with a destination already created
-import { Construct, Stack } from '@aws-cdk/core';
-import { DeliveryStream, DestinationBindOptions, DestinationConfig, IDestination } from '@aws-cdk/aws-kinesisfirehose';
-declare const destination: IDestination;
-
-class Fixture extends Stack {
-  constructor(scope: Construct, id: string) {
-    super(scope, id);
-
-    /// here
-  }
-}