6 changes: 3 additions & 3 deletions packages/@aws-cdk/aws-iot-actions-alpha/README.md
@@ -28,7 +28,7 @@ Currently supported are:
- Capture CloudWatch metrics
- Change state for a CloudWatch alarm
- Put records to Kinesis Data stream
- Put records to Kinesis Data Firehose stream
- Put records to Amazon Data Firehose stream
- Send messages to SQS queues
- Publish messages on SNS topics
- Write messages into columns of DynamoDB
@@ -232,10 +232,10 @@ const topicRule = new iot.TopicRule(this, 'TopicRule', {
});
```

## Put records to Kinesis Data Firehose stream
## Put records to Amazon Data Firehose stream

The code snippet below creates an AWS IoT Rule that puts records
to a Kinesis Data Firehose stream when it is triggered.
to an Amazon Data Firehose stream when it is triggered.

```ts
import * as firehose from 'aws-cdk-lib/aws-kinesisfirehose';
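// The diff truncates this snippet here. A minimal sketch of how it likely
// continues, based on the FirehosePutRecordAction API changed later in this
// PR -- the S3Bucket destination, bucket, and SQL are illustrative assumptions:
import * as s3 from 'aws-cdk-lib/aws-s3';

const bucket = new s3.Bucket(this, 'Bucket');
const stream = new firehose.DeliveryStream(this, 'Stream', {
  destination: new firehose.S3Bucket(bucket),
});

const topicRule = new iot.TopicRule(this, 'TopicRule', {
  sql: iot.IotSql.fromStringAsVer20160323("SELECT * FROM 'device/+/data'"),
  actions: [
    new actions.FirehosePutRecordAction(stream, {
      batchMode: true, // deliver with PutRecordBatch; arrays cap at 500 records
      recordSeparator: actions.FirehoseRecordSeparator.NEWLINE,
    }),
  ],
});
```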
@@ -30,11 +30,11 @@ export enum FirehoseRecordSeparator {
}

/**
* Configuration properties of an action for the Kinesis Data Firehose stream.
* Configuration properties of an action for the Amazon Data Firehose stream.
*/
export interface FirehosePutRecordActionProps extends CommonActionProps {
/**
* Whether to deliver the Kinesis Data Firehose stream as a batch by using `PutRecordBatch`.
* Whether to deliver the Amazon Data Firehose stream as a batch by using `PutRecordBatch`.
* When batchMode is true and the rule's SQL statement evaluates to an Array, each Array
* element forms one record in the PutRecordBatch request. The resulting array can't have
* more than 500 records.
@@ -44,23 +44,23 @@ export interface FirehosePutRecordActionProps extends CommonActionProps {
readonly batchMode?: boolean;

/**
* A character separator that will be used to separate records written to the Kinesis Data Firehose stream.
* A character separator that will be used to separate records written to the Amazon Data Firehose stream.
*
* @default - none -- the stream does not use a separator
*/
readonly recordSeparator?: FirehoseRecordSeparator;
}

/**
* The action to put the record from an MQTT message to the Kinesis Data Firehose stream.
* The action to put the record from an MQTT message to the Amazon Data Firehose stream.
*/
export class FirehosePutRecordAction implements iot.IAction {
private readonly batchMode?: boolean;
private readonly recordSeparator?: string;
private readonly role?: iam.IRole;

/**
* @param stream The Kinesis Data Firehose stream to which to put records.
* @param stream The Amazon Data Firehose stream to which to put records.
* @param props Optional properties that override the defaults
*/
constructor(private readonly stream: firehose.IDeliveryStream, props: FirehosePutRecordActionProps = {}) {
2 changes: 1 addition & 1 deletion packages/@aws-cdk/aws-msk-alpha/README.md
@@ -177,7 +177,7 @@ const cluster = new msk.Cluster(this, 'Cluster', {
## Logging

You can deliver Apache Kafka broker logs to one or more of the following destination types:
Amazon CloudWatch Logs, Amazon S3, Amazon Kinesis Data Firehose.
Amazon CloudWatch Logs, Amazon S3, Amazon Data Firehose.

To configure logs to be sent to an S3 bucket, provide a bucket in the `logging` config.
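
A rough sketch of the `logging` block, using the `BrokerLogging` properties diffed below (the Kafka version, VPC, bucket, and stream name are illustrative assumptions):

```ts
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as s3 from 'aws-cdk-lib/aws-s3';
import * as msk from '@aws-cdk/aws-msk-alpha';

declare const vpc: ec2.IVpc;
declare const bucket: s3.IBucket;

const cluster = new msk.Cluster(this, 'Cluster', {
  clusterName: 'my-cluster',
  kafkaVersion: msk.KafkaVersion.V3_5_1, // illustrative version
  vpc,
  logging: {
    s3: { bucket },
    // Name of an existing Amazon Data Firehose delivery stream (assumption)
    firehoseDeliveryStreamName: 'broker-logs-stream',
  },
});
```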

2 changes: 1 addition & 1 deletion packages/@aws-cdk/aws-msk-alpha/lib/cluster.ts
@@ -275,7 +275,7 @@ export interface MonitoringConfiguration {
*/
export interface BrokerLogging {
/**
* The Kinesis Data Firehose delivery stream that is the destination for broker logs.
* The Amazon Data Firehose delivery stream that is the destination for broker logs.
*
* @default - disabled
*/
2 changes: 1 addition & 1 deletion packages/@aws-cdk/aws-pipes-alpha/lib/logs.ts
@@ -111,7 +111,7 @@ export interface LogDestinationParameters {
readonly cloudwatchLogsLogDestination?: CfnPipe.CloudwatchLogsLogDestinationProperty;

/**
* The Amazon Kinesis Data Firehose logging configuration settings for the pipe.
* The Amazon Data Firehose logging configuration settings for the pipe.
*
* @see http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-pipes-pipe-pipelogconfiguration.html#cfn-pipes-pipe-pipelogconfiguration-firehoselogdestination
*
6 changes: 3 additions & 3 deletions packages/@aws-cdk/aws-scheduler-targets-alpha/README.md
@@ -31,7 +31,7 @@ The following targets are supported:
6. `targets.EventBridgePutEvents`: [Put Events on EventBridge](#send-events-to-an-eventbridge-event-bus)
7. `targets.InspectorStartAssessmentRun`: [Start an Amazon Inspector assessment run](#start-an-amazon-inspector-assessment-run)
8. `targets.KinesisStreamPutRecord`: [Put a record to an Amazon Kinesis Data Stream](#put-a-record-to-an-amazon-kinesis-data-stream)
9. `targets.KinesisDataFirehosePutRecord`: [Put a record to a Kinesis Data Firehose](#put-a-record-to-a-kinesis-data-firehose)
9. `targets.KinesisDataFirehosePutRecord`: [Put a record to an Amazon Data Firehose](#put-a-record-to-an-amazon-data-firehose)
10. `targets.CodePipelineStartPipelineExecution`: [Start a CodePipeline execution](#start-a-codepipeline-execution)
11. `targets.SageMakerStartPipelineExecution`: [Start a SageMaker pipeline execution](#start-a-sagemaker-pipeline-execution)
12. `targets.Universal`: [Invoke a wider set of AWS API](#invoke-a-wider-set-of-aws-api)
@@ -252,9 +252,9 @@ new Schedule(this, 'Schedule', {
});
```

## Put a record to a Kinesis Data Firehose
## Put a record to an Amazon Data Firehose

Use the `KinesisDataFirehosePutRecord` target to put a record to a Kinesis Data Firehose delivery stream.
Use the `KinesisDataFirehosePutRecord` target to put a record to an Amazon Data Firehose delivery stream.

The code snippet below creates an event rule with a delivery stream as the target,
invoked every hour by EventBridge Scheduler with a custom payload.
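
Since the diff truncates the snippet itself, here is a minimal sketch of the usual shape (the rate and payload are assumptions; the constructor signature matches the `IDeliveryStream` import diffed below):

```ts
import { Duration } from 'aws-cdk-lib';
import * as firehose from 'aws-cdk-lib/aws-kinesisfirehose';
import { Schedule, ScheduleExpression, ScheduleTargetInput } from '@aws-cdk/aws-scheduler-alpha';
import * as targets from '@aws-cdk/aws-scheduler-targets-alpha';

declare const deliveryStream: firehose.IDeliveryStream;

new Schedule(this, 'Schedule', {
  schedule: ScheduleExpression.rate(Duration.hours(1)),
  target: new targets.KinesisDataFirehosePutRecord(deliveryStream, {
    // Custom payload delivered as the record body
    input: ScheduleTargetInput.fromObject({ source: 'scheduler' }),
  }),
});
```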
@@ -4,7 +4,7 @@ import { IDeliveryStream } from 'aws-cdk-lib/aws-kinesisfirehose';
import { ScheduleTargetBase, ScheduleTargetBaseProps } from './target';

/**
* Use an Amazon Kinesis Data Firehose as a target for AWS EventBridge Scheduler.
* Use an Amazon Data Firehose as a target for AWS EventBridge Scheduler.
*/
export class KinesisDataFirehosePutRecord extends ScheduleTargetBase implements IScheduleTarget {
constructor(
2 changes: 1 addition & 1 deletion packages/aws-cdk-lib/aws-apigateway/README.md
@@ -1400,7 +1400,7 @@ const api = new apigateway.RestApi(this, 'books', {

**Note:** The delivery stream name must start with `amazon-apigateway-`.

> Visit [Logging API calls to Kinesis Data Firehose](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-logging-to-kinesis.html) for more details.
> Visit [Logging API calls to Amazon Data Firehose](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-logging-to-kinesis.html) for more details.
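
A hedged sketch of the wiring (assumes the `FirehoseLogDestination` access-log destination class and an existing stream whose name carries the required prefix):

```ts
import * as apigateway from 'aws-cdk-lib/aws-apigateway';
import * as firehose from 'aws-cdk-lib/aws-kinesisfirehose';

declare const deliveryStream: firehose.IDeliveryStream; // named amazon-apigateway-...

const api = new apigateway.RestApi(this, 'books', {
  deployOptions: {
    accessLogDestination: new apigateway.FirehoseLogDestination(deliveryStream),
    accessLogFormat: apigateway.AccessLogFormat.jsonWithStandardFields(),
  },
});
```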

## Cross Origin Resource Sharing (CORS)

2 changes: 1 addition & 1 deletion packages/aws-cdk-lib/aws-config/lib/rule.ts
@@ -2754,7 +2754,7 @@ export class ResourceType {
public static readonly IAM_SAML_PROVIDER = new ResourceType('AWS::IAM::SAMLProvider');
/** AWS IAM ServerCertificate */
public static readonly IAM_SERVER_CERTIFICATE = new ResourceType('AWS::IAM::ServerCertificate');
/** Amazon Kinesis Firehose DeliveryStream */
/** Amazon Data Firehose DeliveryStream */
public static readonly KINESIS_FIREHOSE_DELIVERY_STREAM = new ResourceType('AWS::KinesisFirehose::DeliveryStream');
/** Amazon Pinpoint Campaign */
public static readonly PINPOINT_CAMPAIGN = new ResourceType('AWS::Pinpoint::Campaign');
4 changes: 2 additions & 2 deletions packages/aws-cdk-lib/aws-ec2/README.md
@@ -2202,7 +2202,7 @@ new ec2.FlowLog(this, 'FlowLogWithKeyPrefix', {
});
```

*Kinesis Data Firehose*
*Amazon Data Firehose*

```ts
import * as firehose from 'aws-cdk-lib/aws-kinesisfirehose';
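// The diff truncates this example. A plausible completion, using the
// toKinesisDataFirehoseDestination API shown in vpc-flow-logs.ts below
// (the VPC and delivery stream are assumed to exist elsewhere):
declare const vpc: ec2.Vpc;
declare const deliveryStream: firehose.IDeliveryStream;

new ec2.FlowLog(this, 'FlowLogToFirehose', {
  resourceType: ec2.FlowLogResourceType.fromVpc(vpc),
  destination: ec2.FlowLogDestination.toKinesisDataFirehoseDestination(
    deliveryStream.deliveryStreamArn, // the destination takes a stream ARN
  ),
});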
@@ -2524,4 +2524,4 @@ new ec2.Instance(this, 'Instance', {
machineImage: ec2.MachineImage.latestAmazonLinux2023(),
instanceProfile,
});
```
10 changes: 5 additions & 5 deletions packages/aws-cdk-lib/aws-ec2/lib/vpc-flow-logs.ts
@@ -60,7 +60,7 @@ export enum FlowLogDestinationType {
S3 = 's3',

/**
* Send flow logs to Kinesis Data Firehose
* Send flow logs to Amazon Data Firehose
*/
KINESIS_DATA_FIREHOSE = 'kinesis-data-firehose',
}
@@ -215,9 +215,9 @@ export abstract class FlowLogDestination {
}

/**
* Use Kinesis Data Firehose as the destination
* Use Amazon Data Firehose as the destination
*
* @param deliveryStreamArn the ARN of the Kinesis Data Firehose delivery stream to publish logs to
* @param deliveryStreamArn the ARN of the Amazon Data Firehose delivery stream to publish logs to
*/
public static toKinesisDataFirehoseDestination(deliveryStreamArn: string): FlowLogDestination {
return new KinesisDataFirehoseDestination({
@@ -272,7 +272,7 @@ export interface FlowLogDestinationConfig {
readonly keyPrefix?: string;

/**
* The ARN of the Kinesis Data Firehose delivery stream to publish the flow logs to
* The ARN of the Amazon Data Firehose delivery stream to publish the flow logs to
*
* @default - undefined
*/
@@ -849,7 +849,7 @@ export class FlowLog extends FlowLogBase {
public readonly logGroup?: logs.ILogGroup;

/**
* The ARN of the Kinesis Data Firehose delivery stream to publish flow logs to
* The ARN of the Amazon Data Firehose delivery stream to publish flow logs to
*/
public readonly deliveryStreamArn?: string;

2 changes: 1 addition & 1 deletion packages/aws-cdk-lib/aws-elasticsearch/README.md
@@ -234,7 +234,7 @@ const domain = new es.Domain(this, 'Domain', {
```

For more complex use-cases, for example, to set the domain up to receive data from a
[cross-account Kinesis Firehose](https://aws.amazon.com/premiumsupport/knowledge-center/kinesis-firehose-cross-account-streaming/), the `addAccessPolicies` helper method
[cross-account Amazon Data Firehose](https://aws.amazon.com/premiumsupport/knowledge-center/kinesis-firehose-cross-account-streaming/), the `addAccessPolicies` helper method
allows for policies that include the explicit domain ARN.

```ts
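// The diff truncates this snippet. A sketch of the typical continuation
// (the account ID and actions are illustrative assumptions):
import * as iam from 'aws-cdk-lib/aws-iam';

declare const domain: es.Domain;

domain.addAccessPolicies(
  new iam.PolicyStatement({
    effect: iam.Effect.ALLOW,
    principals: [new iam.AccountPrincipal('123456789012')],
    actions: ['es:ESHttpPost', 'es:ESHttpPut'],
    resources: [domain.domainArn, `${domain.domainArn}/*`],
  }),
);
```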
@@ -5,7 +5,7 @@ import * as firehose from '../../aws-kinesisfirehose';
import { IResource } from '../../core';

/**
* Customize the Firehose Stream Event Target
* Customize the Amazon Data Firehose Stream Event Target
*/
export interface KinesisFirehoseStreamProps {
/**
@@ -19,7 +19,7 @@ export interface KinesisFirehoseStreamProps {
}

/**
* Customize the Firehose Stream Event Target
* Customize the Amazon Data Firehose Stream Event Target
*
* @deprecated Use KinesisFirehoseStreamV2
*/
@@ -48,7 +48,7 @@ export class KinesisFirehoseStream implements events.IRuleTarget {
}

/**
* Represents a Kinesis Data Firehose delivery stream.
* Represents an Amazon Data Firehose delivery stream.
*/
export interface IDeliveryStream extends IResource {
/**
@@ -67,8 +67,8 @@ export interface IDeliveryStream extends IResource {
}

/**
* Customize the Firehose Stream Event Target V2 to support L2 Kinesis Delivery Stream
* instead of L1 Cfn Kinesis Delivery Stream.
* Customize the Amazon Data Firehose Stream Event Target V2 to support L2 Amazon Data Firehose Delivery Stream
* instead of L1 Cfn Firehose Delivery Stream.
*/
export class KinesisFirehoseStreamV2 implements events.IRuleTarget {
constructor(private readonly stream: IDeliveryStream, private readonly props: KinesisFirehoseStreamProps = {}) {
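
For context, a hedged usage sketch of this V2 target (the rule and L2 delivery stream are assumed to exist; the module's local `IDeliveryStream` is satisfied structurally by the L2 stream):

```ts
import * as events from 'aws-cdk-lib/aws-events';
import * as targets from 'aws-cdk-lib/aws-events-targets';
import * as firehose from 'aws-cdk-lib/aws-kinesisfirehose';

declare const rule: events.Rule;
declare const stream: firehose.IDeliveryStream;

// Forward matched events into the delivery stream
rule.addTarget(new targets.KinesisFirehoseStreamV2(stream));
```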
9 changes: 4 additions & 5 deletions packages/aws-cdk-lib/aws-kinesisfirehose/README.md
@@ -61,7 +61,7 @@ new firehose.DeliveryStream(this, 'Delivery Stream', {

### Direct Put

Data must be provided via "direct put", i.e., by using a `PutRecord` or
`PutRecordBatch` API call. There are a number of ways of doing so, such as:

- Kinesis Agent: a standalone Java application that monitors and delivers files while
@@ -80,10 +80,9 @@ Data must be provided via "direct put", i.e., by using a `PutRecord` or
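
As one concrete illustration of the direct-put call path (AWS SDK for JavaScript v3; the stream name is hypothetical):

```ts
import { FirehoseClient, PutRecordCommand } from '@aws-sdk/client-firehose';

const client = new FirehoseClient({});

async function putOne(): Promise<void> {
  // Each record is delivered to the stream's configured destination
  await client.send(new PutRecordCommand({
    DeliveryStreamName: 'my-delivery-stream', // hypothetical
    Record: { Data: new TextEncoder().encode(JSON.stringify({ ts: Date.now() }) + '\n') },
  }));
}
```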

## Destinations

Amazon Data Firehose supports multiple AWS and third-party services as destinations, including Amazon S3, Amazon Redshift, and more. You can find the full list of supported destinations [here](https://docs.aws.amazon.com/firehose/latest/dev/create-destination.html).

Currently in the AWS CDK, only S3 is implemented as an L2 construct destination. Other destinations can still be configured using L1 constructs. See [kinesisfirehose-destinations](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-kinesisfirehose-destinations-readme.html)
for the implementations of these destinations.
Currently in the AWS CDK, only S3 is implemented as an L2 construct destination. Other destinations can still be configured using L1 constructs.

### S3
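
The body of this section is collapsed in the diff; a minimal sketch under the graduated module layout (the `S3Bucket` destination class and singular `destination` prop are assumptions here):

```ts
import * as firehose from 'aws-cdk-lib/aws-kinesisfirehose';
import * as s3 from 'aws-cdk-lib/aws-s3';

const bucket = new s3.Bucket(this, 'Bucket');
new firehose.DeliveryStream(this, 'Delivery Stream', {
  destination: new firehose.S3Bucket(bucket),
});
```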

@@ -214,7 +213,7 @@ limit of records per second (indicating data is flowing into your delivery stream
than it is configured to process).

CDK provides methods for accessing delivery stream metrics with default configuration,
such as `metricIncomingBytes`, and `metricIncomingRecords` (see [`IDeliveryStream`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-kinesisfirehose.IDeliveryStream.html)
such as `metricIncomingBytes`, and `metricIncomingRecords` (see [`IDeliveryStream`](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-cdk-lib.aws_kinesisfirehose.IDeliveryStream.html)
for a full list). CDK also provides a generic `metric` method that can be used to produce
metric configurations for any metric provided by Amazon Data Firehose; the configurations
are pre-populated with the correct dimensions for the delivery stream.
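
For instance, a hedged sketch (`metricIncomingRecords` is among the listed methods; `ThrottledRecords` is assumed here as an example argument to the generic `metric` method):

```ts
import { Duration } from 'aws-cdk-lib';
import * as cloudwatch from 'aws-cdk-lib/aws-cloudwatch';
import * as firehose from 'aws-cdk-lib/aws-kinesisfirehose';

declare const deliveryStream: firehose.IDeliveryStream;

const incoming = deliveryStream.metricIncomingRecords({ period: Duration.minutes(5) });
const throttled = deliveryStream.metric('ThrottledRecords', { statistic: 'Sum' });

// Alarm when any throttling is observed on the stream
new cloudwatch.Alarm(this, 'ThrottleAlarm', {
  metric: throttled,
  threshold: 1,
  evaluationPeriods: 1,
});
```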
14 changes: 7 additions & 7 deletions packages/aws-cdk-lib/aws-kinesisfirehose/lib/common.ts
@@ -6,7 +6,7 @@ import * as s3 from '../../aws-s3';
import * as cdk from '../../core';

/**
* Possible compression options Kinesis Data Firehose can use to compress data on delivery.
* Possible compression options Amazon Data Firehose can use to compress data on delivery.
*/
export class Compression {
/**
@@ -75,7 +75,7 @@ interface DestinationLoggingProps {
}

/**
* Common properties for defining a backup, intermediary, or final S3 destination for a Kinesis Data Firehose delivery stream.
* Common properties for defining a backup, intermediary, or final S3 destination for an Amazon Data Firehose delivery stream.
*/
export interface CommonDestinationS3Props {
/**
@@ -90,7 +90,7 @@ export interface CommonDestinationS3Props {
readonly bufferingInterval?: cdk.Duration;

/**
* The size of the buffer that Kinesis Data Firehose uses for incoming data before
* The size of the buffer that Amazon Data Firehose uses for incoming data before
* delivering it to the S3 bucket.
*
* Minimum: Size.mebibytes(1)
@@ -101,7 +101,7 @@ export interface CommonDestinationS3Props {
readonly bufferingSize?: cdk.Size;

/**
* The type of compression that Kinesis Data Firehose uses to compress the data
* The type of compression that Amazon Data Firehose uses to compress the data
* that it delivers to the Amazon S3 bucket.
*
* The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift
@@ -120,7 +120,7 @@ export interface CommonDestinationS3Props {
readonly encryptionKey?: kms.IKey;

/**
* A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3.
* A prefix that Amazon Data Firehose evaluates and adds to failed records before writing them to S3.
*
* This prefix appears immediately following the bucket name.
* @see https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html
@@ -130,7 +130,7 @@ export interface CommonDestinationS3Props {
readonly errorOutputPrefix?: string;

/**
* A prefix that Kinesis Data Firehose evaluates and adds to records before writing them to S3.
* A prefix that Amazon Data Firehose evaluates and adds to records before writing them to S3.
*
* This prefix appears immediately following the bucket name.
* @see https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html
@@ -171,7 +171,7 @@ export interface CommonDestinationProps extends DestinationLoggingProps {
/**
* The IAM role associated with this destination.
*
* Assumed by Kinesis Data Firehose to invoke processors and write to destinations
* Assumed by Amazon Data Firehose to invoke processors and write to destinations
*
* @default - a role will be created with default permissions.
*/
14 changes: 7 additions & 7 deletions packages/aws-cdk-lib/aws-kinesisfirehose/lib/delivery-stream.ts
@@ -18,7 +18,7 @@ const PUT_RECORD_ACTIONS = [
];

/**
* Represents a Kinesis Data Firehose delivery stream.
* Represents an Amazon Data Firehose delivery stream.
*/
export interface IDeliveryStream extends cdk.IResource, iam.IGrantable, ec2.IConnectable {
/**
@@ -72,7 +72,7 @@ export interface IDeliveryStream extends cdk.IResource, iam.IGrantable, ec2.IConnectable {
metricBackupToS3Bytes(props?: cloudwatch.MetricOptions): cloudwatch.Metric;

/**
* Metric for the age (from getting into Kinesis Data Firehose to now) of the oldest record in Kinesis Data Firehose.
* Metric for the age (from getting into Amazon Data Firehose to now) of the oldest record in Amazon Data Firehose.
*
* Any record older than this age has been delivered to the Amazon S3 bucket for backup.
*
@@ -89,7 +89,7 @@ export interface IDeliveryStream extends cdk.IResource, iam.IGrantable, ec2.IConnectable {
}

/**
* Base class for new and imported Kinesis Data Firehose delivery streams.
* Base class for new and imported Amazon Data Firehose delivery streams.
*/
abstract class DeliveryStreamBase extends cdk.Resource implements IDeliveryStream {
public abstract readonly deliveryStreamName: string;
@@ -99,7 +99,7 @@ abstract class DeliveryStreamBase extends cdk.Resource implements IDeliveryStream {
public abstract readonly grantPrincipal: iam.IPrincipal;

/**
* Network connections between Kinesis Data Firehose and other resources, e.g. a Redshift cluster.
* Network connections between Amazon Data Firehose and other resources, e.g. a Redshift cluster.
*/
public readonly connections: ec2.Connections;

@@ -206,7 +206,7 @@ export interface DeliveryStreamProps {
/**
* The IAM role associated with this delivery stream.
*
* Assumed by Kinesis Data Firehose to read from sources and encrypt data server-side.
* Assumed by Amazon Data Firehose to read from sources and encrypt data server-side.
*
* @default - a role will be created with default permissions.
*/
@@ -245,15 +245,15 @@ export interface DeliveryStreamAttributes {
/**
* The IAM role associated with this delivery stream.
*
* Assumed by Kinesis Data Firehose to read from sources and encrypt data server-side.
* Assumed by Amazon Data Firehose to read from sources and encrypt data server-side.
*
* @default - the imported stream cannot be granted access to other resources as an `iam.IGrantable`.
*/
readonly role?: iam.IRole;
}

/**
* Create a Kinesis Data Firehose delivery stream
* Create an Amazon Data Firehose delivery stream
*
* @resource AWS::KinesisFirehose::DeliveryStream
*/