
Commit 645766d

Merge branch 'master' into aws-iot-actions-firehose

2 parents ae9cb88 + df30d4f

119 files changed: +5549 −661 lines


Diff for: .github/workflows/issue-label-assign.yml

+4-1
@@ -228,5 +228,8 @@ jobs:
       {"area":"@aws-cdk/region-info","keywords":["region-info","fact"],"labels":["@aws-cdk/region-info"],"assignees":["skinny85"]},
       {"area":"aws-cdk-lib","keywords":["aws-cdk-lib","cdk-v2","v2","ubergen"],"labels":["aws-cdk-lib"],"assignees":["nija-at"]},
       {"area":"monocdk","keywords":["monocdk","monocdk-experiment"],"labels":["monocdk"],"assignees":["nija-at"]},
-      {"area":"@aws-cdk/yaml-cfn","keywords":["(aws-yaml-cfn)","(yaml-cfn)"],"labels":["@aws-cdk/aws-yaml-cfn"],"assignees":["skinny85"]}
+      {"area":"@aws-cdk/yaml-cfn","keywords":["(aws-yaml-cfn)","(yaml-cfn)"],"labels":["@aws-cdk/aws-yaml-cfn"],"assignees":["skinny85"]},
+      {"area":"@aws-cdk/aws-apprunner","keywords":["apprunner","aws-apprunner"],"labels":["@aws-cdk/aws-apprunner"],"assignees":["corymhall"]},
+      {"area":"@aws-cdk/aws-lightsail","keywords":["lightsail","aws-lightsail"],"labels":["@aws-cdk/aws-lightsail"],"assignees":["corymhall"]},
+      {"area":"@aws-cdk/aws-aps","keywords":["aps","aws-aps","prometheus"],"labels":["@aws-cdk/aws-aps"],"assignees":["corymhall"]}
     ]

Diff for: .mergify.yml

+7-12
@@ -37,10 +37,9 @@ pull_request_rules:
     actions:
       comment:
         message: Thank you for contributing! Your pull request will be automatically updated and merged (do not update manually, and be sure to [allow changes to be pushed to your fork](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork)).
-      merge:
-        strict: smart
+      queue:
+        name: default
         method: squash
-        strict_method: merge
         commit_message: title+body
     conditions:
       - base!=release
@@ -60,11 +59,9 @@ pull_request_rules:
     actions:
       comment:
        message: Thank you for contributing! Your pull request will be automatically updated and merged without squashing (do not update manually, and be sure to [allow changes to be pushed to your fork](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork)).
-      merge:
-        strict: smart
-        # Merge instead of squash
+      queue:
+        name: default
         method: merge
-        strict_method: merge
         commit_message: title+body
     conditions:
       - -title~=(WIP|wip)
@@ -106,12 +103,10 @@ pull_request_rules:
     actions:
       comment:
         message: Thanks Dependabot!
-      merge:
-        # 'strict: false' disables Mergify keeping the branch up-to-date from master.
-        # It's not necessary: Dependabot will do that itself.
-        # It's not dangerous: GitHub branch protection settings prevent merging stale branches.
-        strict: false
+      queue:
+        name: default
         method: squash
+        commit_message: title+body
     conditions:
       - -title~=(WIP|wip)
       - -label~=(blocked|do-not-merge)

Diff for: allowed-breaking-changes.txt

+5-1
@@ -77,4 +77,8 @@ strengthened:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps
 
 # Remove IO2 from autoscaling EbsDeviceVolumeType. This value is not supported
 # at the moment and was not supported in the past.
-removed:@aws-cdk/aws-autoscaling.EbsDeviceVolumeType.IO2
+removed:@aws-cdk/aws-autoscaling.EbsDeviceVolumeType.IO2
+
+# Remove autoTerminationPolicy from stepfunctions-tasks EmrCreateClusterProps. This value is not supported by stepfunctions at the moment and was not supported in the past.
+removed:@aws-cdk/aws-stepfunctions-tasks.EmrCreateCluster.AutoTerminationPolicyProperty
+removed:@aws-cdk/aws-stepfunctions-tasks.EmrCreateClusterProps.autoTerminationPolicy

Diff for: packages/@aws-cdk/aws-batch/README.md

+44-26
@@ -44,17 +44,17 @@ In **MANAGED** mode, AWS will handle the provisioning of compute resources to ac
 Below is an example of each available type of compute environment:
 
 ```ts
-const defaultVpc = new ec2.Vpc(this, 'VPC');
+declare const vpc: ec2.Vpc;
 
 // default is managed
-const awsManagedEnvironment = new batch.ComputeEnvironment(stack, 'AWS-Managed-Compute-Env', {
+const awsManagedEnvironment = new batch.ComputeEnvironment(this, 'AWS-Managed-Compute-Env', {
   computeResources: {
-    vpc
+    vpc,
   }
 });
 
-const customerManagedEnvironment = new batch.ComputeEnvironment(stack, 'Customer-Managed-Compute-Env', {
-  managed: false // unmanaged environment
+const customerManagedEnvironment = new batch.ComputeEnvironment(this, 'Customer-Managed-Compute-Env', {
+  managed: false, // unmanaged environment
 });
 ```
 
@@ -65,7 +65,7 @@ It is possible to have AWS Batch submit spotfleet requests for obtaining compute
 ```ts
 const vpc = new ec2.Vpc(this, 'VPC');
 
-const spotEnvironment = new batch.ComputeEnvironment(stack, 'MySpotEnvironment', {
+const spotEnvironment = new batch.ComputeEnvironment(this, 'MySpotEnvironment', {
   computeResources: {
     type: batch.ComputeResourceType.SPOT,
     bidPercentage: 75, // Bids for resources at 75% of the on-demand price
@@ -81,7 +81,7 @@ It is possible to have AWS Batch submit jobs to be run on Fargate compute resour
 ```ts
 const vpc = new ec2.Vpc(this, 'VPC');
 
-const fargateSpotEnvironment = new batch.ComputeEnvironment(stack, 'MyFargateEnvironment', {
+const fargateSpotEnvironment = new batch.ComputeEnvironment(this, 'MyFargateEnvironment', {
   computeResources: {
     type: batch.ComputeResourceType.FARGATE_SPOT,
     vpc,
@@ -119,7 +119,8 @@ The alternative would be to use the `BEST_FIT_PROGRESSIVE` strategy in order for
 
 Simply define your Launch Template:
 
-```ts
+```text
+// This example is only available in TypeScript
 const myLaunchTemplate = new ec2.CfnLaunchTemplate(this, 'LaunchTemplate', {
   launchTemplateName: 'extra-storage-template',
   launchTemplateData: {
@@ -129,17 +130,20 @@ const myLaunchTemplate = new ec2.CfnLaunchTemplate(this, 'LaunchTemplate', {
         ebs: {
           encrypted: true,
           volumeSize: 100,
-          volumeType: 'gp2'
-        }
-      }
-    ]
-  }
+          volumeType: 'gp2',
+        },
+      },
+    ],
+  },
 });
 ```
 
 and use it:
 
 ```ts
+declare const vpc: ec2.Vpc;
+declare const myLaunchTemplate: ec2.CfnLaunchTemplate;
+
 const myComputeEnv = new batch.ComputeEnvironment(this, 'ComputeEnv', {
   computeResources: {
     launchTemplate: {
@@ -168,6 +172,7 @@ Occasionally, you will need to deviate from the default processing AMI.
 ECS Optimized Amazon Linux 2 example:
 
 ```ts
+declare const vpc: ec2.Vpc;
 const myComputeEnv = new batch.ComputeEnvironment(this, 'ComputeEnv', {
   computeResources: {
     image: new ecs.EcsOptimizedAmi({
@@ -181,11 +186,12 @@ const myComputeEnv = new batch.ComputeEnvironment(this, 'ComputeEnv', {
 Custom based AMI example:
 
 ```ts
+declare const vpc: ec2.Vpc;
 const myComputeEnv = new batch.ComputeEnvironment(this, 'ComputeEnv', {
   computeResources: {
     image: ec2.MachineImage.genericLinux({
       "[aws-region]": "[ami-ID]",
-    })
+    }),
     vpc,
   }
 });
@@ -196,7 +202,8 @@ const myComputeEnv = new batch.ComputeEnvironment(this, 'ComputeEnv', {
 Jobs are always submitted to a specific queue. This means that you have to create a queue before you can start submitting jobs. Each queue is mapped to at least one (and no more than three) compute environment. When the job is scheduled for execution, AWS Batch will select the compute environment based on ordinal priority and available capacity in each environment.
 
 ```ts
-const jobQueue = new batch.JobQueue(stack, 'JobQueue', {
+declare const computeEnvironment: batch.ComputeEnvironment;
+const jobQueue = new batch.JobQueue(this, 'JobQueue', {
   computeEnvironments: [
     {
       // Defines a collection of compute resources to handle assigned batch jobs
@@ -213,13 +220,20 @@ const jobQueue = new batch.JobQueue(stack, 'JobQueue', {
 Sometimes you might have jobs that are more important than others, and when submitted, should take precedence over the existing jobs. To achieve this, you can create a priority based execution strategy, by assigning each queue its own priority:
 
 ```ts
-const highPrioQueue = new batch.JobQueue(stack, 'JobQueue', {
-  computeEnvironments: sharedComputeEnvs,
+declare const sharedComputeEnvs: batch.ComputeEnvironment;
+const highPrioQueue = new batch.JobQueue(this, 'JobQueue', {
+  computeEnvironments: [{
+    computeEnvironment: sharedComputeEnvs,
+    order: 1,
+  }],
   priority: 2,
 });
 
-const lowPrioQueue = new batch.JobQueue(stack, 'JobQueue', {
-  computeEnvironments: sharedComputeEnvs,
+const lowPrioQueue = new batch.JobQueue(this, 'JobQueue', {
+  computeEnvironments: [{
+    computeEnvironment: sharedComputeEnvs,
+    order: 1,
+  }],
   priority: 1,
 });
 ```
@@ -241,9 +255,11 @@ const jobQueue = batch.JobQueue.fromJobQueueArn(this, 'imported-job-queue', 'arn
 A Batch Job definition helps AWS Batch understand important details about how to run your application in the scope of a Batch Job. This involves key information like resource requirements, what containers to run, how the compute environment should be prepared, and more. Below is a simple example of how to create a job definition:
 
 ```ts
-const repo = ecr.Repository.fromRepositoryName(stack, 'batch-job-repo', 'todo-list');
+import * as ecr from '@aws-cdk/aws-ecr';
 
-new batch.JobDefinition(stack, 'batch-job-def-from-ecr', {
+const repo = ecr.Repository.fromRepositoryName(this, 'batch-job-repo', 'todo-list');
+
+new batch.JobDefinition(this, 'batch-job-def-from-ecr', {
   container: {
     image: new ecs.EcrImage(repo, 'latest'),
   },
@@ -255,7 +271,7 @@ new batch.JobDefinition(stack, 'batch-job-def-from-ecr', {
 Below is an example of how you can create a Batch Job Definition from a local Docker application.
 
 ```ts
-new batch.JobDefinition(stack, 'batch-job-def-from-local', {
+new batch.JobDefinition(this, 'batch-job-def-from-local', {
   container: {
     // todo-list is a directory containing a Dockerfile to build the application
     image: ecs.ContainerImage.fromAsset('../todo-list'),
@@ -268,14 +284,16 @@ new batch.JobDefinition(stack, 'batch-job-def-from-local', {
 You can provide custom log driver and its configuration for the container.
 
 ```ts
-new batch.JobDefinition(stack, 'job-def', {
+import * as ssm from '@aws-cdk/aws-ssm';
+
+new batch.JobDefinition(this, 'job-def', {
   container: {
     image: ecs.EcrImage.fromRegistry('docker/whalesay'),
     logConfiguration: {
       logDriver: batch.LogDriver.AWSLOGS,
       options: { 'awslogs-region': 'us-east-1' },
       secretOptions: [
-        batch.ExposedSecret.fromParametersStore('xyz', ssm.StringParameter.fromStringParameterName(stack, 'parameter', 'xyz')),
+        batch.ExposedSecret.fromParametersStore('xyz', ssm.StringParameter.fromStringParameterName(this, 'parameter', 'xyz')),
       ],
     },
   },
@@ -303,8 +321,8 @@ Below is an example:
 
 ```ts
 // Without revision
-const job = batch.JobDefinition.fromJobDefinitionName(this, 'imported-job-definition', 'my-job-definition');
+const job1 = batch.JobDefinition.fromJobDefinitionName(this, 'imported-job-definition', 'my-job-definition');
 
 // With revision
-const job = batch.JobDefinition.fromJobDefinitionName(this, 'imported-job-definition', 'my-job-definition:3');
+const job2 = batch.JobDefinition.fromJobDefinitionName(this, 'imported-job-definition', 'my-job-definition:3');
 ```

Diff for: packages/@aws-cdk/aws-batch/package.json

+8-1
@@ -28,7 +28,14 @@
         ]
       }
     },
-    "projectReferences": true
+    "projectReferences": true,
+    "metadata": {
+      "jsii": {
+        "rosetta": {
+          "strict": true
+        }
+      }
+    }
   },
   "repository": {
     "type": "git",
+14
@@ -0,0 +1,14 @@
+// Fixture with packages imported, but nothing else
+import { Construct } from 'constructs';
+import { Stack } from '@aws-cdk/core';
+import * as ec2 from '@aws-cdk/aws-ec2';
+import * as batch from '@aws-cdk/aws-batch';
+import * as ecs from '@aws-cdk/aws-ecs';
+
+class Fixture extends Stack {
+  constructor(scope: Construct, id: string) {
+    super(scope, id);
+
+    /// here
+  }
+}
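
This new file is the rosetta fixture the README snippets compile against: roughly speaking, jsii-rosetta substitutes each snippet body at the `/// here` marker, so `this` resolves to the fixture's `Stack` and the `ec2`/`batch`/`ecs` imports are already in scope. A sketch of how one of the updated README snippets and the fixture fit together; the expansion is illustrative only, with the snippet's `declare const` placeholder shown hoisted to module scope for clarity:

```ts
import { Construct } from 'constructs';
import { Stack } from '@aws-cdk/core';
import * as ec2 from '@aws-cdk/aws-ec2';
import * as batch from '@aws-cdk/aws-batch';

// Placeholder from the snippet's `declare const vpc: ec2.Vpc;` line: it only
// tells the type checker that a `vpc` of this type exists.
declare const vpc: ec2.Vpc;

class Fixture extends Stack {
  constructor(scope: Construct, id: string) {
    super(scope, id);

    // Snippet body spliced in where the fixture says `/// here`.
    const awsManagedEnvironment = new batch.ComputeEnvironment(this, 'AWS-Managed-Compute-Env', {
      computeResources: {
        vpc,
      },
    });
  }
}
```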
