diff --git a/packages/@aws-cdk/aws-codepipeline-actions/.gitignore b/packages/@aws-cdk/aws-codepipeline-actions/.gitignore
index 0f4bf01dd552c..9f6a9219fad75 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/.gitignore
+++ b/packages/@aws-cdk/aws-codepipeline-actions/.gitignore
@@ -16,4 +16,5 @@ nyc.config.js
 *.snk
 !.eslintrc.js
-junit.xml
\ No newline at end of file
+junit.xml
+!jest.config.js
\ No newline at end of file
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/.npmignore b/packages/@aws-cdk/aws-codepipeline-actions/.npmignore
index a94c531529866..9e88226921c33 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/.npmignore
+++ b/packages/@aws-cdk/aws-codepipeline-actions/.npmignore
@@ -23,4 +23,5 @@ tsconfig.json
 # exclude cdk artifacts
 **/cdk.out
 junit.xml
-test/
\ No newline at end of file
+test/
+jest.config.js
\ No newline at end of file
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/jest.config.js b/packages/@aws-cdk/aws-codepipeline-actions/jest.config.js
new file mode 100644
index 0000000000000..54e28beb9798b
--- /dev/null
+++ b/packages/@aws-cdk/aws-codepipeline-actions/jest.config.js
@@ -0,0 +1,2 @@
+const baseConfig = require('cdk-build-tools/config/jest.config');
+module.exports = baseConfig;
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/package.json b/packages/@aws-cdk/aws-codepipeline-actions/package.json
index 76381cfdbbb47..6087beb2745df 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/package.json
+++ b/packages/@aws-cdk/aws-codepipeline-actions/package.json
@@ -69,11 +69,10 @@
     "@aws-cdk/assert": "0.0.0",
     "@aws-cdk/aws-cloudtrail": "0.0.0",
     "@types/lodash": "^4.14.168",
-    "@types/nodeunit": "^0.0.31",
     "cdk-build-tools": "0.0.0",
     "cdk-integ-tools": "0.0.0",
     "lodash": "^4.17.21",
-    "nodeunit": "^0.11.3",
+    "nodeunit-shim": "0.0.0",
     "pkglint": "0.0.0"
   },
   "dependencies": {
@@ -177,6 +176,7 @@
   },
   "maturity": "stable",
   "cdk-build": {
+    "jest": true,
     "env": {
       "AWSLINT_BASE_CONSTRUCT": true
     }
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/bitbucket/test.bitbucket-source-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/bitbucket/bitbucket-source-action.test.ts
similarity index 97%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/bitbucket/test.bitbucket-source-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/bitbucket/bitbucket-source-action.test.ts
index a4f120f2abf68..942811b94b2fd 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/bitbucket/test.bitbucket-source-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/bitbucket/bitbucket-source-action.test.ts
@@ -2,12 +2,12 @@ import { expect, haveResourceLike } from '@aws-cdk/assert';
 import * as codebuild from '@aws-cdk/aws-codebuild';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import { Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'BitBucket source Action': {
     'produces the correct configuration when added to a pipeline'(test: Test) {
       const stack = new Stack();
@@ -82,7 +82,7 @@ export = {
 
     test.done();
   },
-};
+});
 
 function createBitBucketAndCodeBuildPipeline(stack: Stack, props: { codeBuildCloneOutput: boolean }): void {
   const sourceOutput = new codepipeline.Artifact();
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/test.cloudformation-pipeline-actions.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/cloudformation-pipeline-actions.test.ts
similarity index 99%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/test.cloudformation-pipeline-actions.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/cloudformation-pipeline-actions.test.ts
index 22bf46a15a641..28a22d970a1c0 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/test.cloudformation-pipeline-actions.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/cloudformation-pipeline-actions.test.ts
@@ -5,12 +5,12 @@ import * as codecommit from '@aws-cdk/aws-codecommit';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import { PolicyStatement, Role, ServicePrincipal } from '@aws-cdk/aws-iam';
 import * as cdk from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'CreateChangeSetAction can be used to make a change set from a CodePipeline'(test: Test) {
     const stack = new cdk.Stack();
@@ -712,7 +712,7 @@ export = {
       test.done();
     },
   },
-};
+});
 
 /**
  * A test stack with a half-prepared pipeline ready to add CloudFormation actions to
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/test.pipeline-actions.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/pipeline-actions.test.ts
similarity index 97%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/test.pipeline-actions.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/pipeline-actions.test.ts
index 66e23dcfe0584..1433ffc6e2860 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/test.pipeline-actions.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/pipeline-actions.test.ts
@@ -5,12 +5,12 @@ import * as s3 from '@aws-cdk/aws-s3';
 import * as cdk from '@aws-cdk/core';
 import * as constructs from 'constructs';
 import * as _ from 'lodash';
-import * as nodeunit from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
-export = nodeunit.testCase({
+nodeunitShim({
   CreateReplaceChangeSet: {
-    'works'(test: nodeunit.Test) {
+    'works'(test: Test) {
       const app = new cdk.App();
       const stack = new cdk.Stack(app, 'Stack');
       const pipelineRole = new RoleDouble(stack, 'PipelineRole');
@@ -51,7 +51,7 @@ export = nodeunit.testCase({
       test.done();
     },
 
-    'uses a single permission statement if the same ChangeSet name is used'(test: nodeunit.Test) {
+    'uses a single permission statement if the same ChangeSet name is used'(test: Test) {
       const stack = new cdk.Stack();
       const pipelineRole = new RoleDouble(stack, 'PipelineRole');
       const artifact = new codepipeline.Artifact('TestArtifact');
@@ -110,7 +110,7 @@ export = nodeunit.testCase({
   },
 
   ExecuteChangeSet: {
-    'works'(test: nodeunit.Test) {
+    'works'(test: Test) {
       const stack = new cdk.Stack();
       const pipelineRole = new RoleDouble(stack, 'PipelineRole');
       const stage = new StageDouble({
@@ -137,7 +137,7 @@ export = nodeunit.testCase({
       test.done();
     },
 
-    'uses a single permission statement if the same ChangeSet name is used'(test: nodeunit.Test) {
+    'uses a single permission statement if the same ChangeSet name is used'(test: Test) {
       const stack = new cdk.Stack();
       const pipelineRole = new RoleDouble(stack, 'PipelineRole');
       new StageDouble({
@@ -181,7 +181,7 @@ export = nodeunit.testCase({
     },
   },
 
-  'the CreateUpdateStack Action sets the DescribeStack*, Create/Update/DeleteStack & PassRole permissions'(test: nodeunit.Test) {
+  'the CreateUpdateStack Action sets the DescribeStack*, Create/Update/DeleteStack & PassRole permissions'(test: Test) {
     const stack = new cdk.Stack();
     const pipelineRole = new RoleDouble(stack, 'PipelineRole');
     const action = new cpactions.CloudFormationCreateUpdateStackAction({
@@ -207,7 +207,7 @@ export = nodeunit.testCase({
     test.done();
   },
 
-  'the DeleteStack Action sets the DescribeStack*, DeleteStack & PassRole permissions'(test: nodeunit.Test) {
+  'the DeleteStack Action sets the DescribeStack*, DeleteStack & PassRole permissions'(test: Test) {
     const stack = new cdk.Stack();
     const pipelineRole = new RoleDouble(stack, 'PipelineRole');
     const action = new cpactions.CloudFormationDeleteStackAction({
@@ -238,7 +238,7 @@ interface PolicyStatementJson {
 }
 
 function _assertActionMatches(
-  test: nodeunit.Test,
+  test: Test,
   stack: cdk.Stack,
   actions: FullAction[],
   provider: string,
@@ -279,7 +279,7 @@ function _hasAction(
 }
 
 function _assertPermissionGranted(
-  test: nodeunit.Test,
+  test: Test,
   stack: cdk.Stack,
   statements: iam.PolicyStatement[],
   action: string,
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/codebuild/test.codebuild-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/codebuild/codebuild-action.test.ts
similarity index 99%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/codebuild/test.codebuild-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/codebuild/codebuild-action.test.ts
index 68e5acaa849ca..a451aa51dcc42 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/codebuild/test.codebuild-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/codebuild/codebuild-action.test.ts
@@ -5,12 +5,12 @@ import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as s3 from '@aws-cdk/aws-s3';
 import * as sns from '@aws-cdk/aws-sns';
 import { App, SecretValue, Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'CodeBuild action': {
     'that is cross-account and has outputs': {
       'causes an error'(test: Test) {
@@ -337,4 +337,4 @@ export = {
       },
     },
   },
-};
+});
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/test.codecommit-source-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/codecommit-source-action.test.ts
similarity index 99%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/test.codecommit-source-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/codecommit-source-action.test.ts
index 3dc5b1c739d4e..c3e2f4b9b6582 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/test.codecommit-source-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/codecommit-source-action.test.ts
@@ -4,12 +4,12 @@ import * as codecommit from '@aws-cdk/aws-codecommit';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as iam from '@aws-cdk/aws-iam';
 import { Stack, Lazy } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'CodeCommit Source Action': {
     'by default does not poll for source changes and uses Events'(test: Test) {
       const stack = new Stack();
@@ -430,7 +430,7 @@ export = {
       test.done();
     },
   },
-};
+});
 
 function minimalPipeline(stack: Stack, trigger: cpactions.CodeCommitTrigger | undefined): codepipeline.Pipeline {
   const sourceOutput = new codepipeline.Artifact();
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/codedeploy/test.ecs-deploy-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/codedeploy/ecs-deploy-action.test.ts
similarity index 99%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/codedeploy/test.ecs-deploy-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/codedeploy/ecs-deploy-action.test.ts
index b75809ad8f515..4444ea9d409df 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/codedeploy/test.ecs-deploy-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/codedeploy/ecs-deploy-action.test.ts
@@ -2,10 +2,10 @@ import { expect, haveResourceLike } from '@aws-cdk/assert';
 import * as codedeploy from '@aws-cdk/aws-codedeploy';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as cdk from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
-export = {
+nodeunitShim({
   'CodeDeploy ECS Deploy Action': {
     'throws an exception if more than 4 container image inputs are provided'(test: Test) {
       const stack = new cdk.Stack();
@@ -198,7 +198,7 @@ export = {
       test.done();
     },
   },
-};
+});
 
 function addEcsDeploymentGroup(stack: cdk.Stack): codedeploy.IEcsDeploymentGroup {
   return codedeploy.EcsDeploymentGroup.fromEcsDeploymentGroupAttributes(
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/ecr/test.ecr-source-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/ecr/ecr-source-action.test.ts
similarity index 96%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/ecr/test.ecr-source-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/ecr/ecr-source-action.test.ts
index b27ad5f89f880..aacbb856e065c 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/ecr/test.ecr-source-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/ecr/ecr-source-action.test.ts
@@ -3,12 +3,12 @@ import * as codebuild from '@aws-cdk/aws-codebuild';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as ecr from '@aws-cdk/aws-ecr';
 import { Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'ECR source Action': {
     'exposes variables for other actions to consume'(test: Test) {
       const stack = new Stack();
@@ -63,4 +63,4 @@ export = {
       test.done();
     },
   },
-};
+});
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/ecs/test.ecs-deploy-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/ecs/ecs-deploy-action.test.ts
similarity index 98%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/ecs/test.ecs-deploy-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/ecs/ecs-deploy-action.test.ts
index 1343850871206..841ca3948ec83 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/ecs/test.ecs-deploy-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/ecs/ecs-deploy-action.test.ts
@@ -4,10 +4,10 @@ import * as ec2 from '@aws-cdk/aws-ec2';
 import * as ecs from '@aws-cdk/aws-ecs';
 import * as s3 from '@aws-cdk/aws-s3';
 import * as cdk from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
-export = {
+nodeunitShim({
   'ECS deploy Action': {
     'throws an exception if neither inputArtifact nor imageFile were provided'(test: Test) {
       const service = anyEcsService();
@@ -198,7 +198,7 @@ export = {
       test.done();
     },
   },
-};
+});
 
 function anyEcsService(): ecs.FargateService {
   const stack = new cdk.Stack();
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/github/test.github-source-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/github/github-source-action.test.ts
similarity index 98%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/github/test.github-source-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/github/github-source-action.test.ts
index 56131d3d15b1e..a73726e055459 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/github/test.github-source-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/github/github-source-action.test.ts
@@ -2,12 +2,12 @@ import { expect, haveResourceLike, SynthUtils } from '@aws-cdk/assert';
 import * as codebuild from '@aws-cdk/aws-codebuild';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import { SecretValue, Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'GitHub source Action': {
     'exposes variables for other actions to consume'(test: Test) {
       const stack = new Stack();
@@ -208,4 +208,4 @@ export = {
       test.done();
     },
   },
-};
+});
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/lambda/test.lambda-invoke-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/lambda/lambda-invoke-action.test.ts
similarity index 81%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/lambda/test.lambda-invoke-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/lambda/lambda-invoke-action.test.ts
index 035d8ddb63a38..e9674e4531695 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/lambda/test.lambda-invoke-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/lambda/lambda-invoke-action.test.ts
@@ -1,24 +1,26 @@
-import { expect, haveResourceLike } from '@aws-cdk/assert';
+import '@aws-cdk/assert/jest';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as lambda from '@aws-cdk/aws-lambda';
 import * as s3 from '@aws-cdk/aws-s3';
 import * as sns from '@aws-cdk/aws-sns';
-import { Aws, Lazy, SecretValue, Stack, Token } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { App, Aws, Lazy, SecretValue, Stack, Token } from '@aws-cdk/core';
+import { testFutureBehavior } from 'cdk-build-tools/lib/feature-flag';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
-  'Lambda invoke Action': {
-    'properly serializes the object passed in userParameters'(test: Test) {
+const s3GrantWriteCtx = { '@aws-cdk/aws-s3:grantWriteWithoutAcl': true };
+
+describe('', () => {
+  describe('Lambda invoke Action', () => {
+    test('properly serializes the object passed in userParameters', () => {
       const stack = stackIncludingLambdaInvokeCodePipeline({
         userParams: {
           key: 1234,
         },
       });
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         'Stages': [
           {},
           {
@@ -31,19 +33,19 @@ export = {
             ],
           },
         ],
-      }));
+      });
+
-      test.done();
-    },
+    });
 
-    'properly resolves any Tokens passed in userParameters'(test: Test) {
+    test('properly resolves any Tokens passed in userParameters', () => {
       const stack = stackIncludingLambdaInvokeCodePipeline({
         userParams: {
           key: Lazy.string({ produce: () => Aws.REGION }),
         },
       });
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         'Stages': [
           {},
           {
@@ -67,19 +69,19 @@ export = {
             ],
           },
         ],
-      }));
+      });
+
-      test.done();
-    },
+    });
 
-    'properly resolves any stringified Tokens passed in userParameters'(test: Test) {
+    test('properly resolves any stringified Tokens passed in userParameters', () => {
       const stack = stackIncludingLambdaInvokeCodePipeline({
         userParams: {
           key: Token.asString(null),
        },
       });
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         'Stages': [
           {},
           {
@@ -92,17 +94,17 @@ export = {
             ],
           },
         ],
-      }));
+      });
+
-      test.done();
-    },
+    });
 
-    "assigns the Action's Role with read permissions to the Bucket if it has only inputs"(test: Test) {
+    test("assigns the Action's Role with read permissions to the Bucket if it has only inputs", () => {
       const stack = stackIncludingLambdaInvokeCodePipeline({
         lambdaInput: new codepipeline.Artifact(),
       });
 
-      expect(stack).to(haveResourceLike('AWS::IAM::Policy', {
+      expect(stack).toHaveResourceLike('AWS::IAM::Policy', {
         'PolicyDocument': {
           'Statement': [
             {
@@ -131,18 +133,18 @@ export = {
             },
           ],
         },
-      }));
+      });
+
-      test.done();
-    },
+    });
 
-    "assigns the Action's Role with write permissions to the Bucket if it has only outputs"(test: Test) {
+    testFutureBehavior("assigns the Action's Role with write permissions to the Bucket if it has only outputs", s3GrantWriteCtx, App, (app) => {
       const stack = stackIncludingLambdaInvokeCodePipeline({
         lambdaOutput: new codepipeline.Artifact(),
         // no input to the Lambda Action - we want write permissions only in this case
-      });
+      }, app);
 
-      expect(stack).to(haveResourceLike('AWS::IAM::Policy', {
+      expect(stack).toHaveResourceLike('AWS::IAM::Policy', {
         'PolicyDocument': {
           'Statement': [
             {
@@ -157,7 +159,7 @@ export = {
             {
               'Action': [
                 's3:DeleteObject*',
-                's3:PutObject*',
+                's3:PutObject',
                 's3:Abort*',
               ],
               'Effect': 'Allow',
@@ -172,18 +174,18 @@ export = {
             },
           ],
         },
-      }));
+      });
+
-      test.done();
-    },
+    });
 
-    "assigns the Action's Role with read-write permissions to the Bucket if it has both inputs and outputs"(test: Test) {
+    testFutureBehavior("assigns the Action's Role with read-write permissions to the Bucket if it has both inputs and outputs", s3GrantWriteCtx, App, (app) => {
       const stack = stackIncludingLambdaInvokeCodePipeline({
         lambdaInput: new codepipeline.Artifact(),
         lambdaOutput: new codepipeline.Artifact(),
-      });
+      }, app);
 
-      expect(stack).to(haveResourceLike('AWS::IAM::Policy', {
+      expect(stack).toHaveResourceLike('AWS::IAM::Policy', {
         'PolicyDocument': {
           'Statement': [
             {
@@ -213,7 +215,7 @@ export = {
             {
               'Action': [
                 's3:DeleteObject*',
-                's3:PutObject*',
+                's3:PutObject',
                 's3:Abort*',
               ],
               'Effect': 'Allow',
@@ -228,12 +230,12 @@ export = {
             },
           ],
         },
-      }));
+      });
 
-      test.done();
-    },
-    'exposes variables for other actions to consume'(test: Test) {
+
+    });
+
+    test('exposes variables for other actions to consume', () => {
       const stack = new Stack();
 
       const sourceOutput = new codepipeline.Artifact();
@@ -269,7 +271,7 @@ export = {
         ],
       });
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         'Stages': [
           {
             'Name': 'Source',
@@ -290,12 +292,12 @@ export = {
             ],
           },
         ],
-      }));
+      });
 
-      test.done();
-    },
-  },
-};
+
+    });
+  });
+});
 
 interface HelperProps {
   readonly userParams?: { [key: string]: any };
@@ -303,8 +305,8 @@ interface HelperProps {
   readonly lambdaOutput?: codepipeline.Artifact;
 }
 
-function stackIncludingLambdaInvokeCodePipeline(props: HelperProps) {
-  const stack = new Stack();
+function stackIncludingLambdaInvokeCodePipeline(props: HelperProps, app?: App) {
+  const stack = new Stack(app);
 
   new codepipeline.Pipeline(stack, 'Pipeline', {
     stages: [
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/test.manual-approval.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/manual-approval.test.ts
similarity index 96%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/test.manual-approval.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/manual-approval.test.ts
index cac35015eda15..121e87cd9eb4e 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/test.manual-approval.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/manual-approval.test.ts
@@ -2,12 +2,12 @@ import { expect, haveResourceLike } from '@aws-cdk/assert';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as sns from '@aws-cdk/aws-sns';
 import { SecretValue, Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'manual approval Action': {
     'allows passing an SNS Topic when constructing it'(test: Test) {
       const stack = new Stack();
@@ -75,4 +75,4 @@ export = {
       test.done();
     },
   },
-};
+});
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/test.pipeline.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/pipeline.test.ts
similarity index 99%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/test.pipeline.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/pipeline.test.ts
index 242e55b40e039..0c0491d778150 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/test.pipeline.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/pipeline.test.ts
@@ -8,12 +8,12 @@ import * as lambda from '@aws-cdk/aws-lambda';
 import * as s3 from '@aws-cdk/aws-s3';
 import * as sns from '@aws-cdk/aws-sns';
 import { App, Aws, CfnParameter, ConstructNode, SecretValue, Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'basic pipeline'(test: Test) {
     const stack = new Stack();
@@ -1126,4 +1126,4 @@ export = {
       test.done();
     },
   },
-};
+});
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/s3/test.s3-deploy-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/s3/s3-deploy-action.test.ts
similarity index 78%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/s3/test.s3-deploy-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/s3/s3-deploy-action.test.ts
index 8184c61bc3fce..c1e1c3c34c2c8 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/s3/test.s3-deploy-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/s3/s3-deploy-action.test.ts
@@ -1,19 +1,19 @@
-import { expect, haveResourceLike } from '@aws-cdk/assert';
+import '@aws-cdk/assert/jest';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as s3 from '@aws-cdk/aws-s3';
 import { App, Duration, SecretValue, Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { testFutureBehavior } from 'cdk-build-tools/lib/feature-flag';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
-  'S3 Deploy Action': {
-    'by default extract artifacts'(test: Test) {
+describe('', () => {
+  describe('S3 Deploy Action', () => {
+    test('by default extract artifacts', () => {
       const stack = new Stack();
       minimalPipeline(stack);
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         'Stages': [
           {
             'Name': 'Source',
@@ -43,16 +43,16 @@ export = {
             ],
           },
         ],
-      }));
+      });
 
-      test.done();
-    },
-    'grant the pipeline correct access to the target bucket'(test: Test) {
-      const stack = new Stack();
+    });
+
+    testFutureBehavior('grant the pipeline correct access to the target bucket', { '@aws-cdk/aws-s3:grantWriteWithoutAcl': true }, App, (app) => {
+      const stack = new Stack(app);
       minimalPipeline(stack);
 
-      expect(stack).to(haveResourceLike('AWS::IAM::Policy', {
+      expect(stack).toHaveResourceLike('AWS::IAM::Policy', {
         'PolicyDocument': {
           'Statement': [
             {
@@ -62,7 +62,7 @@
                 's3:GetBucket*',
                 's3:List*',
                 's3:DeleteObject*',
-                's3:PutObject*',
+                's3:PutObject',
                 's3:Abort*',
               ],
@@ -73,18 +73,18 @@
             },
           ],
         },
-      }));
+      });
 
-      test.done();
-    },
-    'kebab-case CannedACL value'(test: Test) {
+    });
+
+    test('kebab-case CannedACL value', () => {
       const stack = new Stack();
       minimalPipeline(stack, {
         accessControl: s3.BucketAccessControl.PUBLIC_READ_WRITE,
       });
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         'Stages': [
           {},
           {
@@ -97,12 +97,12 @@
             ],
           },
         ],
-      }));
+      });
+
-      test.done();
-    },
+    });
 
-    'allow customizing cache-control'(test: Test) {
+    test('allow customizing cache-control', () => {
       const stack = new Stack();
       minimalPipeline(stack, {
         cacheControl: [
@@ -112,7 +112,7 @@
         ],
       });
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         'Stages': [
           {},
           {
@@ -125,18 +125,18 @@
             ],
           },
         ],
-      }));
+      });
+
-      test.done();
-    },
+    });
 
-    'allow customizing objectKey (deployment path on S3)'(test: Test) {
+    test('allow customizing objectKey (deployment path on S3)', () => {
       const stack = new Stack();
       minimalPipeline(stack, {
         objectKey: '/a/b/c',
       });
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         'Stages': [
           {},
           {
@@ -149,12 +149,12 @@
             ],
           },
         ],
-      }));
+      });
+
-      test.done();
-    },
+    });
 
-    'correctly makes the action cross-region for a Bucket imported with a different region'(test: Test) {
+    test('correctly makes the action cross-region for a Bucket imported with a different region', () => {
       const app = new App();
       const stack = new Stack(app, 'PipelineStack', {
         env: { account: '123456789012', region: 'us-west-2' },
@@ -168,7 +168,7 @@
         bucket: deployBucket,
       });
 
-      expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', {
+      expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
         Stages: [
           {},
           {
@@ -181,12 +181,12 @@
             ],
           },
         ],
-      }));
+      });
+
-      test.done();
-    },
-  },
-};
+    });
+  });
+});
 
 interface MinimalPipelineOptions {
   readonly accessControl?: s3.BucketAccessControl;
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/s3/test.s3-source-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/s3/s3-source-action.test.ts
similarity index 99%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/s3/test.s3-source-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/s3/s3-source-action.test.ts
index fc069cffae6d3..3ae1210dcd8f7 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/s3/test.s3-source-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/s3/s3-source-action.test.ts
@@ -3,12 +3,12 @@ import * as codebuild from '@aws-cdk/aws-codebuild';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as s3 from '@aws-cdk/aws-s3';
 import { Lazy, Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'S3 Source Action': {
     'by default polls for source changes and does not use Events'(test: Test) {
       const stack = new Stack();
@@ -269,7 +269,7 @@ export = {
       test.done();
     },
   },
-};
+});
 
 interface MinimalPipelineOptions {
   readonly trigger?: cpactions.S3Trigger;
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/servicecatalog/test.servicecatalog-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/servicecatalog/servicecatalog-action.test.ts
similarity index 98%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/servicecatalog/test.servicecatalog-action.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/servicecatalog/servicecatalog-action.test.ts
index 8c4101552c37f..1415b2a084eaf 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/servicecatalog/test.servicecatalog-action.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/servicecatalog/servicecatalog-action.test.ts
@@ -2,12 +2,12 @@ import { expect, haveResourceLike } from '@aws-cdk/assert';
 import * as codecommit from '@aws-cdk/aws-codecommit';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import { Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
 /* eslint-disable quote-props */
 
-export = {
+nodeunitShim({
   'addAction succesfully leads to creation of codepipeline service catalog action with properly formatted TemplateFilePath'(test: Test) {
     // GIVEN
     const stack = new TestFixture();
@@ -98,7 +98,7 @@ export = {
 
     test.done();
   },
-};
+});
 
 /**
 * A test stack with a half-prepared pipeline ready to add CloudFormation actions to
diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/stepfunctions/test.stepfunctions-invoke-actions.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/stepfunctions/stepfunctions-invoke-actions.test.ts
similarity index 98%
rename from packages/@aws-cdk/aws-codepipeline-actions/test/stepfunctions/test.stepfunctions-invoke-actions.ts
rename to packages/@aws-cdk/aws-codepipeline-actions/test/stepfunctions/stepfunctions-invoke-actions.test.ts
index af9bf28b90640..47e33e7d5b5d8 100644
--- a/packages/@aws-cdk/aws-codepipeline-actions/test/stepfunctions/test.stepfunctions-invoke-actions.ts
+++ b/packages/@aws-cdk/aws-codepipeline-actions/test/stepfunctions/stepfunctions-invoke-actions.test.ts
@@ -3,10 +3,10 @@ import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as s3 from '@aws-cdk/aws-s3';
 import * as stepfunction from '@aws-cdk/aws-stepfunctions';
 import { Stack } from '@aws-cdk/core';
-import { Test } from 'nodeunit';
+import { nodeunitShim, Test } from 'nodeunit-shim';
 import * as cpactions from '../../lib';
 
-export = {
+nodeunitShim({
   'StepFunctions Invoke Action': {
     'Verify stepfunction configuration properties are set to specific values'(test: Test) {
       const stack = new Stack();
@@ -144,7 +144,7 @@
 
     },
   },
-};
+});
 
 function minimalPipeline(stack: Stack): codepipeline.IStage {
   const sourceOutput = new codepipeline.Artifact();
diff --git a/packages/@aws-cdk/aws-rds/test/cluster.test.ts b/packages/@aws-cdk/aws-rds/test/cluster.test.ts
index 6bfda0f1cfa50..11f18100e9fbe 100644
--- a/packages/@aws-cdk/aws-rds/test/cluster.test.ts
+++ b/packages/@aws-cdk/aws-rds/test/cluster.test.ts
@@ -1,18 +1,19 @@
-import { ABSENT, countResources, expect, haveResource, haveResourceLike, ResourcePart, SynthUtils } from '@aws-cdk/assert';
+import '@aws-cdk/assert/jest';
+import { ABSENT, ResourcePart, SynthUtils } from '@aws-cdk/assert';
 import * as ec2 from '@aws-cdk/aws-ec2';
 import { ManagedPolicy, Role, ServicePrincipal } from '@aws-cdk/aws-iam';
 import * as kms from '@aws-cdk/aws-kms';
 import * as logs from '@aws-cdk/aws-logs';
 import * as s3 from '@aws-cdk/aws-s3';
 import * as cdk from '@aws-cdk/core';
-import { nodeunitShim, Test } from 'nodeunit-shim';
+import { testFutureBehavior } from 'cdk-build-tools/lib/feature-flag';
 import {
   AuroraEngineVersion, AuroraMysqlEngineVersion, AuroraPostgresEngineVersion, CfnDBCluster, Credentials, DatabaseCluster,
   DatabaseClusterEngine, DatabaseClusterFromSnapshot, ParameterGroup, PerformanceInsightRetention, SubnetGroup,
 } from '../lib';
 
-nodeunitShim({
-  'creating a Cluster also creates 2 DB Instances'(test: Test) {
+describe('cluster', () => {
+  test('creating a Cluster also creates 2 DB Instances', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -31,7 +32,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       Properties: {
         Engine: 'aurora',
         DBSubnetGroupName: { Ref: 'DatabaseSubnets56F17B9A' },
@@ -41,18 +42,18 @@
       },
       DeletionPolicy: 'Snapshot',
       UpdateReplacePolicy: 'Snapshot',
-    }, ResourcePart.CompleteDefinition));
+    }, ResourcePart.CompleteDefinition);
 
-    expect(stack).to(countResources('AWS::RDS::DBInstance', 2));
-    expect(stack).to(haveResource('AWS::RDS::DBInstance', {
+    expect(stack).toCountResources('AWS::RDS::DBInstance', 2);
+    expect(stack).toHaveResource('AWS::RDS::DBInstance', {
       DeletionPolicy: 'Delete',
       UpdateReplacePolicy: 'Delete',
-    }, ResourcePart.CompleteDefinition));
+    }, ResourcePart.CompleteDefinition);
 
-    test.done();
-  },
 
-  'can create a cluster with a single instance'(test: Test) {
+  });
+
+  test('can create a cluster with a single instance', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -72,18 +73,18 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       Engine: 'aurora',
       DBSubnetGroupName: { Ref: 'DatabaseSubnets56F17B9A' },
       MasterUsername: 'admin',
      MasterUserPassword: 'tooshort',
       VpcSecurityGroupIds: [{ 'Fn::GetAtt': ['DatabaseSecurityGroup5C91FDCB', 'GroupId'] }],
-    }));
+    });
 
-    test.done();
-  },
 
-  'can create a cluster with imported vpc and security group'(test: Test) {
+  });
+
+  test('can create a cluster with imported vpc and security group', () => {
     // GIVEN
     const stack = testStack();
    const vpc = ec2.Vpc.fromLookup(stack, 'VPC', {
@@ -107,18 +108,18 @@ nodeunitShim({
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       Engine: 'aurora',
       DBSubnetGroupName: { Ref: 'DatabaseSubnets56F17B9A' },
       MasterUsername: 'admin',
       MasterUserPassword: 'tooshort',
       VpcSecurityGroupIds: ['SecurityGroupId12345'],
-    }));
+    });
+
-    test.done();
-  },
+  });
 
-  'cluster with parameter group'(test: Test) {
+  test('cluster with parameter group', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -145,14 +146,14 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       DBClusterParameterGroupName: { Ref: 'ParamsA8366201' },
-    }));
+    });
+
-    test.done();
-  },
+  });
 
-  "sets the retention policy of the SubnetGroup to 'Retain' if the Cluster is created with 'Retain'"(test: Test) {
+  test("sets the retention policy of the SubnetGroup to 'Retain' if the Cluster is created with 'Retain'", () => {
     const stack = new cdk.Stack();
     const vpc = new ec2.Vpc(stack, 'Vpc');
@@ -166,15 +167,15 @@
       removalPolicy: cdk.RemovalPolicy.RETAIN,
     });
 
-    expect(stack).to(haveResourceLike('AWS::RDS::DBSubnetGroup', {
+    expect(stack).toHaveResourceLike('AWS::RDS::DBSubnetGroup', {
       DeletionPolicy: 'Retain',
       UpdateReplacePolicy: 'Retain',
-    }, ResourcePart.CompleteDefinition));
+    }, ResourcePart.CompleteDefinition);
+
-    test.done();
-  },
+  });
 
-  'creates a secret when master credentials are not specified'(test: Test) {
+  test('creates a secret when master credentials are not specified', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -193,7 +194,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       MasterUsername: {
         'Fn::Join': [
           '',
@@ -218,21 +219,21 @@
           ],
         ],
       },
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::SecretsManager::Secret', {
+    expect(stack).toHaveResource('AWS::SecretsManager::Secret', {
      GenerateSecretString: {
        ExcludeCharacters: '\"@/\\',
        GenerateStringKey: 'password',
        PasswordLength: 30,
        SecretStringTemplate: '{"username":"admin"}',
      },
-    }));
+    });
 
-    test.done();
-  },
 
-  'create an encrypted cluster with custom KMS key'(test: Test) {
+  });
+
+  test('create an encrypted cluster with custom KMS key', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -251,19 +252,19 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       KmsKeyId: {
         'Fn::GetAtt': [
           'Key961B73FD',
           'Arn',
         ],
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'cluster with instance parameter group'(test: Test) {
+  });
+
+  test('cluster with instance parameter group', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -287,18 +288,17 @@
       },
     });
 
-    expect(stack).to(haveResource('AWS::RDS::DBInstance', {
+    expect(stack).toHaveResource('AWS::RDS::DBInstance', {
       DBParameterGroupName: {
         Ref: 'ParameterGroup5E32DECB',
       },
-    }));
+    });
 
-    test.done();
-  },
+  });
 
-  'performance insights': {
-    'cluster with all performance insights properties'(test: Test) {
+  describe('performance insights', () => {
+    test('cluster with all performance insights properties', () => {
       // GIVEN
       const stack = testStack();
       const vpc = new ec2.Vpc(stack, 'VPC');
@@ -317,16 +317,16 @@
         },
       });
 
-      expect(stack).to(haveResource('AWS::RDS::DBInstance', {
+      expect(stack).toHaveResource('AWS::RDS::DBInstance', {
        EnablePerformanceInsights: true,
        PerformanceInsightsRetentionPeriod: 731,
        PerformanceInsightsKMSKeyId: { 'Fn::GetAtt': ['Key961B73FD', 'Arn'] },
-      }));
+      });
 
-      test.done();
-    },
 
-    'setting performance insights fields enables performance insights'(test: Test) {
+    });
+
+    test('setting performance insights fields enables performance insights', () => {
       // GIVEN
       const stack = testStack();
       const vpc = new ec2.Vpc(stack, 'VPC');
@@ -343,20 +343,20 @@
         },
       });
 
-      expect(stack).to(haveResource('AWS::RDS::DBInstance', {
+      expect(stack).toHaveResource('AWS::RDS::DBInstance', {
         EnablePerformanceInsights: true,
         PerformanceInsightsRetentionPeriod: 731,
-      }));
+      });
 
-      test.done();
-    },
 
-    'throws if performance insights fields are set but performance insights is disabled'(test: Test) {
+    });
+
+    test('throws if performance insights fields are set but performance insights is disabled', () => {
      // GIVEN
      const stack = testStack();
      const vpc = new ec2.Vpc(stack, 'VPC');
 
-      test.throws(() => {
+      expect(() => {
        new DatabaseCluster(stack, 'Database', {
          engine: DatabaseClusterEngine.AURORA,
          credentials: {
@@ -368,13 +368,13 @@
            performanceInsightRetention: PerformanceInsightRetention.DEFAULT,
          },
        });
-      }, /`enablePerformanceInsights` disabled, but `performanceInsightRetention` or `performanceInsightEncryptionKey` was set/);
+      }).toThrow(/`enablePerformanceInsights` disabled, but `performanceInsightRetention` or `performanceInsightEncryptionKey` was set/);
 
-      test.done();
-    },
-  },
 
-  'cluster with disable automatic upgrade of minor version'(test: Test) {
+    });
+  });
+
+  test('cluster with disable automatic upgrade of minor version', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -388,14 +388,14 @@
       },
     });
 
-    expect(stack).to(haveResource('AWS::RDS::DBInstance', {
+    expect(stack).toHaveResource('AWS::RDS::DBInstance', {
       AutoMinorVersionUpgrade: false,
-    }));
+    });
 
-    test.done();
-  },
 
-  'cluster with allow upgrade of major version'(test: Test) {
+  });
+
+  test('cluster with allow upgrade of major version', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -409,14 +409,14 @@
       },
     });
 
-    expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', {
+    expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', {
       AllowMajorVersionUpgrade: true,
-    }));
+    });
+
-    test.done();
-  },
+  });
 
-  'cluster with disallow remove backups'(test: Test) {
+  test('cluster with disallow remove backups', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -430,14 +430,14 @@
       },
     });
 
-    expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', {
+    expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', {
       DeleteAutomatedBackups: false,
-    }));
+    });
 
-    test.done();
-  },
 
-  'create a cluster using a specific version of MySQL'(test: Test) {
+  });
+
+  test('create a cluster using a specific version of MySQL', () => {
    // GIVEN
    const stack = testStack();
    const vpc = new ec2.Vpc(stack, 'VPC');
@@ -457,15 +457,15 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       Engine: 'aurora-mysql',
       EngineVersion: '5.7.mysql_aurora.2.04.4',
-    }));
+    });
 
-    test.done();
-  },
 
-  'create a cluster using a specific version of Postgresql'(test: Test) {
+  });
+
+  test('create a cluster using a specific version of Postgresql', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -485,15 +485,15 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       Engine: 'aurora-postgresql',
       EngineVersion: '10.7',
-    }));
+    });
 
-    test.done();
-  },
 
-  'cluster exposes different read and write endpoints'(test: Test) {
+  });
+
+  test('cluster exposes different read and write endpoints', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -511,15 +511,12 @@ nodeunitShim({
     });
 
     // THEN
-    test.notDeepEqual(
-      stack.resolve(cluster.clusterEndpoint),
-      stack.resolve(cluster.clusterReadEndpoint),
-    );
+    expect(stack.resolve(cluster.clusterEndpoint)).not.toEqual(stack.resolve(cluster.clusterReadEndpoint));
 
-    test.done();
-  },
 
-  'imported cluster with imported security group honors allowAllOutbound'(test: Test) {
+  });
+
+  test('imported cluster with imported security group honors allowAllOutbound', () => {
     // GIVEN
     const stack = testStack();
 
@@ -539,41 +536,41 @@ nodeunitShim({
     cluster.connections.allowToAnyIpv4(ec2.Port.tcp(443));
 
     // THEN
-    expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', {
+    expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', {
       GroupId: 'sg-123456789',
-    }));
+    });
 
-    test.done();
-  },
 
-  'can import a cluster with minimal attributes'(test: Test) {
+  });
+
+  test('can import a cluster with minimal attributes', () => {
     const stack = testStack();
 
     const cluster = DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Database', {
       clusterIdentifier: 'identifier',
     });
 
-    test.equals(cluster.clusterIdentifier, 'identifier');
+    expect(cluster.clusterIdentifier).toEqual('identifier');
 
-    test.done();
-  },
 
-  'minimal imported cluster throws on accessing attributes for unprovided parameters'(test: Test) {
+  });
+
+  test('minimal imported cluster throws on accessing attributes for unprovided parameters', () => {
     const stack = testStack();
 
     const cluster = DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Database', {
       clusterIdentifier: 'identifier',
     });
 
-    test.throws(() => cluster.clusterEndpoint, /Cannot access `clusterEndpoint` of an imported cluster/);
-    test.throws(() => cluster.clusterReadEndpoint, /Cannot access `clusterReadEndpoint` of an imported cluster/);
-    test.throws(() => cluster.instanceIdentifiers, /Cannot access `instanceIdentifiers` of an imported cluster/);
-    test.throws(() => cluster.instanceEndpoints, /Cannot access `instanceEndpoints` of an imported cluster/);
+    expect(() => cluster.clusterEndpoint).toThrow(/Cannot access `clusterEndpoint` of an imported cluster/);
+    expect(() => cluster.clusterReadEndpoint).toThrow(/Cannot access `clusterReadEndpoint` of an imported cluster/);
+    expect(() => cluster.instanceIdentifiers).toThrow(/Cannot access `instanceIdentifiers` of an imported cluster/);
+    expect(() => cluster.instanceEndpoints).toThrow(/Cannot access `instanceEndpoints` of an imported cluster/);
 
-    test.done();
-  },
 
-  'imported cluster can access properties if attributes are provided'(test: Test) {
+  });
+
+  test('imported cluster can access properties if attributes are provided', () => {
     const stack = testStack();
 
     const cluster = DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Database', {
@@ -588,15 +585,15 @@ nodeunitShim({
       })],
     });
 
-    test.equals(cluster.clusterEndpoint.socketAddress, 'addr:3306');
-    test.equals(cluster.clusterReadEndpoint.socketAddress, 'reader-address:3306');
-    test.deepEqual(cluster.instanceIdentifiers, ['identifier']);
-    test.deepEqual(cluster.instanceEndpoints.map(endpoint => endpoint.socketAddress), ['instance-addr:3306']);
+    expect(cluster.clusterEndpoint.socketAddress).toEqual('addr:3306');
+    expect(cluster.clusterReadEndpoint.socketAddress).toEqual('reader-address:3306');
+    expect(cluster.instanceIdentifiers).toEqual(['identifier']);
+    expect(cluster.instanceEndpoints.map(endpoint => endpoint.socketAddress)).toEqual(['instance-addr:3306']);
 
-    test.done();
-  },
 
-  'cluster supports metrics'(test: Test) {
+  });
+
+  test('cluster supports metrics', () => {
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -611,7 +608,7 @@
       },
     });
 
-    test.deepEqual(stack.resolve(cluster.metricCPUUtilization()), {
+    expect(stack.resolve(cluster.metricCPUUtilization())).toEqual({
       dimensions: { DBClusterIdentifier: { Ref: 'DatabaseB269D8BB' } },
       namespace: 'AWS/RDS',
       metricName: 'CPUUtilization',
@@ -621,10 +618,10 @@
       region: 'us-test-1',
     });
 
-    test.done();
-  },
 
-  'cluster with enabled monitoring'(test: Test) {
+  });
+
+  test('cluster with enabled monitoring', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -644,14 +641,14 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBInstance', {
+    expect(stack).toHaveResource('AWS::RDS::DBInstance', {
       MonitoringInterval: 60,
       MonitoringRoleArn: {
         'Fn::GetAtt': ['DatabaseMonitoringRole576991DA', 'Arn'],
       },
-    }, ResourcePart.Properties));
+    }, ResourcePart.Properties);
 
-    expect(stack).to(haveResource('AWS::IAM::Role', {
+    expect(stack).toHaveResource('AWS::IAM::Role', {
       AssumeRolePolicyDocument: {
         Statement: [
           {
@@ -678,12 +675,12 @@
           ],
         },
       ],
-    }));
+    });
+
-    test.done();
-  },
+  });
 
-  'create a cluster with imported monitoring role'(test: Test) {
+  test('create a cluster with imported monitoring role', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -711,17 +708,17 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBInstance', {
+    expect(stack).toHaveResource('AWS::RDS::DBInstance', {
       MonitoringInterval: 60,
       MonitoringRoleArn: {
         'Fn::GetAtt': ['MonitoringRole90457BF9', 'Arn'],
       },
-    }, ResourcePart.Properties));
+    }, ResourcePart.Properties);
+
-    test.done();
-  },
+  });
 
-  'throws when trying to add rotation to a cluster without secret'(test: Test) {
+  test('throws when trying to add rotation to a cluster without secret', () => {
     // GIVEN
     const stack = new cdk.Stack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -740,12 +737,12 @@
     });
 
     // THEN
-    test.throws(() => cluster.addRotationSingleUser(), /without secret/);
+    expect(() => cluster.addRotationSingleUser()).toThrow(/without secret/);
 
-    test.done();
-  },
 
-  'throws when trying to add single user rotation multiple times'(test: Test) {
+  });
+
+  test('throws when trying to add single user rotation multiple times', () => {
     // GIVEN
     const stack = new cdk.Stack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -762,12 +759,12 @@
     cluster.addRotationSingleUser();
 
     // THEN
-    test.throws(() => cluster.addRotationSingleUser(), /A single user rotation was already added to this cluster/);
+    expect(() => cluster.addRotationSingleUser()).toThrow(/A single user rotation was already added to this cluster/);
 
-    test.done();
-  },
 
-  'create a cluster with s3 import role'(test: Test) {
+  });
+
+  test('create a cluster with s3 import role', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -791,7 +788,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -800,9 +797,9 @@
           ],
         },
       }],
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::RDS::DBClusterParameterGroup', {
+    expect(stack).toHaveResource('AWS::RDS::DBClusterParameterGroup', {
       Family: 'aurora5.6',
       Parameters: {
         aurora_load_from_s3_role: {
@@ -812,12 +809,12 @@
           ],
         },
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'create a cluster with s3 import buckets'(test: Test) {
+  });
+
+  test('create a cluster with s3 import buckets', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -839,7 +836,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -848,9 +845,9 @@
           ],
         },
       }],
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::RDS::DBClusterParameterGroup', {
+    expect(stack).toHaveResource('AWS::RDS::DBClusterParameterGroup', {
       Family: 'aurora5.6',
       Parameters: {
         aurora_load_from_s3_role: {
@@ -860,9 +857,9 @@
           ],
         },
       },
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::IAM::Policy', {
+    expect(stack).toHaveResource('AWS::IAM::Policy', {
       PolicyDocument: {
         Statement: [
          {
@@ -898,12 +895,12 @@
         ],
         Version: '2012-10-17',
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'cluster with s3 import bucket adds supported feature name to IAM role'(test: Test) {
+  });
+
+  test('cluster with s3 import bucket adds supported feature name to IAM role', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -927,7 +924,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -937,12 +934,12 @@
         },
         FeatureName: 's3Import',
       }],
-    }));
+    });
 
-    test.done();
-  },
 
-  'throws when s3 import bucket or s3 export bucket is supplied for a Postgres version that does not support it'(test: Test) {
+  });
+
+  test('throws when s3 import bucket or s3 export bucket is supplied for a Postgres version that does not support it', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -950,7 +947,7 @@
     const bucket = new s3.Bucket(stack, 'Bucket');
 
     // WHEN / THEN
-    test.throws(() => {
+    expect(() => {
       new DatabaseCluster(stack, 'Database', {
         engine: DatabaseClusterEngine.auroraPostgres({
           version: AuroraPostgresEngineVersion.VER_10_4,
@@ -965,9 +962,9 @@
         },
         s3ImportBuckets: [bucket],
       });
-    }, /s3Import is not supported for Postgres version: 10.4. Use a version that supports the s3Import feature./);
+    }).toThrow(/s3Import is not supported for Postgres version: 10.4. Use a version that supports the s3Import feature./);
 
-    test.throws(() => {
+    expect(() => {
       new DatabaseCluster(stack, 'AnotherDatabase', {
         engine: DatabaseClusterEngine.auroraPostgres({
           version: AuroraPostgresEngineVersion.VER_10_4,
@@ -982,12 +979,12 @@
         },
         s3ExportBuckets: [bucket],
       });
-    }, /s3Export is not supported for Postgres version: 10.4. Use a version that supports the s3Export feature./);
+    }).toThrow(/s3Export is not supported for Postgres version: 10.4. Use a version that supports the s3Export feature./);
 
-    test.done();
-  },
 
-  'cluster with s3 export bucket adds supported feature name to IAM role'(test: Test) {
+  });
+
+  test('cluster with s3 export bucket adds supported feature name to IAM role', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1011,7 +1008,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -1021,12 +1018,12 @@
         },
         FeatureName: 's3Export',
       }],
-    }));
+    });
 
-    test.done();
-  },
 
-  'create a cluster with s3 export role'(test: Test) {
+  });
+
+  test('create a cluster with s3 export role', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1050,7 +1047,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -1059,9 +1056,9 @@
           ],
         },
       }],
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::RDS::DBClusterParameterGroup', {
+    expect(stack).toHaveResource('AWS::RDS::DBClusterParameterGroup', {
       Family: 'aurora5.6',
       Parameters: {
         aurora_select_into_s3_role: {
@@ -1071,14 +1068,14 @@
           ],
         },
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'create a cluster with s3 export buckets'(test: Test) {
+  });
+
+  testFutureBehavior('create a cluster with s3 export buckets', { '@aws-cdk/aws-s3:grantWriteWithoutAcl': true }, cdk.App, (app) => {
     // GIVEN
-    const stack = testStack();
+    const stack = testStack(app);
     const vpc = new ec2.Vpc(stack, 'VPC');
 
     const bucket = new s3.Bucket(stack, 'Bucket');
@@ -1098,7 +1095,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -1107,9 +1104,9 @@
           ],
         },
       }],
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::RDS::DBClusterParameterGroup', {
+    expect(stack).toHaveResource('AWS::RDS::DBClusterParameterGroup', {
       Family: 'aurora5.6',
       Parameters: {
         aurora_select_into_s3_role: {
@@ -1119,9 +1116,9 @@
           ],
         },
       },
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::IAM::Policy', {
+    expect(stack).toHaveResource('AWS::IAM::Policy', {
       PolicyDocument: {
         Statement: [
           {
@@ -1130,7 +1127,7 @@
               's3:GetBucket*',
               's3:List*',
               's3:DeleteObject*',
-              's3:PutObject*',
+              's3:PutObject',
               's3:Abort*',
             ],
             Effect: 'Allow',
@@ -1160,12 +1157,12 @@
         ],
         Version: '2012-10-17',
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'create a cluster with s3 import and export buckets'(test: Test) {
+  });
+
+  test('create a cluster with s3 import and export buckets', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1189,7 +1186,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -1206,9 +1203,9 @@
           ],
         },
       }],
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::RDS::DBClusterParameterGroup', {
+    expect(stack).toHaveResource('AWS::RDS::DBClusterParameterGroup', {
       Family: 'aurora5.6',
       Parameters: {
         aurora_load_from_s3_role: {
@@ -1224,12 +1221,12 @@
           ],
         },
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'create a cluster with s3 import and export buckets and custom parameter group'(test: Test) {
+  });
+
+  test('create a cluster with s3 import and export buckets and custom parameter group', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1261,7 +1258,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -1278,9 +1275,9 @@
           ],
         },
       }],
-    }));
+    });
 
-    expect(stack).to(haveResource('AWS::RDS::DBClusterParameterGroup', {
+    expect(stack).toHaveResource('AWS::RDS::DBClusterParameterGroup', {
       Family: 'aurora5.6',
       Parameters: {
         key: 'value',
@@ -1297,12 +1294,12 @@
           ],
         },
       },
-    }));
+    });
+
-    test.done();
-  },
+  });
 
-  'PostgreSQL cluster with s3 export buckets does not generate custom parameter group and specifies the correct port'(test: Test) {
+  test('PostgreSQL cluster with s3 export buckets does not generate custom parameter group and specifies the correct port', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1326,7 +1323,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', {
       AssociatedRoles: [{
         RoleArn: {
           'Fn::GetAtt': [
@@ -1337,14 +1334,14 @@
       }],
       DBClusterParameterGroupName: 'default.aurora-postgresql11',
       Port: 5432,
-    }));
+    });
 
-    expect(stack).notTo(haveResource('AWS::RDS::DBClusterParameterGroup'));
+    expect(stack).not.toHaveResource('AWS::RDS::DBClusterParameterGroup');
 
-    test.done();
-  },
 
-  'unversioned PostgreSQL cluster can be used with s3 import and s3 export buckets'(test: Test) {
+  });
+
+  test('unversioned PostgreSQL cluster can be used with s3 import and s3 export buckets', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1368,7 +1365,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       AssociatedRoles: [
         {
           FeatureName: 's3Import',
@@ -1389,12 +1386,12 @@
           },
         },
       ],
-    }));
+    });
 
-    test.done();
-  },
 
-  "Aurora PostgreSQL cluster uses a different default master username than 'admin', which is a reserved word"(test: Test) {
+  });
+
+  test("Aurora PostgreSQL cluster uses a different default master username than 'admin', which is a reserved word", () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1408,16 +1405,16 @@
     });
 
     // THEN
-    expect(stack).to(haveResourceLike('AWS::SecretsManager::Secret', {
+    expect(stack).toHaveResourceLike('AWS::SecretsManager::Secret', {
       GenerateSecretString: {
         SecretStringTemplate: '{"username":"postgres"}',
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'MySQL cluster without S3 exports or imports references the correct default ParameterGroup'(test: Test) {
+  });
+
+  test('MySQL cluster without S3 exports or imports references the correct default ParameterGroup', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1436,16 +1433,16 @@
     });
 
     // THEN
-    expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', {
       DBClusterParameterGroupName: 'default.aurora-mysql5.7',
-    }));
+    });
+
+    expect(stack).not.toHaveResource('AWS::RDS::DBClusterParameterGroup');
 
-    expect(stack).notTo(haveResource('AWS::RDS::DBClusterParameterGroup'));
-    test.done();
-  },
+  });
 
-  'throws when s3ExportRole and s3ExportBuckets properties are both specified'(test: Test) {
+  test('throws when s3ExportRole and s3ExportBuckets properties are both specified', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1456,7 +1453,7 @@
     const exportBucket = new s3.Bucket(stack, 'ExportBucket');
 
     // THEN
-    test.throws(() => new DatabaseCluster(stack, 'Database', {
+    expect(() => new DatabaseCluster(stack, 'Database', {
       engine: DatabaseClusterEngine.AURORA,
       instances: 1,
       credentials: {
@@ -1468,12 +1465,12 @@
       },
       s3ExportRole: exportRole,
       s3ExportBuckets: [exportBucket],
-    }));
+    })).toThrow();
+
-    test.done();
-  },
+  });
 
-  'throws when s3ImportRole and s3ImportBuckets properties are both specified'(test: Test) {
+  test('throws when s3ImportRole and s3ImportBuckets properties are both specified', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1484,7 +1481,7 @@
     const importBucket = new s3.Bucket(stack, 'ImportBucket');
 
     // THEN
-    test.throws(() => new DatabaseCluster(stack, 'Database', {
+    expect(() => new DatabaseCluster(stack, 'Database', {
       engine: DatabaseClusterEngine.AURORA,
       instances: 1,
      credentials: {
@@ -1496,12 +1493,12 @@
       },
       s3ImportRole: importRole,
       s3ImportBuckets: [importBucket],
-    }));
+    })).toThrow();
 
-    test.done();
-  },
 
-  'can set CloudWatch log exports'(test: Test) {
+  });
+
+  test('can set CloudWatch log exports', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1521,14 +1518,14 @@
     });
 
     // THEN
-    expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', {
       EnableCloudwatchLogsExports: ['error', 'general', 'slowquery', 'audit'],
-    }));
+    });
+
-    test.done();
-  },
+  });
 
-  'can set CloudWatch log retention'(test: Test) {
+  test('can set CloudWatch log retention', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1549,7 +1546,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('Custom::LogRetention', {
+    expect(stack).toHaveResource('Custom::LogRetention', {
       ServiceToken: {
         'Fn::GetAtt': [
           'LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aFD4BFC8A',
@@ -1558,8 +1555,8 @@
       },
       LogGroupName: { 'Fn::Join': ['', ['/aws/rds/cluster/', { Ref: 'DatabaseB269D8BB' }, '/error']] },
       RetentionInDays: 90,
-    }));
-    expect(stack).to(haveResource('Custom::LogRetention', {
+    });
+    expect(stack).toHaveResource('Custom::LogRetention', {
       ServiceToken: {
         'Fn::GetAtt': [
           'LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aFD4BFC8A',
@@ -1568,17 +1565,17 @@
       },
       LogGroupName: { 'Fn::Join': ['', ['/aws/rds/cluster/', { Ref: 'DatabaseB269D8BB' }, '/general']] },
       RetentionInDays: 90,
-    }));
+    });
 
-    test.done();
-  },
 
-  'throws if given unsupported CloudWatch log exports'(test: Test) {
+  });
+
+  test('throws if given unsupported CloudWatch log exports', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
 
-    test.throws(() => {
+    expect(() => {
       new DatabaseCluster(stack, 'Database', {
         engine: DatabaseClusterEngine.AURORA,
         credentials: {
@@ -1591,12 +1588,12 @@
         },
         cloudwatchLogsExports: ['error', 'general', 'slowquery', 'audit', 'thislogdoesnotexist', 'neitherdoesthisone'],
       });
-    }, /Unsupported logs for the current engine type: thislogdoesnotexist,neitherdoesthisone/);
+    }).toThrow(/Unsupported logs for the current engine type: thislogdoesnotexist,neitherdoesthisone/);
 
-    test.done();
-  },
 
-  'can set deletion protection'(test: Test) {
+  });
+
+  test('can set deletion protection', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1616,14 +1613,14 @@
     });
 
     // THEN
-    expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', {
       DeletionProtection: true,
-    }));
+    });
 
-    test.done();
-  },
 
-  'does not throw (but adds a node error) if a (dummy) VPC does not have sufficient subnets'(test: Test) {
+  });
+
+  test('does not throw (but adds a node error) if a (dummy) VPC does not have sufficient subnets', () => {
    // GIVEN
    const stack = testStack();
    const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true });
@@ -1647,12 +1644,12 @@
     // THEN
     const art = SynthUtils.synthesize(stack);
     const meta = art.findMetadataByType('aws:cdk:error');
-    test.equal(meta[0].data, 'Cluster requires at least 2 subnets, got 0');
+    expect(meta[0].data).toEqual('Cluster requires at least 2 subnets, got 0');
 
-    test.done();
-  },
 
-  'create a cluster from a snapshot'(test: Test) {
+  });
+
+  test('create a cluster from a snapshot', () => {
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1666,7 +1663,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       Properties: {
         Engine: 'aurora',
         EngineVersion: '5.6.mysql_aurora.1.22.2',
@@ -1676,14 +1673,14 @@
       },
       DeletionPolicy: 'Snapshot',
       UpdateReplacePolicy: 'Snapshot',
-    }, ResourcePart.CompleteDefinition));
+    }, ResourcePart.CompleteDefinition);
 
-    expect(stack).to(countResources('AWS::RDS::DBInstance', 2));
+    expect(stack).toCountResources('AWS::RDS::DBInstance', 2);
 
-    test.done();
-  },
 
-  'reuse an existing subnet group'(test: Test) {
+  });
+
+  test('reuse an existing subnet group', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1701,15 +1698,15 @@
     });
 
     // THEN
-    expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', {
       DBSubnetGroupName: 'my-subnet-group',
-    }));
-    expect(stack).to(countResources('AWS::RDS::DBSubnetGroup', 0));
+    });
+    expect(stack).toCountResources('AWS::RDS::DBSubnetGroup', 0);
 
-    test.done();
-  },
 
-  'defaultChild returns the DB Cluster'(test: Test) {
+  });
+
+  test('defaultChild returns the DB Cluster', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1726,12 +1723,12 @@
     });
 
     // THEN
-    test.ok(cluster.node.defaultChild instanceof CfnDBCluster);
+    expect(cluster.node.defaultChild instanceof CfnDBCluster).toBeTruthy();
+
-    test.done();
-  },
+  });
 
-  'fromGeneratedSecret'(test: Test) {
+  test('fromGeneratedSecret', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1746,7 +1743,7 @@
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       MasterUsername: 'admin', // username is a string
       MasterUserPassword: {
         'Fn::Join': [
@@ -1760,12 +1757,12 @@
           ],
         ],
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'can set public accessibility for database cluster with instances in private subnet'(test: Test) {
+  });
+
+  test('can set public accessibility for database cluster with instances in private subnet', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -1782,15 +1779,15 @@ nodeunitShim({
       },
     });
 
    // THEN
-
expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { Engine: 'aurora', PubliclyAccessible: true, - })); + }); - test.done(); - }, - 'can set public accessibility for database cluster with instances in public subnet'(test: Test) { + }); + + test('can set public accessibility for database cluster with instances in public subnet', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -1807,15 +1804,15 @@ nodeunitShim({ }, }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { Engine: 'aurora', PubliclyAccessible: false, - })); + }); - test.done(); - }, - 'database cluster instances in public subnet should by default have publiclyAccessible set to true'(test: Test) { + }); + + test('database cluster instances in public subnet should by default have publiclyAccessible set to true', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -1831,13 +1828,13 @@ nodeunitShim({ }, }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { Engine: 'aurora', PubliclyAccessible: true, - })); + }); + - test.done(); - }, + }); }); test.each([ @@ -1859,25 +1856,24 @@ test.each([ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', { + expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', { DeletionPolicy: clusterValue, UpdateReplacePolicy: clusterValue, - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { DeletionPolicy: instanceValue, UpdateReplacePolicy: instanceValue, - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); - expect(stack).to(haveResourceLike('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResourceLike('AWS::RDS::DBSubnetGroup', { DeletionPolicy: subnetValue, UpdateReplacePolicy: subnetValue, - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); }); - -function testStack() { - const stack = new cdk.Stack(undefined, undefined, { env: { account: '12345', region: 'us-test-1' } }); +function testStack(app?: cdk.App) { + const stack = new cdk.Stack(app, undefined, { env: { account: '12345', region: 'us-test-1' } }); stack.node.setContext('availability-zones:12345:us-test-1', ['us-test-1a', 'us-test-1b']); return stack; } diff --git a/packages/@aws-cdk/aws-rds/test/instance.test.ts b/packages/@aws-cdk/aws-rds/test/instance.test.ts index 625dfa7074ae3..67fc9fd3905ff 100644 --- a/packages/@aws-cdk/aws-rds/test/instance.test.ts +++ b/packages/@aws-cdk/aws-rds/test/instance.test.ts @@ -1,4 +1,5 @@ -import { ABSENT, countResources, expect, haveResource, ResourcePart, haveResourceLike, anything } from '@aws-cdk/assert'; +import '@aws-cdk/assert/jest'; +import { ABSENT, ResourcePart, anything } from '@aws-cdk/assert'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as targets from '@aws-cdk/aws-events-targets'; import { ManagedPolicy, Role, ServicePrincipal, AccountPrincipal } from '@aws-cdk/aws-iam'; @@ -7,20 +8,19 @@ import * as lambda from '@aws-cdk/aws-lambda'; import * as logs from '@aws-cdk/aws-logs'; import * as s3 from '@aws-cdk/aws-s3'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; +import { testFutureBehavior } from 'cdk-build-tools/lib/feature-flag'; import * as rds from 
'../lib'; let stack: cdk.Stack; let vpc: ec2.Vpc; -nodeunitShim({ - 'setUp'(cb: () => void) { +describe('instance', () => { + beforeEach(() => { stack = new cdk.Stack(); vpc = new ec2.Vpc(stack, 'VPC'); - cb(); - }, + }); - 'create a DB instance'(test: Test) { + test('create a DB instance', () => { // WHEN new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), @@ -48,7 +48,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { Properties: { DBInstanceClass: 'db.t2.medium', AllocatedStorage: '100', @@ -116,9 +116,9 @@ nodeunitShim({ }, DeletionPolicy: 'Snapshot', UpdateReplacePolicy: 'Snapshot', - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); - expect(stack).to(haveResource('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResource('AWS::RDS::DBSubnetGroup', { DBSubnetGroupDescription: 'Subnet group for Instance database', SubnetIds: [ { @@ -128,13 +128,13 @@ nodeunitShim({ Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A', }, ], - })); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { GroupDescription: 'Security group for Instance database', - })); + }); - expect(stack).to(haveResource('AWS::IAM::Role', { + expect(stack).toHaveResource('AWS::IAM::Role', { AssumeRolePolicyDocument: { Statement: [ { @@ -161,9 +161,9 @@ nodeunitShim({ ], }, ], - })); + }); - expect(stack).to(haveResource('AWS::SecretsManager::Secret', { + expect(stack).toHaveResource('AWS::SecretsManager::Secret', { Description: { 'Fn::Join': [ '', @@ -181,9 +181,9 @@ nodeunitShim({ PasswordLength: 30, SecretStringTemplate: '{"username":"syscdk"}', }, - })); + }); - expect(stack).to(haveResource('AWS::SecretsManager::SecretTargetAttachment', { + expect(stack).toHaveResource('AWS::SecretsManager::SecretTargetAttachment', { SecretId: { Ref: 'InstanceSecret478E0A47', }, @@ -191,14 +191,14 @@ nodeunitShim({ Ref: 'InstanceC1063A87', }, TargetType: 'AWS::RDS::DBInstance', - })); + }); + + expect(stack).toCountResources('Custom::LogRetention', 4); - expect(stack).to(countResources('Custom::LogRetention', 4)); - test.done(); - }, + }); - 'instance with option and parameter group'(test: Test) { + test('instance with option and parameter group', () => { const optionGroup = new rds.OptionGroup(stack, 'OptionGroup', { engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), configurations: [ @@ -227,19 +227,19 @@ nodeunitShim({ parameterGroup, }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { DBParameterGroupName: { Ref: 'ParameterGroup5E32DECB', }, OptionGroupName: { Ref: 'OptionGroupACA43DC1', }, - })); + }); - test.done(); - }, - 'can specify subnet type'(test: Test) { + }); + + test('can specify subnet type', () => { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19, @@ -251,13 +251,13 @@ nodeunitShim({ }, }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { DBSubnetGroupName: { Ref: 'InstanceSubnetGroupF2CBA54F', }, PubliclyAccessible: false, - })); - expect(stack).to(haveResource('AWS::RDS::DBSubnetGroup', { + }); + expect(stack).toHaveResource('AWS::RDS::DBSubnetGroup', { 
DBSubnetGroupDescription: 'Subnet group for Instance database', SubnetIds: [ { @@ -267,13 +267,13 @@ nodeunitShim({ Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A', }, ], - })); + }); - test.done(); - }, - 'DatabaseInstanceFromSnapshot': { - 'create an instance from snapshot'(test: Test) { + }); + + describe('DatabaseInstanceFromSnapshot', () => { + test('create an instance from snapshot', () => { new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { snapshotIdentifier: 'my-snapshot', engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), @@ -281,14 +281,14 @@ nodeunitShim({ vpc, }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { DBSnapshotIdentifier: 'my-snapshot', - })); + }); + - test.done(); - }, + }); - 'can generate a new snapshot password'(test: Test) { + test('can generate a new snapshot password', () => { new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { snapshotIdentifier: 'my-snapshot', engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), @@ -298,13 +298,13 @@ nodeunitShim({ }), }); - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { MasterUsername: ABSENT, MasterUserPassword: { 'Fn::Join': ['', ['{{resolve:secretsmanager:', { Ref: 'InstanceSecret478E0A47' }, ':SecretString:password::}}']], }, - })); - expect(stack).to(haveResource('AWS::SecretsManager::Secret', { + }); + expect(stack).toHaveResource('AWS::SecretsManager::Secret', { Description: { 'Fn::Join': ['', ['Generated by the CDK for stack: ', { Ref: 'AWS::StackName' }]], }, @@ -314,12 +314,12 @@ nodeunitShim({ PasswordLength: 30, SecretStringTemplate: '{"username":"admin"}', }, - })); + }); - test.done(); - }, - 'fromGeneratedSecret'(test: Test) { + }); + + test('fromGeneratedSecret', () => { new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { snapshotIdentifier: 'my-snapshot', engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), @@ -329,29 +329,29 @@ nodeunitShim({ }), }); - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { MasterUsername: ABSENT, MasterUserPassword: { // logical id of secret has a hash 'Fn::Join': ['', ['{{resolve:secretsmanager:', { Ref: 'InstanceSecretB6DFA6BE8ee0a797cad8a68dbeb85f8698cdb5bb' }, ':SecretString:password::}}']], }, - })); + }); + - test.done(); - }, + }); - 'throws if generating a new password without a username'(test: Test) { - test.throws(() => new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { + test('throws if generating a new password without a username', () => { + expect(() => new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { snapshotIdentifier: 'my-snapshot', engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, credentials: { generatePassword: true }, - }), /`credentials` `username` must be specified when `generatePassword` is set to true/); + })).toThrow(/`credentials` `username` must be specified when `generatePassword` is set to true/); - test.done(); - }, - 'can set a new snapshot password from an existing SecretValue'(test: Test) { + }); + + test('can set a new snapshot password from an existing SecretValue', () => { new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { snapshotIdentifier: 'my-snapshot', engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 
}), @@ -360,15 +360,15 @@ nodeunitShim({ }); // TODO - Expect this to be broken - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { MasterUsername: ABSENT, MasterUserPassword: 'mysecretpassword', - })); + }); - test.done(); - }, - 'can set a new snapshot password from an existing Secret'(test: Test) { + }); + + test('can set a new snapshot password from an existing Secret', () => { const secret = new rds.DatabaseSecret(stack, 'DBSecret', { username: 'admin', encryptionKey: new kms.Key(stack, 'PasswordKey'), @@ -380,18 +380,18 @@ nodeunitShim({ credentials: rds.SnapshotCredentials.fromSecret(secret), }); - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { MasterUsername: ABSENT, MasterUserPassword: { 'Fn::Join': ['', ['{{resolve:secretsmanager:', { Ref: 'DBSecretD58955BC' }, ':SecretString:password::}}']], }, - })); + }); + - test.done(); - }, - }, + }); + }); - 'create a read replica in the same region - with the subnet group name'(test: Test) { + test('create a read replica in the same region - with the subnet group name', () => { const sourceInstance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), @@ -406,7 +406,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { SourceDBInstanceIdentifier: { 'Fn::Join': ['', [ 'arn:', @@ -422,12 +422,12 @@ nodeunitShim({ DBSubnetGroupName: { Ref: 'ReadReplicaSubnetGroup680C605C', }, - })); + }); + - test.done(); - }, + }); - 'on event'(test: Test) { + test('on event', () => { const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, vpc, @@ -442,7 +442,7 @@ nodeunitShim({ instance.onEvent('InstanceEvent', { target: new targets.LambdaFunction(fn) }); // THEN - expect(stack).to(haveResource('AWS::Events::Rule', { + expect(stack).toHaveResource('AWS::Events::Rule', { EventPattern: { source: [ 'aws.rds', @@ -484,12 +484,12 @@ nodeunitShim({ Id: 'Target0', }, ], - })); + }); - test.done(); - }, - 'on event without target'(test: Test) { + }); + + test('on event without target', () => { const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, vpc, @@ -499,7 +499,7 @@ nodeunitShim({ instance.onEvent('InstanceEvent'); // THEN - expect(stack).to(haveResource('AWS::Events::Rule', { + expect(stack).toHaveResource('AWS::Events::Rule', { EventPattern: { source: [ 'aws.rds', @@ -530,12 +530,12 @@ nodeunitShim({ }, ], }, - })); + }); - test.done(); - }, - 'can use metricCPUUtilization'(test: Test) { + }); + + test('can use metricCPUUtilization', () => { // WHEN const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, @@ -543,7 +543,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(stack.resolve(instance.metricCPUUtilization()), { + expect(stack.resolve(instance.metricCPUUtilization())).toEqual({ dimensions: { DBInstanceIdentifier: { Ref: 'InstanceC1063A87' } }, namespace: 'AWS/RDS', metricName: 'CPUUtilization', @@ -551,21 +551,21 @@ nodeunitShim({ statistic: 'Average', }); - test.done(); - }, - 'can resolve endpoint port and socket address'(test: Test) { + }); + + test('can resolve endpoint port and socket address', () => { // WHEN const instance = new 
rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, vpc, }); - test.deepEqual(stack.resolve(instance.instanceEndpoint.port), { + expect(stack.resolve(instance.instanceEndpoint.port)).toEqual({ 'Fn::GetAtt': ['InstanceC1063A87', 'Endpoint.Port'], }); - test.deepEqual(stack.resolve(instance.instanceEndpoint.socketAddress), { + expect(stack.resolve(instance.instanceEndpoint.socketAddress)).toEqual({ 'Fn::Join': [ '', [ @@ -576,10 +576,10 @@ nodeunitShim({ ], }); - test.done(); - }, - 'can deactivate backup'(test: Test) { + }); + + test('can deactivate backup', () => { // WHEN new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, @@ -588,14 +588,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { BackupRetentionPeriod: 0, - })); + }); + - test.done(); - }, + }); - 'imported instance with imported security group with allowAllOutbound set to false'(test: Test) { + test('imported instance with imported security group with allowAllOutbound set to false', () => { const instance = rds.DatabaseInstance.fromDatabaseInstanceAttributes(stack, 'Database', { instanceEndpointAddress: 'address', instanceIdentifier: 'identifier', @@ -609,14 +609,14 @@ nodeunitShim({ instance.connections.allowToAnyIpv4(ec2.Port.tcp(443)); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { GroupId: 'sg-123456789', - })); + }); - test.done(); - }, - 'create an instance with imported monitoring role'(test: Test) { + }); + + test('create an instance with imported monitoring role', () => { const monitoringRole = new Role(stack, 'MonitoringRole', { assumedBy: new ServicePrincipal('monitoring.rds.amazonaws.com'), managedPolicies: [ @@ -633,17 +633,17 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { MonitoringInterval: 60, MonitoringRoleArn: { 'Fn::GetAtt': ['MonitoringRole90457BF9', 'Arn'], }, - }, ResourcePart.Properties)); + }, ResourcePart.Properties); - test.done(); - }, - 'create an instance with an existing security group'(test: Test) { + }); + + test('create an instance with an existing security group', () => { const securityGroup = ec2.SecurityGroup.fromSecurityGroupId(stack, 'SG', 'sg-123456789', { allowAllOutbound: false, }); @@ -657,11 +657,11 @@ nodeunitShim({ instance.connections.allowDefaultPortFromAnyIpv4(); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { VPCSecurityGroups: ['sg-123456789'], - })); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { FromPort: { 'Fn::GetAtt': [ 'InstanceC1063A87', @@ -675,12 +675,12 @@ nodeunitShim({ 'Endpoint.Port', ], }, - })); + }); - test.done(); - }, - 'throws when trying to add rotation to an instance without secret'(test: Test) { + }); + + test('throws when trying to add rotation to an instance without secret', () => { const instance = new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.SQL_SERVER_EE, credentials: rds.Credentials.fromUsername('syscdk', { password: cdk.SecretValue.plainText('tooshort') }), @@ -688,12 +688,12 @@ nodeunitShim({ }); // THEN - test.throws(() => instance.addRotationSingleUser(), /without secret/); + expect(() => 
instance.addRotationSingleUser()).toThrow(/without secret/); - test.done(); - }, - 'throws when trying to add single user rotation multiple times'(test: Test) { + }); + + test('throws when trying to add single user rotation multiple times', () => { const instance = new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.SQL_SERVER_EE, instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), @@ -705,12 +705,12 @@ nodeunitShim({ instance.addRotationSingleUser(); // THEN - test.throws(() => instance.addRotationSingleUser(), /A single user rotation was already added to this instance/); + expect(() => instance.addRotationSingleUser()).toThrow(/A single user rotation was already added to this instance/); - test.done(); - }, - 'throws when timezone is set for non-sqlserver database engine'(test: Test) { + }); + + test('throws when timezone is set for non-sqlserver database engine', () => { const tzSupportedEngines = [rds.DatabaseInstanceEngine.SQL_SERVER_EE, rds.DatabaseInstanceEngine.SQL_SERVER_EX, rds.DatabaseInstanceEngine.SQL_SERVER_SE, rds.DatabaseInstanceEngine.SQL_SERVER_WEB]; const tzUnsupportedEngines = [rds.DatabaseInstanceEngine.MYSQL, rds.DatabaseInstanceEngine.POSTGRES, @@ -718,25 +718,25 @@ nodeunitShim({ // THEN tzSupportedEngines.forEach((engine) => { - test.ok(new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { + expect(new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { engine, timezone: 'Europe/Zurich', vpc, - })); + })).toBeDefined(); }); tzUnsupportedEngines.forEach((engine) => { - test.throws(() => new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { + expect(() => new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { engine, timezone: 'Europe/Zurich', vpc, - }), /timezone property can not be configured for/); + })).toThrow(/timezone property can not be configured for/); }); - test.done(); - }, - 'create an instance from snapshot with maximum allocated storage'(test: Test) { + }); + + test('create an instance from snapshot with maximum allocated storage', () => { // WHEN new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { snapshotIdentifier: 'my-snapshot', @@ -746,15 +746,15 @@ nodeunitShim({ maxAllocatedStorage: 200, }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { DBSnapshotIdentifier: 'my-snapshot', MaxAllocatedStorage: 200, - })); + }); - test.done(); - }, - 'create a DB instance with maximum allocated storage'(test: Test) { + }); + + test('create a DB instance with maximum allocated storage', () => { // WHEN new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, @@ -764,28 +764,28 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { BackupRetentionPeriod: 0, MaxAllocatedStorage: 250, - })); + }); - test.done(); - }, - 'iam authentication - off by default'(test: Test) { + }); + + test('iam authentication - off by default', () => { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, }); - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { EnableIAMDatabaseAuthentication: ABSENT, - })); + }); - test.done(); - }, - 'createGrant - creates IAM policy and enables IAM auth'(test: Test) { + }); + + test('createGrant - creates IAM policy 
and enables IAM auth', () => { const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, @@ -795,10 +795,10 @@ nodeunitShim({ }); instance.grantConnect(role); - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { EnableIAMDatabaseAuthentication: true, - })); - expect(stack).to(haveResource('AWS::IAM::Policy', { + }); + expect(stack).toHaveResource('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Effect: 'Allow', @@ -809,12 +809,12 @@ nodeunitShim({ }], Version: '2012-10-17', }, - })); + }); - test.done(); - }, - 'createGrant - throws if IAM auth disabled'(test: Test) { + }); + + test('createGrant - throws if IAM auth disabled', () => { const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, @@ -824,12 +824,12 @@ nodeunitShim({ assumedBy: new AccountPrincipal(stack.account), }); - test.throws(() => { instance.grantConnect(role); }, /Cannot grant connect when IAM authentication is disabled/); + expect(() => { instance.grantConnect(role); }).toThrow(/Cannot grant connect when IAM authentication is disabled/); - test.done(); - }, - 'domain - sets domain property'(test: Test) { + }); + + test('domain - sets domain property', () => { const domain = 'd-90670a8d36'; // WHEN @@ -840,14 +840,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { Domain: domain, - })); + }); + - test.done(); - }, + }); - 'domain - uses role if provided'(test: Test) { + test('domain - uses role if provided', () => { const domain = 'd-90670a8d36'; // WHEN @@ -860,15 +860,15 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { Domain: domain, DomainIAMRoleName: stack.resolve(role.roleName), - })); + }); - test.done(); - }, - 'domain - creates role if not provided'(test: Test) { + }); + + test('domain - creates role if not provided', () => { const domain = 'd-90670a8d36'; // WHEN @@ -879,12 +879,12 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { Domain: domain, DomainIAMRoleName: anything(), - })); + }); - expect(stack).to(haveResource('AWS::IAM::Role', { + expect(stack).toHaveResource('AWS::IAM::Role', { AssumeRolePolicyDocument: { Statement: [ { @@ -911,12 +911,12 @@ nodeunitShim({ ], }, ], - })); + }); - test.done(); - }, - 'throws when domain is set for mariadb database engine'(test: Test) { + }); + + test('throws when domain is set for mariadb database engine', () => { const domainSupportedEngines = [rds.DatabaseInstanceEngine.SQL_SERVER_EE, rds.DatabaseInstanceEngine.SQL_SERVER_EX, rds.DatabaseInstanceEngine.SQL_SERVER_SE, rds.DatabaseInstanceEngine.SQL_SERVER_WEB, rds.DatabaseInstanceEngine.MYSQL, rds.DatabaseInstanceEngine.POSTGRES, rds.DatabaseInstanceEngine.ORACLE_EE]; @@ -924,28 +924,28 @@ nodeunitShim({ // THEN domainSupportedEngines.forEach((engine) => { - test.ok(new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { + expect(() => new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { engine, domain: 'd-90670a8d36', vpc, - })); + })).not.toThrow(); }); domainUnsupportedEngines.forEach((engine) => { const expectedError = new 
RegExp(`domain property cannot be configured for ${engine.engineType}`); - test.throws(() => new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { + expect(() => new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { engine, domain: 'd-90670a8d36', vpc, - }), expectedError); + })).toThrow(expectedError); }); - test.done(); - }, - 'performance insights': { - 'instance with all performance insights properties'(test: Test) { + }); + + describe('performance insights', () => { + test('instance with all performance insights properties', () => { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, @@ -954,72 +954,72 @@ nodeunitShim({ performanceInsightEncryptionKey: new kms.Key(stack, 'Key'), }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { EnablePerformanceInsights: true, PerformanceInsightsRetentionPeriod: 731, PerformanceInsightsKMSKeyId: { 'Fn::GetAtt': ['Key961B73FD', 'Arn'] }, - })); + }); + - test.done(); - }, + }); - 'setting performance insights fields enables performance insights'(test: Test) { + test('setting performance insights fields enables performance insights', () => { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, performanceInsightRetention: rds.PerformanceInsightRetention.LONG_TERM, }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { EnablePerformanceInsights: true, PerformanceInsightsRetentionPeriod: 731, - })); + }); - test.done(); - }, - 'throws if performance insights fields are set but performance insights is disabled'(test: Test) { - test.throws(() => { + }); + + test('throws if performance insights fields are set but performance insights is disabled', () => { + expect(() => { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, enablePerformanceInsights: false, performanceInsightRetention: rds.PerformanceInsightRetention.DEFAULT, }); - }, /`enablePerformanceInsights` disabled, but `performanceInsightRetention` or `performanceInsightEncryptionKey` was set/); + }).toThrow(/`enablePerformanceInsights` disabled, but `performanceInsightRetention` or `performanceInsightEncryptionKey` was set/); - test.done(); - }, - }, - 'reuse an existing subnet group'(test: Test) { + }); + }); + + test('reuse an existing subnet group', () => { new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), vpc, subnetGroup: rds.SubnetGroup.fromSubnetGroupName(stack, 'SubnetGroup', 'my-subnet-group'), }); - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { DBSubnetGroupName: 'my-subnet-group', - })); - expect(stack).to(countResources('AWS::RDS::DBSubnetGroup', 0)); + }); + expect(stack).toCountResources('AWS::RDS::DBSubnetGroup', 0); - test.done(); - }, - 'defaultChild returns the DB Instance'(test: Test) { + }); + + test('defaultChild returns the DB Instance', () => { const instance = new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), vpc, }); // THEN - test.ok(instance.node.defaultChild instanceof rds.CfnDBInstance); + 
expect(instance.node.defaultChild instanceof rds.CfnDBInstance).toBeTruthy(); - test.done(); - }, - "PostgreSQL database instance uses a different default master username than 'admin', which is a reserved word"(test: Test) { + }); + + test("PostgreSQL database instance uses a different default master username than 'admin', which is a reserved word", () => { new rds.DatabaseInstance(stack, 'Instance', { vpc, engine: rds.DatabaseInstanceEngine.postgres({ @@ -1028,17 +1028,19 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::SecretsManager::Secret', { + expect(stack).toHaveResourceLike('AWS::SecretsManager::Secret', { GenerateSecretString: { SecretStringTemplate: '{"username":"postgres"}', }, - })); + }); - test.done(); - }, - 'S3 Import/Export': { - 'instance with s3 import and export buckets'(test: Test) { + }); + + describe('S3 Import/Export', () => { + testFutureBehavior('instance with s3 import and export buckets', { '@aws-cdk/aws-s3:grantWriteWithoutAcl': true }, cdk.App, (app) => { + stack = new cdk.Stack(app); + vpc = new ec2.Vpc(stack, 'VPC'); new rds.DatabaseInstance(stack, 'DB', { engine: rds.DatabaseInstanceEngine.sqlServerSe({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }), vpc, @@ -1046,7 +1048,7 @@ nodeunitShim({ s3ExportBuckets: [new s3.Bucket(stack, 'S3Export')], }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { AssociatedRoles: [ { FeatureName: 'S3_INTEGRATION', @@ -1054,10 +1056,10 @@ nodeunitShim({ }, ], OptionGroupName: { Ref: 'DBInstanceOptionGroup46C68006' }, - })); + }); // Can read from import bucket, and read/write from export bucket - expect(stack).to(haveResource('AWS::IAM::Policy', { + expect(stack).toHaveResource('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Action: [ @@ -1077,7 +1079,7 @@ nodeunitShim({ 's3:GetBucket*', 's3:List*', 's3:DeleteObject*', - 's3:PutObject*', + 's3:PutObject', 's3:Abort*', ], Effect: 'Allow', @@ -1088,58 +1090,58 @@ nodeunitShim({ }], Version: '2012-10-17', }, - })); + }); - test.done(); - }, - 'throws if using s3 import on unsupported engine'(test: Test) { + }); + + test('throws if using s3 import on unsupported engine', () => { const s3ImportRole = new Role(stack, 'S3ImportRole', { assumedBy: new ServicePrincipal('rds.amazonaws.com'), }); - test.throws(() => { + expect(() => { new rds.DatabaseInstance(stack, 'DBWithImportBucket', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, s3ImportBuckets: [new s3.Bucket(stack, 'S3Import')], }); - }, /Engine 'mysql-8.0.19' does not support S3 import/); - test.throws(() => { + }).toThrow(/Engine 'mysql-8.0.19' does not support S3 import/); + expect(() => { new rds.DatabaseInstance(stack, 'DBWithImportRole', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, s3ImportRole, }); - }, /Engine 'mysql-8.0.19' does not support S3 import/); + }).toThrow(/Engine 'mysql-8.0.19' does not support S3 import/); - test.done(); - }, - 'throws if using s3 export on unsupported engine'(test: Test) { + }); + + test('throws if using s3 export on unsupported engine', () => { const s3ExportRole = new Role(stack, 'S3ExportRole', { assumedBy: new ServicePrincipal('rds.amazonaws.com'), }); - test.throws(() => { + expect(() => { new rds.DatabaseInstance(stack, 'DBWithExportBucket', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, s3ExportBuckets: [new s3.Bucket(stack, 
'S3Export')], }); - }, /Engine 'mysql-8.0.19' does not support S3 export/); - test.throws(() => { + }).toThrow(/Engine 'mysql-8.0.19' does not support S3 export/); + expect(() => { new rds.DatabaseInstance(stack, 'DBWithExportRole', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), vpc, s3ExportRole: s3ExportRole, }); - }, /Engine 'mysql-8.0.19' does not support S3 export/); + }).toThrow(/Engine 'mysql-8.0.19' does not support S3 export/); - test.done(); - }, - 'throws if provided two different roles for import/export'(test: Test) { + }); + + test('throws if provided two different roles for import/export', () => { const s3ImportRole = new Role(stack, 'S3ImportRole', { assumedBy: new ServicePrincipal('rds.amazonaws.com'), }); @@ -1147,20 +1149,20 @@ nodeunitShim({ assumedBy: new ServicePrincipal('rds.amazonaws.com'), }); - test.throws(() => { + expect(() => { new rds.DatabaseInstance(stack, 'DBWithExportBucket', { engine: rds.DatabaseInstanceEngine.sqlServerEe({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }), vpc, s3ImportRole, s3ExportRole, }); - }, /S3 import and export roles must be the same/); + }).toThrow(/S3 import and export roles must be the same/); - test.done(); - }, - }, - 'fromGeneratedSecret'(test: Test) { + }); + }); + + test('fromGeneratedSecret', () => { // WHEN new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), @@ -1169,7 +1171,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { MasterUsername: 'postgres', // username is a string MasterUserPassword: { 'Fn::Join': [ @@ -1183,12 +1185,12 @@ nodeunitShim({ ], ], }, - })); + }); - test.done(); - }, - 'fromPassword'(test: Test) { + }); + + test('fromPassword', () => { // WHEN new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), @@ -1197,15 +1199,15 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { MasterUsername: 'postgres', // username is a string MasterUserPassword: '{{resolve:ssm-secure:/dbPassword:1}}', // reference to SSM - })); + }); + - test.done(); - }, + }); - 'can set publiclyAccessible to false with public subnets'(test: Test) { + test('can set publiclyAccessible to false with public subnets', () => { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19, @@ -1217,14 +1219,14 @@ nodeunitShim({ publiclyAccessible: false, }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { PubliclyAccessible: false, - })); + }); - test.done(); - }, - 'can set publiclyAccessible to true with private subnets'(test: Test) { + }); + + test('can set publiclyAccessible to true with private subnets', () => { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19, @@ -1236,12 +1238,12 @@ nodeunitShim({ publiclyAccessible: true, }); - expect(stack).to(haveResource('AWS::RDS::DBInstance', { + expect(stack).toHaveResource('AWS::RDS::DBInstance', { PubliclyAccessible: true, - })); + }); + - test.done(); - }, + }); }); test.each([ @@ -1249,6 +1251,10 @@ test.each([ [cdk.RemovalPolicy.SNAPSHOT, 'Snapshot', ABSENT], 
[cdk.RemovalPolicy.DESTROY, 'Delete', ABSENT], ])('if Instance RemovalPolicy is \'%s\', the instance has DeletionPolicy \'%s\' and the DBSubnetGroup has \'%s\'', (instanceRemovalPolicy, instanceValue, subnetValue) => { + // GIVEN + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + // WHEN new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ @@ -1260,13 +1266,13 @@ test.each([ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', { DeletionPolicy: instanceValue, UpdateReplacePolicy: instanceValue, - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); - expect(stack).to(haveResourceLike('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResourceLike('AWS::RDS::DBSubnetGroup', { DeletionPolicy: subnetValue, UpdateReplacePolicy: subnetValue, - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); });
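The hunks above apply one mechanical rewrite across both test files. As a condensed reference for reviewers, the sketch below shows the recurring before/after shape on the 'can deactivate backup' test; it assumes the Jest matchers that the new `import '@aws-cdk/assert/jest'` line registers (toHaveResource, toHaveResourceLike, toCountResources) and fills in the backupRetention property that the hunk elides, so treat it as illustrative rather than a verbatim excerpt.

import '@aws-cdk/assert/jest';
import * as ec2 from '@aws-cdk/aws-ec2';
import * as cdk from '@aws-cdk/core';
import * as rds from '../lib';

let stack: cdk.Stack;
let vpc: ec2.Vpc;

// Before (nodeunit-shim): named test methods, a setUp callback, explicit test.done():
//
//   nodeunitShim({
//     'setUp'(cb: () => void) {
//       stack = new cdk.Stack();
//       vpc = new ec2.Vpc(stack, 'VPC');
//       cb();
//     },
//     'can deactivate backup'(test: Test) {
//       new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, backupRetention: cdk.Duration.seconds(0), vpc });
//       expect(stack).to(haveResource('AWS::RDS::DBInstance', { BackupRetentionPeriod: 0 }));
//       test.done();
//     },
//   });

// After (Jest): describe/test with arrow functions, beforeEach, and native matchers.
describe('instance', () => {
  beforeEach(() => {
    stack = new cdk.Stack();
    vpc = new ec2.Vpc(stack, 'VPC');
  });

  test('can deactivate backup', () => {
    new rds.DatabaseInstance(stack, 'Instance', {
      engine: rds.DatabaseInstanceEngine.MYSQL,
      backupRetention: cdk.Duration.seconds(0), // assumed from the original test; elided in the hunk
      vpc,
    });

    expect(stack).toHaveResource('AWS::RDS::DBInstance', {
      BackupRetentionPeriod: 0,
    });
  });
});

The assertion mapping is uniform throughout the diff: expect(stack).to(haveResource(...)) becomes expect(stack).toHaveResource(...), notTo(haveResource(...)) becomes .not.toHaveResource(...), to(countResources(type, n)) becomes .toCountResources(type, n), test.throws(fn, regex) becomes expect(fn).toThrow(regex), and test.deepEqual / test.ok become expect(...).toEqual(...) / expect(...).toBeTruthy().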
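One pattern is new rather than a mechanical translation: tests that exercise behavior behind a feature flag move to the testFutureBehavior helper from 'cdk-build-tools/lib/feature-flag', which is why testStack() in cluster.test.ts gained an optional app parameter. A minimal sketch of the shape used in the 'instance with s3 import and export buckets' hunk, with the test body elided:

import { testFutureBehavior } from 'cdk-build-tools/lib/feature-flag';
import * as cdk from '@aws-cdk/core';

testFutureBehavior(
  'instance with s3 import and export buckets', // test name, as in a plain test()
  { '@aws-cdk/aws-s3:grantWriteWithoutAcl': true }, // feature flags applied as app context
  cdk.App, // the App class the helper instantiates with those flags
  (app) => {
    const stack = new cdk.Stack(app); // stacks must hang off the flagged App
    // ...create the instance and assert, as in the hunk above
  },
);

Running the assertion under the grantWriteWithoutAcl flag is also what narrows the expected IAM statement in that hunk: with the flag on, Bucket.grantReadWrite grants 's3:PutObject' instead of the broader 's3:PutObject*' (which includes PutObjectAcl).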