diff --git a/.gitattributes b/.gitattributes index 87eb81e8..9714bb1f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -15,4 +15,5 @@ /package-lock.json linguist-generated /package.json linguist-generated /solution-manifest.yaml linguist-generated +/source/pipeline/jest.config.json linguist-generated /tsconfig.json linguist-generated \ No newline at end of file diff --git a/.gitignore b/.gitignore index f84bf134..eee78dd3 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ !/package.json !/LICENSE logs +*.log npm-debug.log* yarn-debug.log* yarn-error.log* @@ -26,81 +27,18 @@ jspm_packages/ .eslintcache *.tgz .yarn-integrity -__pycache__/ -*.py[cod] -*$py.class -*node_modules* -*.so -*.pyc -.Python -env/ -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib64/ -parts/ -sdist/ -var/ -*.egg-info/ -.installed.cfg -*.egg -.idea/ -*.manifest -*.spec -pip-log.txt -pip-delete-this-directory.txt -htmlcov/ -.tox/ -.coverage -.coverage.* .cache -nosetests.xml -coverage.xml -*,cover -.hypothesis/ -deployment/coverage-reports/ -deployment/test-reports/ -coverage/ -*.mo -*.pot -*.log -local_settings.py -instance/ -.webassets-cache -.scrapy -docs/_build/ -target/ -.ipynb_checkpoints -.python-version -celerybeat-schedule -.env +.idea/ +.vscode/ .venv/ -venv/ -ENV/ -.spyderproject -.ropeproject -*cdk.out* -*.js -!.eslintrc.js -*regional-s3-assets* -*staging* -*global-s3-assets* -.DS_Store -.pytest_cache -.mypy_cache -*.zip -deployment/open-source -deployment/dist -source/deploy -source/code/sample_events -.vscode -__pycache__ -**/cdk-test-report.xml +*.DS_Store +deployment/open-source/ +deployment/global-s3-assets +deployment/regional-s3-assets +__pycache__/ +deployment/test-reports +deployment/coverage-reports !/jest.config.json -/coverage/ !/.github/pull_request_template.md !/source/instance-scheduler/tests/ !/tsconfig.json @@ -113,5 +51,7 @@ __pycache__ /build/cdk.out/ .cdk.staging/ .parcel-cache/ +!/source/pipeline/jest.config.json +/coverage/ 
!/solution-manifest.yaml !/.projenrc.ts diff --git a/.projen/deps.json b/.projen/deps.json index 0c6d4644..c0f1a8a5 100644 --- a/.projen/deps.json +++ b/.projen/deps.json @@ -1,5 +1,9 @@ { "dependencies": [ + { + "name": "@cdklabs/cdk-ssm-documents", + "type": "build" + }, { "name": "@types/jest", "type": "build" @@ -9,13 +13,17 @@ "version": "^18", "type": "build" }, + { + "name": "@types/uuid", + "type": "build" + }, { "name": "@typescript-eslint/eslint-plugin", "type": "build" }, { "name": "aws-cdk", - "version": "^2.102.0", + "version": "2.130.0", "type": "build" }, { @@ -68,17 +76,34 @@ }, { "name": "typescript", - "version": "~5.1.6", + "version": "~5.2.x", "type": "build" }, { "name": "@aws-cdk/aws-lambda-python-alpha", - "version": "^2.102.0-alpha.0", + "version": "2.130.0-alpha.0", + "type": "runtime" + }, + { + "name": "@aws-cdk/aws-neptune-alpha", + "version": "2.130.0-alpha.0", "type": "runtime" }, { "name": "@aws-cdk/aws-servicecatalogappregistry-alpha", - "version": "^2.102.0-alpha.0", + "version": "2.130.0-alpha.0", + "type": "runtime" + }, + { + "name": "@aws-sdk/client-auto-scaling", + "type": "runtime" + }, + { + "name": "@aws-sdk/client-cloudformation", + "type": "runtime" + }, + { + "name": "@aws-sdk/client-docdb", "type": "runtime" }, { @@ -89,6 +114,14 @@ "name": "@aws-sdk/client-ec2", "type": "runtime" }, + { + "name": "@aws-sdk/client-lambda", + "type": "runtime" + }, + { + "name": "@aws-sdk/client-neptune", + "type": "runtime" + }, { "name": "@aws-sdk/client-rds", "type": "runtime" @@ -98,12 +131,12 @@ "type": "runtime" }, { - "name": "@aws-solutions-constructs/aws-lambda-dynamodb", + "name": "@aws-sdk/util-dynamodb", "type": "runtime" }, { "name": "aws-cdk-lib", - "version": "^2.102.0", + "version": "2.130.0", "type": "runtime" }, { @@ -118,6 +151,10 @@ { "name": "source-map-support", "type": "runtime" + }, + { + "name": "uuid", + "type": "runtime" } ], "//": "~~ Generated by projen. To modify, edit .projenrc.ts and run \"npx projen\"." 
diff --git a/.projen/files.json b/.projen/files.json index f5db922e..f018040a 100644 --- a/.projen/files.json +++ b/.projen/files.json @@ -11,6 +11,7 @@ "jest.config.json", "LICENSE", "solution-manifest.yaml", + "source/pipeline/jest.config.json", "tsconfig.json" ], "//": "~~ Generated by projen. To modify, edit .projenrc.ts and run \"npx projen\"." diff --git a/.projen/tasks.json b/.projen/tasks.json index ab87cd4d..2316028e 100644 --- a/.projen/tasks.json +++ b/.projen/tasks.json @@ -106,7 +106,7 @@ "name": "e2e-tests", "steps": [ { - "exec": "jest --config source/pipeline/jest.config.ts", + "exec": "jest --config source/pipeline/jest.config.json", "receiveArgs": true } ] @@ -184,12 +184,6 @@ "name": "test", "description": "Run tests", "steps": [ - { - "spawn": "test:prettier" - }, - { - "spawn": "test:eslint" - }, { "spawn": "test:cdk" }, @@ -203,6 +197,9 @@ }, "test:app": { "name": "test:app", + "env": { + "TOX_PARALLEL_NO_SPINNER": "true" + }, "steps": [ { "exec": "python -m tox --parallel --exit-and-dump-after 1200" @@ -224,22 +221,36 @@ }, "test:cdk": { "name": "test:cdk", + "steps": [ + { + "spawn": "test:prettier" + }, + { + "spawn": "test:eslint" + }, + { + "spawn": "test:cdk-tests" + } + ] + }, + "test:cdk-tests": { + "name": "test:cdk-tests", "steps": [ { "exec": "jest --coverageProvider=v8 --ci" } ] }, - "test:cdk:ci": { - "name": "test:cdk:ci", + "test:cdk-tests:ci": { + "name": "test:cdk-tests:ci", "steps": [ { "exec": "jest --coverageProvider=v8 --ci --coverage --coverageDirectory deployment/coverage-reports/cdk-coverage" } ] }, - "test:ci": { - "name": "test:ci", + "test:cdk:ci": { + "name": "test:cdk:ci", "steps": [ { "spawn": "test:prettier" @@ -247,6 +258,14 @@ { "spawn": "test:eslint" }, + { + "spawn": "test:cdk-tests:ci" + } + ] + }, + "test:ci": { + "name": "test:ci", + "steps": [ { "spawn": "test:cdk:ci" }, @@ -260,6 +279,9 @@ }, "test:cli": { "name": "test:cli", + "env": { + "TOX_PARALLEL_NO_SPINNER": "true" + }, "steps": [ { "exec": 
"python -m tox --parallel --exit-and-dump-after 1200" @@ -300,8 +322,21 @@ "description": "Update jest snapshots", "steps": [ { - "exec": "jest --updateSnapshot --passWithNoTests --coverageProvider=v8 --ci", - "receiveArgs": true + "spawn": "test:prettier" + }, + { + "spawn": "test:eslint" + }, + { + "spawn": "test:update-snapshots" + } + ] + }, + "test:update-snapshots": { + "name": "test:update-snapshots", + "steps": [ + { + "exec": "jest --updateSnapshot --passWithNoTests --coverageProvider=v8 --ci" } ] }, @@ -322,13 +357,13 @@ }, "steps": [ { - "exec": "npx npm-check-updates@16 --upgrade --target=minor --peer --dep=dev,peer,prod,optional --filter=@types/jest,@typescript-eslint/eslint-plugin,esbuild,eslint,eslint-config-prettier,eslint-plugin-header,eslint-plugin-import,eslint-plugin-prettier,jest,jest-extended,jest-junit,projen,ts-jest,ts-node,@aws-sdk/client-dynamodb,@aws-sdk/client-ec2,@aws-sdk/client-rds,@aws-sdk/client-ssm,@aws-solutions-constructs/aws-lambda-dynamodb,cdk-nag,source-map-support" + "exec": "npx npm-check-updates@16 --upgrade --target=minor --peer --dep=dev,peer,prod,optional --filter=@cdklabs/cdk-ssm-documents,@types/jest,@types/uuid,@typescript-eslint/eslint-plugin,esbuild,eslint,eslint-config-prettier,eslint-plugin-header,eslint-plugin-import,eslint-plugin-prettier,jest,jest-extended,jest-junit,projen,ts-jest,ts-node,@aws-sdk/client-auto-scaling,@aws-sdk/client-cloudformation,@aws-sdk/client-docdb,@aws-sdk/client-dynamodb,@aws-sdk/client-ec2,@aws-sdk/client-lambda,@aws-sdk/client-neptune,@aws-sdk/client-rds,@aws-sdk/client-ssm,@aws-sdk/util-dynamodb,cdk-nag,source-map-support,uuid" }, { "exec": "npm install" }, { - "exec": "npm update @types/jest @types/node @typescript-eslint/eslint-plugin aws-cdk esbuild eslint eslint-config-prettier eslint-plugin-header eslint-plugin-import eslint-plugin-prettier jest jest-extended jest-junit projen ts-jest ts-node typescript @aws-cdk/aws-lambda-python-alpha @aws-cdk/aws-servicecatalogappregistry-alpha 
@aws-sdk/client-dynamodb @aws-sdk/client-ec2 @aws-sdk/client-rds @aws-sdk/client-ssm @aws-solutions-constructs/aws-lambda-dynamodb aws-cdk-lib cdk-nag constructs source-map-support" + "exec": "npm update @cdklabs/cdk-ssm-documents @types/jest @types/node @types/uuid @typescript-eslint/eslint-plugin aws-cdk esbuild eslint eslint-config-prettier eslint-plugin-header eslint-plugin-import eslint-plugin-prettier jest jest-extended jest-junit projen ts-jest ts-node typescript @aws-cdk/aws-lambda-python-alpha @aws-cdk/aws-neptune-alpha @aws-cdk/aws-servicecatalogappregistry-alpha @aws-sdk/client-auto-scaling @aws-sdk/client-cloudformation @aws-sdk/client-docdb @aws-sdk/client-dynamodb @aws-sdk/client-ec2 @aws-sdk/client-lambda @aws-sdk/client-neptune @aws-sdk/client-rds @aws-sdk/client-ssm @aws-sdk/util-dynamodb aws-cdk-lib cdk-nag constructs source-map-support uuid" }, { "exec": "npx projen" diff --git a/.projenrc.ts b/.projenrc.ts index e0c76dad..07e02b31 100644 --- a/.projenrc.ts +++ b/.projenrc.ts @@ -2,68 +2,76 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 import { readFileSync } from "node:fs"; -import { awscdk, YamlFile } from "projen"; -import { JestReporter, NodePackageManager, Transform, UpdateSnapshot } from "projen/lib/javascript"; - -const cdkVersion = "2.102.0"; -const solutionVersion = "1.5.6"; -const solutionId = "SO0030"; -const solutionName = "instance-scheduler-on-aws"; - -const project = new awscdk.AwsCdkTypeScriptApp({ - projenrcTs: true, - minNodeVersion: "18.0.0", - name: "instance-scheduler-on-aws", - description: `Instance Scheduler on AWS (${solutionId})`, - authorName: "Amazon Web Services", - authorUrl: "https://aws.amazon.com/solutions", - authorOrganization: true, - defaultReleaseBranch: "main", - packageManager: NodePackageManager.NPM, - majorVersion: 1, - srcdir: "source", - testdir: "source/instance-scheduler/tests", - cdkVersion, - cdkout: "build/cdk.out", - appEntrypoint: "instance-scheduler.ts", - jestOptions: { - configFilePath: "jest.config.json", - updateSnapshot: UpdateSnapshot.NEVER, - junitReporting: false, // we will override - jestConfig: { - roots: ["/source/instance-scheduler/tests"], - transform: { "^.+\\.tsx?$": new Transform("ts-jest") }, - reporters: [ - new JestReporter("jest-junit", { - outputDirectory: "deployment/test-reports", - outputName: "cdk-test-report.xml", - }), - ], - }, - }, - context: { - solutionId, - solutionName, +import { Project, YamlFile } from "projen"; +import { AwsCdkTypeScriptApp } from "projen/lib/awscdk"; +import { + Jest, + JestOptions, + JestReporter, + NodePackageManager, + Transform, + TypescriptConfigOptions, + UpdateSnapshot, +} from "projen/lib/javascript"; +import { PythonProject } from "projen/lib/python"; + +function main() { + new InstanceScheduler({ version: "3.0.0", cdkVersion: "2.130.0" }).synth(); +} + +interface InstanceSchedulerProps { + readonly version: string; + readonly cdkVersion: string; +} + +class InstanceScheduler extends AwsCdkTypeScriptApp { + private static readonly solutionId: 
string = "SO0030"; + private static readonly solutionName: string = "instance-scheduler-on-aws"; + + private static readonly cdkContext: { [key: string]: any } = { + solutionId: this.solutionId, + solutionName: this.solutionName, appRegApplicationName: "AWS-Solutions", - appRegSolutionName: solutionName, + appRegSolutionName: this.solutionName, "instance-scheduler-on-aws-pipeline-source": "codecommit", - }, - typescriptVersion: "~5.1.6", // held back for @typescript-eslint/typescript-estree - deps: [ - `@aws-cdk/aws-lambda-python-alpha@^${cdkVersion}-alpha.0`, - `@aws-cdk/aws-servicecatalogappregistry-alpha@^${cdkVersion}-alpha.0`, + }; + + private static readonly tsconfig: TypescriptConfigOptions = { + include: ["deployment/cdk-solution-helper/**/*.ts"], + compilerOptions: { + forceConsistentCasingInFileNames: true, + lib: ["es2022", "dom"], + noPropertyAccessFromIndexSignature: false, + noUncheckedIndexedAccess: false, + target: "ES2022", + outDir: "build/cdk.ts.dist", + rootDir: ".", + }, + }; + + private static readonly prTemplate: string[] = readFileSync("projenrc/PULL_REQUEST_TEMPLATE.md") + .toString() + .split("\n"); + + private static readonly deps: string[] = [ + "@aws-sdk/client-auto-scaling", + "@aws-sdk/client-cloudformation", + "@aws-sdk/client-docdb", "@aws-sdk/client-dynamodb", + "@aws-sdk/util-dynamodb", "@aws-sdk/client-ec2", + "@aws-sdk/client-lambda", + "@aws-sdk/client-neptune", "@aws-sdk/client-rds", "@aws-sdk/client-ssm", - "@aws-solutions-constructs/aws-lambda-dynamodb", "cdk-nag", - "constructs", "source-map-support", - ], - devDeps: [ - "@types/jest", - "@types/node", + "uuid", + ]; + + private static readonly devDeps: string[] = [ + "@cdklabs/cdk-ssm-documents", + "@types/uuid", "@typescript-eslint/eslint-plugin", "eslint", "eslint-config-prettier", @@ -73,176 +81,402 @@ const project = new awscdk.AwsCdkTypeScriptApp({ "jest-extended", "jest-junit", "ts-jest", - ], - githubOptions: { - workflows: false, - }, - pullRequestTemplateContents: 
readFileSync("projenrc/PULL_REQUEST_TEMPLATE.md").toString().split("\n"), - eslint: false, - autoMerge: false, - npmignoreEnabled: false, - license: "Apache-2.0", - disableTsconfigDev: true, - tsconfig: { - compilerOptions: { - rootDir: ".", - noUnusedLocals: true, - forceConsistentCasingInFileNames: true, - lib: ["es2022", "dom"], - noEmitOnError: true, - noPropertyAccessFromIndexSignature: false, // TODO: enable - noUncheckedIndexedAccess: false, // TODO: enable - target: "ES2022", - allowJs: false, - outDir: "build/cdk.ts.dist", - }, - include: ["source/pipeline/**/*.ts", "deployment/cdk-solution-helper/**/*.ts"], - exclude: ["node_modules"], - }, - gitignore: [ - "__pycache__/", - "*.py[cod]", - "*$py.class", - "*node_modules*", - "*.so", - "*.pyc", - ".Python", - "env/", - "build/", - "develop-eggs/", - "dist/", - "downloads/", - "eggs/", - ".eggs/", - "lib64/", - "parts/", - "sdist/", - "var/", - "*.egg-info/", - ".installed.cfg", - "*.egg", + ]; + + private static readonly testReportDir: string = "deployment/test-reports"; + private static readonly coverageReportDir: string = "deployment/coverage-reports"; + + private static readonly gitignore: string[] = [ ".idea/", - "*.manifest", - "*.spec", - "pip-log.txt", - "pip-delete-this-directory.txt", - "htmlcov/", - ".tox/", - ".coverage", - ".coverage.*", - ".cache", - "nosetests.xml", - "coverage.xml", - "*,cover", - ".hypothesis/", - "deployment/coverage-reports/", - "deployment/test-reports/", - "coverage/", - "*.mo", - "*.pot", - "*.log", - "local_settings.py", - "instance/", - ".webassets-cache", - ".scrapy", - "docs/_build/", - "target/", - ".ipynb_checkpoints", - ".python-version", - "celerybeat-schedule", - ".env", + ".vscode/", ".venv/", - "venv/", - "ENV/", - ".spyderproject", - ".ropeproject", - "*cdk.out*", - "*.js", - "!.eslintrc.js", - "*regional-s3-assets*", - "*staging*", - "*global-s3-assets*", - ".DS_Store", - ".pytest_cache", - ".mypy_cache", - "*.zip", - "deployment/open-source", - 
"deployment/dist", - "source/deploy", - "source/code/sample_events", - ".vscode", - "__pycache__", - "**/cdk-test-report.xml", - ], -}); - -new YamlFile(project, "solution-manifest.yaml", { - obj: { - id: solutionId, - name: solutionName, - version: solutionVersion, - cloudformation_templates: [ - { template: "instance-scheduler-on-aws.template", main_template: true }, - { template: "instance-scheduler-on-aws-remote.template" }, - ], - build_environment: { build_image: "aws/codebuild/standard:7.0" }, - }, -}); - -project.addTask("e2e-tests", { exec: "jest --config source/pipeline/jest.config.ts", receiveArgs: true }); - -const prettierTask = project.addTask("test:prettier", { exec: "npx prettier --check ./**/*.ts" }); -const eslintTask = project.addTask("test:eslint", { exec: "npx eslint --max-warnings=0 ." }); -const cdkTestTask = project.addTask("test:cdk", { - exec: "jest --coverageProvider=v8 --ci", -}); -const appTestTask = project.addTask("test:app", { - cwd: "source/app", - exec: "python -m tox --parallel --exit-and-dump-after 1200", -}); -const cliTestTask = project.addTask("test:cli", { - cwd: "source/cli", - exec: "python -m tox --parallel --exit-and-dump-after 1200", -}); - -const testTask = project.tasks.tryFind("test"); -testTask?.reset(); -testTask?.spawn(prettierTask); -testTask?.spawn(eslintTask); -testTask?.spawn(cdkTestTask); -testTask?.spawn(appTestTask); -testTask?.spawn(cliTestTask); - -const testCiTask = project.addTask("test:ci"); - -const cdkTestCiTask = project.addTask("test:cdk:ci", { - exec: "jest --coverageProvider=v8 --ci --coverage --coverageDirectory deployment/coverage-reports/cdk-coverage", -}); -const appTestCiTask = project.addTask("test:app:ci", { - cwd: "source/app", - env: { TOX_PARALLEL_NO_SPINNER: "true" }, - exec: 'python -m tox --parallel --exit-and-dump-after 1200 --skip-missing-interpreters false -- --junitxml=../../deployment/test-reports/lambda-test-report.xml --cov --cov-report 
"xml:../../deployment/coverage-reports/lambda-coverage.xml" && sed -i -e "s|.*|source/app/instance_scheduler|g" ../../deployment/coverage-reports/lambda-coverage.xml', -}); -const cliTestCiTask = project.addTask("test:cli:ci", { - cwd: "source/cli", - env: { TOX_PARALLEL_NO_SPINNER: "true" }, - exec: 'python -m tox --parallel --exit-and-dump-after 1200 --skip-missing-interpreters false -- --junitxml=../../deployment/test-reports/cli-test-report.xml --cov --cov-report "xml:../../deployment/coverage-reports/cli-coverage.xml" && sed -i -e "s|.*|source/cli/instance_scheduler_cli|g" ../../deployment/coverage-reports/cli-coverage.xml', -}); - -testCiTask.spawn(prettierTask); -testCiTask.spawn(eslintTask); -testCiTask.spawn(cdkTestCiTask); -testCiTask.spawn(appTestCiTask); -testCiTask.spawn(cliTestCiTask); - -project.tryFindObjectFile("tsconfig.json")?.addOverride("files", ["global.d.ts"]); - -// adding to project props doesn't seem to work as expected -project.jest?.addTestMatch("**/*.test.ts"); - -// use default snapshot resolution -project.tryFindObjectFile("jest.config.json")?.addOverride("snapshotResolver", undefined); - -project.tryFindObjectFile("package.json")?.addOverride("version", solutionVersion); - -project.synth(); + "*.DS_Store", + "deployment/open-source/", + "deployment/global-s3-assets", + "deployment/regional-s3-assets", + "__pycache__/", + this.testReportDir, + this.coverageReportDir, + ]; + + private static readonly jestConfigFile: string = "jest.config.json"; + private static readonly testdir: string = "source/instance-scheduler/tests"; + + private static readonly jestOptions: JestOptions = { + junitReporting: false, // we will override + updateSnapshot: UpdateSnapshot.NEVER, + configFilePath: this.jestConfigFile, + jestConfig: { + reporters: [ + new JestReporter("jest-junit", { + outputDirectory: this.testReportDir, + outputName: "cdk-test-report.xml", + }), + ], + roots: [`/${this.testdir}`], + transform: { "^.+\\.tsx?$": new Transform("ts-jest") 
}, + setupFilesAfterEnv: ["jest-extended/all"], + }, + }; + + constructor(props: InstanceSchedulerProps) { + const authorName = "Amazon Web Services"; + const license = "Apache-2.0"; + + super({ + appEntrypoint: "instance-scheduler.ts", + cdkVersion: props.cdkVersion, + cdkVersionPinning: true, + context: InstanceScheduler.cdkContext, + cdkout: "build/cdk.out", + srcdir: "source", + testdir: InstanceScheduler.testdir, + eslint: false, + tsconfig: InstanceScheduler.tsconfig, + typescriptVersion: "~5.2.x", //@typescript-eslint/typescript-estree doesn't support 5.3.x yet + disableTsconfigDev: true, + projenrcTs: true, + defaultReleaseBranch: "main", + npmignoreEnabled: false, + pullRequestTemplateContents: InstanceScheduler.prTemplate, + gitignore: InstanceScheduler.gitignore, + jestOptions: InstanceScheduler.jestOptions, + githubOptions: { mergify: false, workflows: false }, + name: InstanceScheduler.solutionName, + description: `Instance Scheduler on AWS (${InstanceScheduler.solutionId})`, + deps: InstanceScheduler.deps, + devDeps: InstanceScheduler.devDeps, + packageManager: NodePackageManager.NPM, + authorName, + authorUrl: "https://aws.amazon.com/solutions", + authorOrganization: true, + minNodeVersion: "18.0.0", + license, + }); + + // manage project versioning manually + this.overrideVersion(props.version); + // cdk deps should lock to the same version as cdk itself, so they must be specified separately + this.addDeps(...this.getCdkDeps(props.cdkVersion)); + this.addTestTasks(); + this.addTypescriptFiles("global.d.ts"); + // adding to project props doesn't seem to work as expected + this.addJestMatch("**/*.test.ts"); + // use default snapshot resolution + this.removeCustomSnapshotResolver(); + + new YamlFile(this, "solution-manifest.yaml", { + obj: { + id: InstanceScheduler.solutionId, + name: InstanceScheduler.solutionName, + version: props.version, + cloudformation_templates: [ + { template: "instance-scheduler-on-aws.template", main_template: true }, + { 
template: "instance-scheduler-on-aws-remote.template" }, + ], + build_environment: { build_image: "aws/codebuild/standard:7.0" }, + }, + }); + + const homepage = "https://aws.amazon.com/solutions/implementations/instance-scheduler-on-aws/"; + + const commonPythonDevDeps = [ + "black@^24.3.0", + "flake8@^6.1.0", + "isort@^5.12.0", + "mypy@^1.7.1", + "pytest@^7.4.3", + "pytest-cov@^4.1.0", + "tox@^4.11.4", + ]; + + const commonPythonProjectOptions: CommonPythonProjectOptions = { + authorName, + version: props.version, + parent: this, + license, + homepage, + devDeps: commonPythonDevDeps, + }; + + new InstanceSchedulerLambdaFunction(commonPythonProjectOptions); + new InstanceSchedulerCli(commonPythonProjectOptions); + } + + private overrideVersion(version: string): void { + const packageFile = this.tryFindObjectFile("package.json"); + if (!packageFile) { + throw new Error("Error overriding package version"); + } + packageFile.addOverride("version", version); + } + + private getCdkDeps(cdkVersion: string): string[] { + return [ + `@aws-cdk/aws-lambda-python-alpha@${cdkVersion}-alpha.0`, + `@aws-cdk/aws-servicecatalogappregistry-alpha@${cdkVersion}-alpha.0`, + `@aws-cdk/aws-neptune-alpha@${cdkVersion}-alpha.0`, + ]; + } + + private addTestTasks(): void { + this.addE2ETestTask(); + + const prettierTask = this.addTask("test:prettier", { exec: "npx prettier --check ./**/*.ts" }); + const eslintTask = this.addTask("test:eslint", { exec: "npx eslint --max-warnings=0 ." 
}); + + const updateSnapshotsTask = this.addTask("test:update-snapshots", { + exec: "jest --updateSnapshot --passWithNoTests --coverageProvider=v8 --ci", + }); + + const updateTask = this.tasks.tryFind("test:update"); + if (!updateTask) { + throw new Error("Error adding subtasks to update task"); + } + updateTask.reset(); + updateTask.spawn(prettierTask); + updateTask.spawn(eslintTask); + updateTask.spawn(updateSnapshotsTask); + + const baseJestCommand = "jest --coverageProvider=v8 --ci"; + const cdkTestTask = this.addTask("test:cdk-tests", { exec: baseJestCommand }); + + const cdkTask = this.addTask("test:cdk"); + + cdkTask.spawn(prettierTask); + cdkTask.spawn(eslintTask); + cdkTask.spawn(cdkTestTask); + + const baseToxCommand = "python -m tox --parallel --exit-and-dump-after 1200"; + const appDir = "source/app"; + const cliDir = "source/cli"; + const appTestTask = this.addTask("test:app", { + cwd: appDir, + env: { TOX_PARALLEL_NO_SPINNER: "true" }, + exec: baseToxCommand, + }); + const cliTestTask = this.addTask("test:cli", { + cwd: cliDir, + env: { TOX_PARALLEL_NO_SPINNER: "true" }, + exec: baseToxCommand, + }); + + const testTask = this.tasks.tryFind("test"); + if (!testTask) { + throw new Error("Error adding subtasks to test task"); + } + testTask.reset(); + testTask.spawn(cdkTask); + testTask.spawn(appTestTask); + testTask.spawn(cliTestTask); + + const testCiTask = this.addTask("test:ci"); + + const jestCoverageOptions = `--coverage --coverageDirectory ${InstanceScheduler.coverageReportDir}/cdk-coverage`; + const cdkTestCiTask = this.addTask("test:cdk-tests:ci", { exec: `${baseJestCommand} ${jestCoverageOptions}` }); + + const cdkCiTask = this.addTask("test:cdk:ci"); + + cdkCiTask.spawn(prettierTask); + cdkCiTask.spawn(eslintTask); + cdkCiTask.spawn(cdkTestCiTask); + + const ciToxOptions = "--skip-missing-interpreters false"; + const appPytestOptions = `--junitxml=../../${InstanceScheduler.testReportDir}/lambda-test-report.xml --cov --cov-report 
"xml:../../${InstanceScheduler.coverageReportDir}/lambda-coverage.xml"`; + const appReportFixupCommand = `sed -i -e "s|.*|source/app/instance_scheduler|g" ../../${InstanceScheduler.coverageReportDir}/lambda-coverage.xml`; + const appTestCiTask = this.addTask("test:app:ci", { + cwd: appDir, + env: { TOX_PARALLEL_NO_SPINNER: "true" }, + exec: `${baseToxCommand} ${ciToxOptions} -- ${appPytestOptions} && ${appReportFixupCommand}`, + }); + const cliPytestOptions = `--junitxml=../../${InstanceScheduler.testReportDir}/cli-test-report.xml --cov --cov-report "xml:../../${InstanceScheduler.coverageReportDir}/cli-coverage.xml"`; + const cliReportFixupCommand = `sed -i -e "s|.*|source/cli/instance_scheduler_cli|g" ../../${InstanceScheduler.coverageReportDir}/cli-coverage.xml`; + const cliTestCiTask = this.addTask("test:cli:ci", { + cwd: cliDir, + env: { TOX_PARALLEL_NO_SPINNER: "true" }, + exec: `${baseToxCommand} ${ciToxOptions} -- ${cliPytestOptions} && ${cliReportFixupCommand}`, + }); + + testCiTask.spawn(cdkCiTask); + testCiTask.spawn(appTestCiTask); + testCiTask.spawn(cliTestCiTask); + } + + private addE2ETestTask(): void { + const e2eConfigFile = "source/pipeline/jest.config.json"; + new Jest(this, { + junitReporting: false, // we will override + updateSnapshot: UpdateSnapshot.NEVER, + configFilePath: e2eConfigFile, + jestConfig: { + reporters: [ + new JestReporter("jest-junit", { + outputDirectory: InstanceScheduler.testReportDir, + outputName: "e2e-test-report.xml", + }), + ], + roots: [`/e2e-tests`], + setupFilesAfterEnv: ["jest-extended/all"], + transform: { "^.+\\.tsx?$": new Transform("ts-jest") }, + globalSetup: "./setup.ts", + }, + }); + + this.addTask("e2e-tests", { exec: `jest --config ${e2eConfigFile}`, receiveArgs: true }); + } + + private addTypescriptFiles(...files: string[]): void { + const tsconfig = this.tryFindObjectFile("tsconfig.json"); + if (!tsconfig) { + throw new Error("Error overriding tsconfig"); + } + tsconfig.addOverride("files", files); + } + 
+ private addJestMatch(pattern: string): void { + if (!this.jest) { + throw new Error("Error overriding jest matcher"); + } + this.jest.addTestMatch(pattern); + } + + private removeCustomSnapshotResolver(): void { + const jestConfig = this.tryFindObjectFile(InstanceScheduler.jestConfigFile); + if (!jestConfig) { + throw new Error("Error overriding jest config"); + } + jestConfig.addOverride("snapshotResolver", undefined); + } +} + +interface CommonPythonProjectOptions { + readonly authorName: string; + readonly license: string; + readonly version: string; + readonly parent: Project; + readonly homepage: string; + readonly devDeps: string[]; +} + +class InstanceSchedulerLambdaFunction extends PythonProject { + constructor(options: CommonPythonProjectOptions) { + super({ + authorEmail: "", + moduleName: "instance_scheduler", + name: "instance_scheduler", + outdir: "./source/app", + poetry: true, + description: "Instance Scheduler on AWS", + deps: ["python@^3.11"], + pytest: false, + ...options, + }); + + const boto3StubsExtras = [ + "autoscaling", + "cloudwatch", + "dynamodb", + "ec2", + "ecs", + "lambda", + "logs", + "rds", + "resourcegroupstaggingapi", + "sns", + "ssm", + "sts", + ]; + + const motoExtras = ["autoscaling", "dynamodb", "ec2", "logs", "rds", "resourcegroupstaggingapi", "ssm"]; + + const boto3Version = "^1.34.1"; + const jmespathVersion = "1.0.1"; + const pythonDateutilVersion = "2.8.2"; + const urllib3Version = "1.26.15"; + [ + `boto3@${boto3Version}`, + `boto3-stubs-lite@{version = "${boto3Version}", extras = ${JSON.stringify(boto3StubsExtras)}}`, + `botocore@${boto3Version}`, + "botocore-stubs@^1.31.66", + "freezegun@^1.3.1", + `jmespath@${jmespathVersion}`, + "pytest-mock@^3.12.0", + "pytest-runner@^6.0.1", + "pytest-xdist@^3.5.0", + `python-dateutil@${pythonDateutilVersion}`, + `moto@{version = "^5.0.2", extras = ${JSON.stringify(motoExtras)}}`, + "types-freezegun@^1.1.10", + `types-jmespath@${jmespathVersion}`, + 
`types-python-dateutil@${pythonDateutilVersion}`, + "types-requests@2.31.0.6", // held back, need to support urllib3@^1 + `types-urllib3@^${urllib3Version}`, + "tzdata@^2023.3", + `urllib3@^${urllib3Version}`, + ].forEach((spec: string) => this.addDevDependency(spec)); + + [ + "aws-lambda-powertools@^2.26.0", + "packaging@^24.0", + ].forEach((spec: string) => this.addDependency(spec)); + + const pyproject = this.tryFindObjectFile("pyproject.toml"); + if (!pyproject) { + throw new Error("Could not override pyproject.toml"); + } + pyproject.addOverride("tool.poetry.authors", [options.authorName]); + + const installTask = this.tasks.tryFind("install"); + if (!installTask) { + throw new Error("Could not override install task"); + } + installTask.reset(); + installTask.exec("poetry lock --no-update && poetry install"); + } +} + +class InstanceSchedulerCli extends PythonProject { + constructor(options: CommonPythonProjectOptions) { + const boto3Version = "^1.34.1"; + const jmespathVersion = "1.0.1"; + super({ + authorEmail: "", + moduleName: "instance_scheduler_cli", + name: "instance_scheduler_cli", + outdir: "./source/cli", + poetry: true, + description: "Instance Scheduler on AWS CLI", + deps: ["python@^3.8.1", `boto3@${boto3Version}`, `jmespath@^${jmespathVersion}`], + pytest: false, + ...options, + }); + + const boto3StubsExtras = ["cloudformation", "lambda"]; + + const motoExtras = ["cloudformation", "lambda"]; + + [ + `boto3-stubs-lite@{version = "${boto3Version}", extras = ${JSON.stringify(boto3StubsExtras)}}`, + "jsonschema@~4.17.3", // held back, 4.18.0 is a breaking change + `moto@{version = "^5.0.2", extras = ${JSON.stringify(motoExtras)}}`, + `types-jmespath@^${jmespathVersion}`, + "types-PyYAML@^6.0.12.12", + "types-requests@2.31.0.6", // held back, need to support urllib3@^1 + ].forEach((spec: string) => this.addDevDependency(spec)); + + const pyproject = this.tryFindObjectFile("pyproject.toml"); + if (!pyproject) { + throw new Error("Could not override 
pyproject.toml"); + } + pyproject.addOverride("tool.poetry.authors", [options.authorName]); + pyproject.addOverride("tool.poetry.scripts.scheduler-cli", "instance_scheduler_cli:__main__"); + + const installTask = this.tasks.tryFind("install"); + if (!installTask) { + throw new Error("Could not override install task"); + } + installTask.reset(); + installTask.exec("poetry lock --no-update && poetry install"); + } +} + +main(); diff --git a/CHANGELOG.md b/CHANGELOG.md index fc18cfb2..8f752602 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,54 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). +## [3.0.0] - 2024-06-05 + +### Added + +- Added support for scheduling of Neptune and DocumentDB clusters +- Added support for scheduling of ASG through the automatic creation of Scheduled Scaling Rules from configured schedules +- Added optional Operational Insights Dashboard to CloudWatch for monitoring and insights into solution performance +- Added support for using multiple EC2 maintenance windows with a single schedule +- Added ability to specify KMS keys that Instance Scheduler should be granted permissions to use when starting + EC2 instances with encrypted EBS volumes + +### Changed + +- Separated "Scheduled Services" parameter into individual enabled/disabled parameters for each supported service +- Upgrade Python runtime to 3.11 +- Extensive refactoring to internal code to improve code quality and testability +- CloudWatch metrics feature renamed to "Per Schedule Metrics" and integrated with new Operational Insights Dashboard +- DynamoDB Deletion Protection now enabled by default on solution DynamoDB tables. +- Refactored maintenance window dynamodb table to be more cost-efficient at scale +- Updated schedule logs to include SchedulingDecision entries for all decisions made by the EC2/RDS schedulers. 
+- Scheduler CLI will now error when attempting to overwrite schedules managed by CloudFormation + +### Removed + +- Configuration settings from CloudFormation parameters no longer duplicated in DynamoDB +- Remove deprecated "overwrite" Schedule flag (distinct from still-supported "override" flag) +- Cloudwatch Metrics feature replaced with Operational Monitoring + +### Fixed + +- Fixed deployment error in China partition, introduced in v1.5.0 +- Fixed bug where CloudFormation Schedules used UTC timezone if not specified in template (instead of stack default) +- Fixed bug that would cause the scheduling request handler lambda would hang when trying to scheduler more than 50 RDS instances in the same region +- Fixed bug that would sometimes cause the CFN schedule custom resource to error when many schedules were deployed in parallel +- Fixed bug that would cause spoke stacks to not be correctly deregistered from the hub stack when undeployed +- Fixed bug in cli describe_schedule_usage command that would incorrectly estimate the behavior of schedules using nth weekday expressions +- Fixed bug that would cause schedules using monthday ranges of the format "n-31" to fail to load in months + with less days then the end of the range (such as February) +- Fixed configured_in_stack property not being correctly applied to periods deployed by CloudFormation custom resource. + +### Security + +- Break monolith Lambda Function and permissions apart based on principle of least privilege +- Spoke stack trust permissions restricted to only specific lambda roles in the Hub account +- Allow KMS keys for scheduling encrypted EBS volumes to be specified directly on hub/spoke stacks in cloudformation + rather needing to be added to scheduling roles manually +- Upgrade Requests to mitigate CVE-2024-35195 + ## [1.5.6] -- 2024-05-10 ### Security @@ -18,7 +66,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 
- Upgrade Black to mitigate CVE-2024-21503 - Upgrade idna to mitigate CVE-2024-3651 -## [1.5.4] -- 2024-02-29 +## [1.5.4] -- 2024-4-1 ### Security diff --git a/NOTICE.txt b/NOTICE.txt index b3d8845c..d333bead 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -14,10 +14,17 @@ This software includes third party software subject to the following copyrights: @aws-cdk/aws-lambda-python-alpha under the Apache License 2.0 @aws-cdk/aws-servicecatalogappregistry-alpha under the Apache License 2.0 +@aws-cdk/aws-neptune-alpha under the Apache License 2.0 @aws-sdk/client-dynamodb under the Apache License 2.0 +@aws-sdk/util-dynamodb under the Apache License 2.0 +@aws-sdk/client-lambda under the Apache License 2.0 +@aws-sdk/client-auto-scaling under the Apache License 2.0 @aws-sdk/client-ec2 under the Apache License 2.0 @aws-sdk/client-rds under the Apache License 2.0 @aws-sdk/client-ssm under the Apache License 2.0 +@aws-sdk/client-cloudformation under the Apache License 2.0 +@aws-sdk/client-docdb under the Apache License 2.0 +@aws-sdk/client-neptune under the Apache License 2.0 @aws-solutions-constructs/aws-lambda-dynamodb under the Apache License 2.0 adm-zip under the MIT License aws-cdk-lib under the Apache License 2.0 @@ -25,6 +32,7 @@ cdk-nag under the Apache License 2.0 constructs under the Apache License 2.0 projen under the Apache License 2.0 source-map-support under the MIT License +uuid under the MIT License attrs under the MIT License aws-lambda-powertools under the MIT License @@ -43,6 +51,7 @@ pluggy under the MIT License pyasn1 under the BSD 2-Clause License pytest under the MIT License python-dateutil under the Apache License 2.0 and the BSD 3-Clause "New" or "Revised" License +python-jose under the MIT License pytz under the MIT License requests under the Apache License 2.0 rsa under the Apache License 2.0 diff --git a/README.md b/README.md index 227ab387..6aac83d3 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ Instance Scheduler can be deployed to your AWS 
account directly from the source #### Deploying the hub stack ``` -npm install +npm ci npx cdk bootstrap npx cdk deploy instance-scheduler-on-aws ``` @@ -70,7 +70,7 @@ stack. ``` npx cdk bootstrap -npx cdk deploy instance-scheduler-on-aws-remote --parameters InstanceSchedulerAccount={account-id} --parameters namespace={namespace} --parameters UsingAWSOrganizations={useOrgs} +npx cdk deploy instance-scheduler-on-aws-remote --parameters InstanceSchedulerAccount={account-id} --parameters Namespace={namespace} --parameters UsingAWSOrganizations={useOrgs} ``` Replace: @@ -170,7 +170,7 @@ Ex. https://mybucket.s3.amazonaws.com/instance-scheduler-on-aws/v1.5.0.mybuild/i ### Running Tests Locally ``` -npm install +npm ci npm run test ``` @@ -207,7 +207,7 @@ Once the connection has been set up, make sure you save the connection ARN for t In your local environment, first install all necessary dependencies and bootstrap your account for CDK deployment. ``` -npm install +npm ci npx cdk bootstrap ``` @@ -266,18 +266,33 @@ click on the pipeline that begins with instance-scheduler-on-aws-testing-pipelin This solution uses [projen](https://projen.io/) to manage certain project files. If you need to modify any of these files, modify the source in [.projenrc.ts](./.projenrc.ts) and run `projen` to regenerate the files. +### Package installation + +``` +# For Node.js dependencies +npm ci + +# For Python dependencies +cd source/app +poetry install +``` + +If you don't have `poetry`, refer to [Poetry](https://python-poetry.org/docs/) to install `poetry`. + ## CDK Documentation Instance Scheduler on AWS templates are generated using AWS CDK, for further information on CDK please refer to the [documentation](https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html). - ## Collection of Operational Metrics -This solution collects anonymous operational metrics to help AWS improve the quality and features of the solution. 
For more information, including how to disable this capability, please see the [implementation guide](https://docs.aws.amazon.com/solutions/latest/instance-scheduler-on-aws/anonymized-data.html). + +This solution collects anonymized operational metrics to help AWS improve the quality and features of the solution. For +more information, including how to disable this capability, please see the [implementation +guide](https://docs.aws.amazon.com/solutions/latest/instance-scheduler-on-aws/anonymized-data.html). --- -Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at diff --git a/deployment/cdk-solution-helper/package-lock.json b/deployment/cdk-solution-helper/package-lock.json index 7f01aacc..36fed2cf 100644 --- a/deployment/cdk-solution-helper/package-lock.json +++ b/deployment/cdk-solution-helper/package-lock.json @@ -10,16 +10,16 @@ "license": "Apache-2.0", "dependencies": { "adm-zip": "^0.5.10", - "aws-cdk-lib": "^2.87.0" + "aws-cdk-lib": "^2.114.1" }, "devDependencies": { - "@types/adm-zip": "^0.5.0", - "@types/jest": "^29.5.3", - "@types/node": "^18.16.19", - "jest": "^29.6.1", + "@types/adm-zip": "^0.5.5", + "@types/jest": "^29.5.11", + "@types/node": "^18.19.3", + "jest": "^29.7.0", "ts-jest": "^29.1.1", "ts-node": "^10.9.1", - "typescript": "^5.1.6" + "typescript": "~5.3.3" } }, "node_modules/@ampproject/remapping": { @@ -36,9 +36,9 @@ } }, "node_modules/@aws-cdk/asset-awscli-v1": { - "version": "2.2.200", - "resolved": "https://registry.npmjs.org/@aws-cdk/asset-awscli-v1/-/asset-awscli-v1-2.2.200.tgz", - "integrity": "sha512-Kf5J8DfJK4wZFWT2Myca0lhwke7LwHcHBo+4TvWOGJrFVVKVuuiLCkzPPRBQQVDj0Vtn2NBokZAz8pfMpAqAKg==" + "version": "2.2.201", + "resolved": 
"https://registry.npmjs.org/@aws-cdk/asset-awscli-v1/-/asset-awscli-v1-2.2.201.tgz", + "integrity": "sha512-INZqcwDinNaIdb5CtW3ez5s943nX5stGBQS6VOP2JDlOFP81hM3fds/9NDknipqfUkZM43dx+HgVvkXYXXARCQ==" }, "node_modules/@aws-cdk/asset-kubectl-v20": { "version": "2.1.2", @@ -51,12 +51,12 @@ "integrity": "sha512-DDt4SLdLOwWCjGtltH4VCST7hpOI5DzieuhGZsBpZ+AgJdSI2GCjklCXm0GCTwJG/SolkL5dtQXyUKgg9luBDg==" }, "node_modules/@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", + "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", "dev": true, "dependencies": { - "@babel/highlight": "^7.22.13", + "@babel/highlight": "^7.23.4", "chalk": "^2.4.2" }, "engines": { @@ -135,30 +135,30 @@ } }, "node_modules/@babel/compat-data": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.2.tgz", - "integrity": "sha512-0S9TQMmDHlqAZ2ITT95irXKfxN9bncq8ZCoJhun3nHL/lLUxd2NKBJYoNGWH7S0hz6fRQwWlAWn/ILM0C70KZQ==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz", + "integrity": "sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.2.tgz", - "integrity": "sha512-n7s51eWdaWZ3vGT2tD4T7J6eJs3QoBXydv7vkUM06Bf1cbVD2Kc2UrkzhiQwobfV7NwOnQXYL7UBJ5VPU+RGoQ==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.5.tgz", + "integrity": 
"sha512-Cwc2XjUrG4ilcfOw4wBAK+enbdgwAcAJCfGUItPBKR7Mjw4aEfAFYrLxeRp4jWgtNIKn3n2AlBOfwwafl+42/g==", "dev": true, "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.23.0", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.5", "@babel/helper-compilation-targets": "^7.22.15", - "@babel/helper-module-transforms": "^7.23.0", - "@babel/helpers": "^7.23.2", - "@babel/parser": "^7.23.0", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helpers": "^7.23.5", + "@babel/parser": "^7.23.5", "@babel/template": "^7.22.15", - "@babel/traverse": "^7.23.2", - "@babel/types": "^7.23.0", + "@babel/traverse": "^7.23.5", + "@babel/types": "^7.23.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -174,12 +174,12 @@ } }, "node_modules/@babel/generator": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", - "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.5.tgz", + "integrity": "sha512-BPssCHrBD+0YrxviOa3QzpqwhNIXKEtOa2jQrm4FlmkC2apYgRnQcmPWiGZDlGxiNtltnUFolMe8497Esry+jA==", "dev": true, "dependencies": { - "@babel/types": "^7.23.0", + "@babel/types": "^7.23.5", "@jridgewell/gen-mapping": "^0.3.2", "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" @@ -251,9 +251,9 @@ } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz", - "integrity": "sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz", + "integrity": 
"sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==", "dev": true, "dependencies": { "@babel/helper-environment-visitor": "^7.22.20", @@ -303,9 +303,9 @@ } }, "node_modules/@babel/helper-string-parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", - "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", + "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", "dev": true, "engines": { "node": ">=6.9.0" @@ -321,32 +321,32 @@ } }, "node_modules/@babel/helper-validator-option": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz", - "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", + "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.2.tgz", - "integrity": "sha512-lzchcp8SjTSVe/fPmLwtWVBFC7+Tbn8LGHDVfDp9JGxpAY5opSaEFgt8UQvrnECWOTdji2mOWMz1rOhkHscmGQ==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.5.tgz", + "integrity": "sha512-oO7us8FzTEsG3U6ag9MfdF1iA/7Z6dz+MtFhifZk8C8o453rGJFFWUP1t+ULM9TUIAzC9uxXEiXjOiVMyd7QPg==", "dev": true, "dependencies": { "@babel/template": "^7.22.15", - "@babel/traverse": "^7.23.2", - "@babel/types": "^7.23.0" + 
"@babel/traverse": "^7.23.5", + "@babel/types": "^7.23.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", - "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", + "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", "dev": true, "dependencies": { "@babel/helper-validator-identifier": "^7.22.20", @@ -429,9 +429,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", - "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.5.tgz", + "integrity": "sha512-hOOqoiNXrmGdFbhgCzu6GiURxUgM27Xwd/aPuu8RfHEZPBzL1Z54okAHAQjXfcQNwvrlkAmAp4SlRTZ45vlthQ==", "dev": true, "bin": { "parser": "bin/babel-parser.js" @@ -501,9 +501,9 @@ } }, "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz", - "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.23.3.tgz", + "integrity": "sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg==", "dev": true, "dependencies": { "@babel/helper-plugin-utils": "^7.22.5" @@ -603,9 +603,9 @@ } }, "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz", - "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.23.3.tgz", + "integrity": "sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ==", "dev": true, "dependencies": { "@babel/helper-plugin-utils": "^7.22.5" @@ -632,19 +632,19 @@ } }, "node_modules/@babel/traverse": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", - "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.5.tgz", + "integrity": "sha512-czx7Xy5a6sapWWRx61m1Ke1Ra4vczu1mCTtJam5zRTBOonfdJ+S/B6HYmGYu3fJtr8GGET3si6IhgWVBhJ/m8w==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.23.0", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.5", "@babel/helper-environment-visitor": "^7.22.20", "@babel/helper-function-name": "^7.23.0", "@babel/helper-hoist-variables": "^7.22.5", "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.0", - "@babel/types": "^7.23.0", + "@babel/parser": "^7.23.5", + "@babel/types": "^7.23.5", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -653,12 +653,12 @@ } }, "node_modules/@babel/types": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", - "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.5.tgz", + "integrity": 
"sha512-ON5kSOJwVO6xXVRTvOI0eOnWe7VdUcIpsovGo9U/Br4Ie4UVFQTboO2cYnDhAGU6Fp+UxSiT+pMft0SMHfuq6w==", "dev": true, "dependencies": { - "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-string-parser": "^7.23.4", "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, @@ -1094,18 +1094,18 @@ "dev": true }, "node_modules/@types/adm-zip": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/@types/adm-zip/-/adm-zip-0.5.3.tgz", - "integrity": "sha512-LfeDIiFdvphelYY2aMWTyQBr5cTb1EL9Qcu19jFizdt2sL/jL+fy1fE8IgAKBFI5XfbGukaRDDM5PiJTrovAhA==", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@types/adm-zip/-/adm-zip-0.5.5.tgz", + "integrity": "sha512-YCGstVMjc4LTY5uK9/obvxBya93axZOVOyf2GSUulADzmLhYE45u2nAssCs/fWBs1Ifq5Vat75JTPwd5XZoPJw==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/babel__core": { - "version": "7.20.3", - "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.3.tgz", - "integrity": "sha512-54fjTSeSHwfan8AyHWrKbfBWiEUrNTZsUwPTDSNaaP1QDQIZbeNUg3a59E9D+375MzUw/x1vx2/0F5LBz+AeYA==", + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", "dev": true, "dependencies": { "@babel/parser": "^7.20.7", @@ -1116,18 +1116,18 @@ } }, "node_modules/@types/babel__generator": { - "version": "7.6.6", - "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.6.tgz", - "integrity": "sha512-66BXMKb/sUWbMdBNdMvajU7i/44RkrA3z/Yt1c7R5xejt8qh84iU54yUWCtm0QwGJlDcf/gg4zd/x4mpLAlb/w==", + "version": "7.6.7", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.7.tgz", + "integrity": "sha512-6Sfsq+EaaLrw4RmdFWE9Onp63TOUue71AWb4Gpa6JxzgTYtimbM086WnYTy2U67AofR++QKCo08ZP6pwx8YFHQ==", "dev": true, "dependencies": { "@babel/types": "^7.0.0" } 
}, "node_modules/@types/babel__template": { - "version": "7.4.3", - "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.3.tgz", - "integrity": "sha512-ciwyCLeuRfxboZ4isgdNZi/tkt06m8Tw6uGbBSBgWrnnZGNXiEyM27xc/PjXGQLqlZ6ylbgHMnm7ccF9tCkOeQ==", + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", "dev": true, "dependencies": { "@babel/parser": "^7.1.0", @@ -1135,51 +1135,51 @@ } }, "node_modules/@types/babel__traverse": { - "version": "7.20.3", - "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.3.tgz", - "integrity": "sha512-Lsh766rGEFbaxMIDH7Qa+Yha8cMVI3qAK6CHt3OR0YfxOIn5Z54iHiyDRycHrBqeIiqGa20Kpsv1cavfBKkRSw==", + "version": "7.20.4", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.4.tgz", + "integrity": "sha512-mSM/iKUk5fDDrEV/e83qY+Cr3I1+Q3qqTuEn++HAWYjEa1+NxZr6CNrcJGf2ZTnq4HoFGC3zaTPZTobCzCFukA==", "dev": true, "dependencies": { "@babel/types": "^7.20.7" } }, "node_modules/@types/graceful-fs": { - "version": "4.1.8", - "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.8.tgz", - "integrity": "sha512-NhRH7YzWq8WiNKVavKPBmtLYZHxNY19Hh+az28O/phfp68CF45pMFud+ZzJ8ewnxnC5smIdF3dqFeiSUQ5I+pw==", + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/istanbul-lib-coverage": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.5.tgz", - "integrity": "sha512-zONci81DZYCZjiLe0r6equvZut0b+dBRPBN5kBDjsONnutYNtJMoWQ9uR2RkL1gLG9NMTzvf+29e5RFfPbeKhQ==", + 
"version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", "dev": true }, "node_modules/@types/istanbul-lib-report": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.2.tgz", - "integrity": "sha512-8toY6FgdltSdONav1XtUHl4LN1yTmLza+EuDazb/fEmRNCwjyqNVIQWs2IfC74IqjHkREs/nQ2FWq5kZU9IC0w==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", "dev": true, "dependencies": { "@types/istanbul-lib-coverage": "*" } }, "node_modules/@types/istanbul-reports": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.3.tgz", - "integrity": "sha512-1nESsePMBlf0RPRffLZi5ujYh7IH1BWL4y9pr+Bn3cJBdxz+RTP8bUFljLz9HvzhhOSWKdyBZ4DIivdL6rvgZg==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", "dev": true, "dependencies": { "@types/istanbul-lib-report": "*" } }, "node_modules/@types/jest": { - "version": "29.5.6", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.6.tgz", - "integrity": "sha512-/t9NnzkOpXb4Nfvg17ieHE6EeSjDS2SGSpNYfoLbUAeL/EOueU/RSdOWFpfQTXBEM7BguYW1XQ0EbM+6RlIh6w==", + "version": "29.5.11", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.11.tgz", + "integrity": "sha512-S2mHmYIVe13vrm6q4kN6fLYYAka15ALQki/vgDC3mIukEOx8WJlv0kQPM+d4w8Gp6u0uSdKND04IlTXBv0rwnQ==", "dev": true, "dependencies": { "expect": "^29.0.0", @@ -1187,36 +1187,39 @@ } }, "node_modules/@types/node": { - 
"version": "18.18.6", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.18.6.tgz", - "integrity": "sha512-wf3Vz+jCmOQ2HV1YUJuCWdL64adYxumkrxtc+H1VUQlnQI04+5HtH+qZCOE21lBE7gIrt+CwX2Wv8Acrw5Ak6w==", - "dev": true + "version": "18.19.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.3.tgz", + "integrity": "sha512-k5fggr14DwAytoA/t8rPrIz++lXK7/DqckthCmoZOKNsEbJkId4Z//BqgApXBUGrGddrigYa1oqheo/7YmW4rg==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/stack-utils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.2.tgz", - "integrity": "sha512-g7CK9nHdwjK2n0ymT2CW698FuWJRIx+RP6embAzZ2Qi8/ilIrA1Imt2LVSeHUzKvpoi7BhmmQcXz95eS0f2JXw==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", "dev": true }, "node_modules/@types/yargs": { - "version": "17.0.29", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.29.tgz", - "integrity": "sha512-nacjqA3ee9zRF/++a3FUY1suHTFKZeHba2n8WeDw9cCVdmzmHpIxyzOJBcpHvvEmS8E9KqWlSnWHUkOrkhWcvA==", + "version": "17.0.32", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.32.tgz", + "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", "dev": true, "dependencies": { "@types/yargs-parser": "*" } }, "node_modules/@types/yargs-parser": { - "version": "21.0.2", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.2.tgz", - "integrity": "sha512-5qcvofLPbfjmBfKaLfj/+f+Sbd6pN4zl7w7VSVI5uz7m9QZTuB2aZAa2uo1wHFBNN2x6g/SoTkXmd8mQnQF2Cw==", + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": 
"sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", "dev": true }, "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "version": "8.11.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz", + "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -1226,9 +1229,9 @@ } }, "node_modules/acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "version": "8.3.1", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.1.tgz", + "integrity": "sha512-TgUZgYvqZprrl7YldZNoa9OciCAyZR+Ejm9eXzKCmjsF5IKp/wgQ7Z/ZpjpGTIUPwrHQIcYeI8qDh4PsEwxMbw==", "dev": true, "engines": { "node": ">=0.4.0" @@ -1310,9 +1313,9 @@ } }, "node_modules/aws-cdk-lib": { - "version": "2.102.0", - "resolved": "https://registry.npmjs.org/aws-cdk-lib/-/aws-cdk-lib-2.102.0.tgz", - "integrity": "sha512-pYcKGlshU2j7n3f8TbJ1CCrwNnLsgGd17G7p/s9njIU8xakU4tIwuNyo4Q9HHQA7aUb3enPI/afAn1A6gp7TrA==", + "version": "2.114.1", + "resolved": "https://registry.npmjs.org/aws-cdk-lib/-/aws-cdk-lib-2.114.1.tgz", + "integrity": "sha512-pJy+Sa3+s6K9I0CXYGU8J5jumw9uQEbl8zPK8EMA+A6hP9qb1JN+a8ohyw6a1O1cb4D5S6gwH+hE7Fq7hGPY3A==", "bundleDependencies": [ "@balena/dockerignore", "case", @@ -1326,16 +1329,16 @@ "yaml" ], "dependencies": { - "@aws-cdk/asset-awscli-v1": "^2.2.200", + "@aws-cdk/asset-awscli-v1": "^2.2.201", "@aws-cdk/asset-kubectl-v20": "^2.1.2", "@aws-cdk/asset-node-proxy-agent-v6": "^2.0.1", "@balena/dockerignore": "^1.0.2", "case": "1.6.3", "fs-extra": "^11.1.1", - "ignore": "^5.2.4", + 
"ignore": "^5.3.0", "jsonschema": "^1.4.1", "minimatch": "^3.1.2", - "punycode": "^2.3.0", + "punycode": "^2.3.1", "semver": "^7.5.4", "table": "^6.8.1", "yaml": "1.10.2" @@ -1469,7 +1472,7 @@ "license": "ISC" }, "node_modules/aws-cdk-lib/node_modules/ignore": { - "version": "5.2.4", + "version": "5.3.0", "inBundle": true, "license": "MIT", "engines": { @@ -1536,7 +1539,7 @@ } }, "node_modules/aws-cdk-lib/node_modules/punycode": { - "version": "2.3.0", + "version": "2.3.1", "inBundle": true, "license": "MIT", "engines": { @@ -1621,7 +1624,7 @@ } }, "node_modules/aws-cdk-lib/node_modules/universalify": { - "version": "2.0.0", + "version": "2.0.1", "inBundle": true, "license": "MIT", "engines": { @@ -1783,9 +1786,9 @@ } }, "node_modules/browserslist": { - "version": "4.22.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.1.tgz", - "integrity": "sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==", + "version": "4.22.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.2.tgz", + "integrity": "sha512-0UgcrvQmBDvZHFGdYUehrCNIazki7/lUP3kkoi/r3YB2amZbFM9J43ZRkJTXBUZK4gmx56+Sqk9+Vs9mwZx9+A==", "dev": true, "funding": [ { @@ -1802,9 +1805,9 @@ } ], "dependencies": { - "caniuse-lite": "^1.0.30001541", - "electron-to-chromium": "^1.4.535", - "node-releases": "^2.0.13", + "caniuse-lite": "^1.0.30001565", + "electron-to-chromium": "^1.4.601", + "node-releases": "^2.0.14", "update-browserslist-db": "^1.0.13" }, "bin": { @@ -1860,9 +1863,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001551", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001551.tgz", - "integrity": "sha512-vtBAez47BoGMMzlbYhfXrMV1kvRF2WP/lqiMuDu1Sb4EE4LKEgjopFDSRtZfdVnslNRpOqV/woE+Xgrwj6VQlg==", + "version": "1.0.30001566", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001566.tgz", + "integrity": 
"sha512-ggIhCsTxmITBAMmK8yZjEhCO5/47jKXPu6Dha/wuCS4JePVL+3uiDEBuhu2aIoT+bqTOR8L76Ip1ARL9xYsEJA==", "dev": true, "funding": [ { @@ -2102,9 +2105,9 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.4.559", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.559.tgz", - "integrity": "sha512-iS7KhLYCSJbdo3rUSkhDTVuFNCV34RKs2UaB9Ecr7VlqzjjWW//0nfsFF5dtDmyXlZQaDYYtID5fjtC/6lpRug==", + "version": "1.4.608", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.608.tgz", + "integrity": "sha512-J2f/3iIIm3Mo0npneITZ2UPe4B1bg8fTNrFjD8715F/k1BvbviRuqYGkET1PgprrczXYTHFvotbBOmUp6KE0uA==", "dev": true }, "node_modules/emittery": { @@ -2273,6 +2276,15 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/gensync": { "version": "1.0.0-beta.2", "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", @@ -2347,15 +2359,6 @@ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", "dev": true }, - "node_modules/has": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.4.tgz", - "integrity": "sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==", - "dev": true, - "engines": { - "node": ">= 0.4.0" - } - }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -2365,6 +2368,18 @@ "node": ">=8" } }, + "node_modules/hasown": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", + "integrity": 
"sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", + "dev": true, + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/html-escaper": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", @@ -2431,12 +2446,12 @@ "dev": true }, "node_modules/is-core-module": { - "version": "2.13.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", - "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", "dev": true, "dependencies": { - "has": "^1.0.3" + "hasown": "^2.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -2488,9 +2503,9 @@ "dev": true }, "node_modules/istanbul-lib-coverage": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", - "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", "dev": true, "engines": { "node": ">=8" @@ -3395,9 +3410,9 @@ "dev": true }, "node_modules/node-releases": { - "version": "2.0.13", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", - "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==", + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", + "integrity": 
"sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==", "dev": true }, "node_modules/normalize-path": { @@ -4065,9 +4080,9 @@ } }, "node_modules/typescript": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", - "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.3.tgz", + "integrity": "sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -4077,6 +4092,12 @@ "node": ">=14.17" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/update-browserslist-db": { "version": "1.0.13", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", @@ -4114,9 +4135,9 @@ "dev": true }, "node_modules/v8-to-istanbul": { - "version": "9.1.3", - "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.3.tgz", - "integrity": "sha512-9lDD+EVI2fjFsMWXc6dy5JJzBsVTcQ2fVkfBvncZ6xJWG9wtBhOldG+mHkSL0+V1K/xgZz0JDO5UT5hFwHUghg==", + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz", + "integrity": "sha512-/EH/sDgxU2eGxajKdwLCDmQ4FWq+kpi3uCmBGpw1xJtnAxEjlD8j8PEiGWpCIMIs3ciNAgH0d3TTJiUkYzyZjA==", "dev": true, "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", diff --git a/deployment/cdk-solution-helper/package.json b/deployment/cdk-solution-helper/package.json index 97176fb8..97ffb570 100644 --- a/deployment/cdk-solution-helper/package.json +++ b/deployment/cdk-solution-helper/package.json @@ -9,20 +9,20 @@ 
"organization": true }, "scripts": { - "test": "jest" + "test": "jest --silent" }, "license": "Apache-2.0", "devDependencies": { - "@types/adm-zip": "^0.5.0", - "@types/jest": "^29.5.3", - "@types/node": "^18.16.19", - "jest": "^29.6.1", + "@types/adm-zip": "^0.5.5", + "@types/jest": "^29.5.11", + "@types/node": "^18.19.3", + "jest": "^29.7.0", "ts-jest": "^29.1.1", "ts-node": "^10.9.1", - "typescript": "^5.1.6" + "typescript": "~5.3.3" }, "dependencies": { "adm-zip": "^0.5.10", - "aws-cdk-lib": "^2.87.0" + "aws-cdk-lib": "^2.114.1" } } diff --git a/deployment/cdk-solution-helper/tsconfig.json b/deployment/cdk-solution-helper/tsconfig.json deleted file mode 100644 index 6d0eb8fd..00000000 --- a/deployment/cdk-solution-helper/tsconfig.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "ts-node/node16/tsconfig.json", -} \ No newline at end of file diff --git a/deployment/run-unit-tests.sh b/deployment/run-unit-tests.sh old mode 100644 new mode 100755 diff --git a/jest.config.json b/jest.config.json index 385c51a8..d3a28078 100644 --- a/jest.config.json +++ b/jest.config.json @@ -1,10 +1,5 @@ { - "roots": [ - "/source/instance-scheduler/tests" - ], - "transform": { - "^.+\\.tsx?$": "ts-jest" - }, + "coverageProvider": "v8", "reporters": [ "default", [ @@ -15,6 +10,15 @@ } ] ], + "roots": [ + "/source/instance-scheduler/tests" + ], + "transform": { + "^.+\\.tsx?$": "ts-jest" + }, + "setupFilesAfterEnv": [ + "jest-extended/all" + ], "testMatch": [ "**/lib/instance-scheduler/tests/**/?(*.)+(spec|test).js?(x)", "**/*.test.ts" diff --git a/package-lock.json b/package-lock.json index aa591700..b9799fe2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,32 +1,41 @@ { "name": "instance-scheduler-on-aws", - "version": "1.5.6", + "version": "3.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "instance-scheduler-on-aws", - "version": "1.5.6", + "version": "3.0.0", "license": "Apache-2.0", "dependencies": { - "@aws-cdk/aws-lambda-python-alpha": 
"^2.102.0-alpha.0", - "@aws-cdk/aws-servicecatalogappregistry-alpha": "^2.102.0-alpha.0", - "@aws-sdk/client-dynamodb": "^3.552.0", - "@aws-sdk/client-ec2": "^3.552.0", - "@aws-sdk/client-rds": "^3.552.0", - "@aws-sdk/client-ssm": "^3.552.0", - "@aws-solutions-constructs/aws-lambda-dynamodb": "^2.54.1", - "aws-cdk-lib": "^2.102.0", - "cdk-nag": "^2.28.87", + "@aws-cdk/aws-lambda-python-alpha": "2.130.0-alpha.0", + "@aws-cdk/aws-neptune-alpha": "2.130.0-alpha.0", + "@aws-cdk/aws-servicecatalogappregistry-alpha": "2.130.0-alpha.0", + "@aws-sdk/client-auto-scaling": "^3.583.0", + "@aws-sdk/client-cloudformation": "^3.583.0", + "@aws-sdk/client-docdb": "^3.583.0", + "@aws-sdk/client-dynamodb": "^3.585.0", + "@aws-sdk/client-ec2": "^3.585.0", + "@aws-sdk/client-lambda": "^3.583.0", + "@aws-sdk/client-neptune": "^3.583.0", + "@aws-sdk/client-rds": "^3.583.0", + "@aws-sdk/client-ssm": "^3.583.0", + "@aws-sdk/util-dynamodb": "^3.585.0", + "aws-cdk-lib": "2.130.0", + "cdk-nag": "^2.28.127", "constructs": "^10.0.5", - "source-map-support": "^0.5.21" + "source-map-support": "^0.5.21", + "uuid": "^8.3.2" }, "devDependencies": { + "@cdklabs/cdk-ssm-documents": "^0.0.41", "@types/jest": "^29.5.12", "@types/node": "^18", + "@types/uuid": "^9.0.8", "@typescript-eslint/eslint-plugin": "^6.21.0", - "aws-cdk": "^2.102.0", - "esbuild": "^0.20.2", + "aws-cdk": "2.130.0", + "esbuild": "^0.21.4", "eslint": "^8.57.0", "eslint-config-prettier": "^9.1.0", "eslint-plugin-header": "^3.1.1", @@ -35,10 +44,10 @@ "jest": "^29.7.0", "jest-extended": "^4.0.2", "jest-junit": "^16.0.0", - "projen": "^0.80.20", - "ts-jest": "^29.1.2", + "projen": "^0.82.1", + "ts-jest": "^29.1.4", "ts-node": "^10.9.2", - "typescript": "~5.1.6" + "typescript": "~5.2.x" }, "engines": { "node": ">= 18.0.0" @@ -82,41 +91,56 @@ "integrity": "sha512-DDt4SLdLOwWCjGtltH4VCST7hpOI5DzieuhGZsBpZ+AgJdSI2GCjklCXm0GCTwJG/SolkL5dtQXyUKgg9luBDg==" }, "node_modules/@aws-cdk/aws-lambda-python-alpha": { - "version": "2.102.0-alpha.0", 
- "resolved": "https://registry.npmjs.org/@aws-cdk/aws-lambda-python-alpha/-/aws-lambda-python-alpha-2.102.0-alpha.0.tgz", - "integrity": "sha512-Or0w7lecc4ioLDXyE2e3DKbPKCeFj+lZnHMY3sHkYXR8Fiy+xyWrULi+Nrg8q8KI19lD6QT5mKZ3jAgjysKvlw==", + "version": "2.130.0-alpha.0", + "resolved": "https://registry.npmjs.org/@aws-cdk/aws-lambda-python-alpha/-/aws-lambda-python-alpha-2.130.0-alpha.0.tgz", + "integrity": "sha512-pUGmXxx9XC+POFVnTBvNPEgMkQ2Py6krpK003GOnRuTQH34yTdrgeIvLq1RQRVwaqeDdN74dx8r567XwZG0FoA==", "engines": { "node": ">= 14.15.0" }, "peerDependencies": { - "aws-cdk-lib": "^2.102.0", + "aws-cdk-lib": "^2.130.0", "constructs": "^10.0.0" } }, - "node_modules/@aws-cdk/aws-servicecatalogappregistry-alpha": { - "version": "2.102.0-alpha.0", - "resolved": "https://registry.npmjs.org/@aws-cdk/aws-servicecatalogappregistry-alpha/-/aws-servicecatalogappregistry-alpha-2.102.0-alpha.0.tgz", - "integrity": "sha512-tJp51wh0c9bbrKItbu6pOczCl9sfyqNFHVZQqCutReQt5iBzOjBsmzbrh9+DhlICA/NpdBTGk1qu6z4KOaNqcQ==", + "node_modules/@aws-cdk/aws-neptune-alpha": { + "version": "2.130.0-alpha.0", + "resolved": "https://registry.npmjs.org/@aws-cdk/aws-neptune-alpha/-/aws-neptune-alpha-2.130.0-alpha.0.tgz", + "integrity": "sha512-M80CrWR/GYmlKKQtXxcm8DB9BXpqmoLe1xNjkNFozTcmfi4g95b0bu0hpb//wgLQGA0G3Jtoh+WAYQwePXdSdg==", "engines": { "node": ">= 14.15.0" }, "peerDependencies": { - "aws-cdk-lib": "^2.102.0", + "aws-cdk-lib": "^2.130.0", "constructs": "^10.0.0" } }, - "node_modules/@aws-cdk/integ-tests-alpha": { - "version": "2.135.0-alpha.0", - "resolved": "https://registry.npmjs.org/@aws-cdk/integ-tests-alpha/-/integ-tests-alpha-2.135.0-alpha.0.tgz", - "integrity": "sha512-xnwAh86J4ZQIF4inbRM6QKQwGBvLJgdNTLsY39jkK8jjytw/b4TFqIwgeH/TmdskZkGxwimkleLrcbwslsU8vw==", + "node_modules/@aws-cdk/aws-servicecatalogappregistry-alpha": { + "version": "2.130.0-alpha.0", + "resolved": 
"https://registry.npmjs.org/@aws-cdk/aws-servicecatalogappregistry-alpha/-/aws-servicecatalogappregistry-alpha-2.130.0-alpha.0.tgz", + "integrity": "sha512-GQ6dTR9OEXzqvSOH9vqyepO7//AImoXlj1K+n9JwlRsSDqkdiAQd9TYgfauz0XDpp9L10gUWpyMTlEe81vKiTw==", "engines": { "node": ">= 14.15.0" }, "peerDependencies": { - "aws-cdk-lib": "^2.135.0", + "aws-cdk-lib": "^2.130.0", "constructs": "^10.0.0" } }, + "node_modules/@aws-crypto/crc32": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@aws-crypto/crc32/-/crc32-3.0.0.tgz", + "integrity": "sha512-IzSgsrxUcsrejQbPVilIKy16kAT52EwB6zSaI+M3xxIhKh5+aldEyvI+z6erM7TCLB2BJsFrtHjp6/4/sr+3dA==", + "dependencies": { + "@aws-crypto/util": "^3.0.0", + "@aws-sdk/types": "^3.222.0", + "tslib": "^1.11.1" + } + }, + "node_modules/@aws-crypto/crc32/node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" + }, "node_modules/@aws-crypto/ie11-detection": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@aws-crypto/ie11-detection/-/ie11-detection-3.0.0.tgz", @@ -193,57 +217,228 @@ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" }, + "node_modules/@aws-sdk/client-auto-scaling": { + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-auto-scaling/-/client-auto-scaling-3.583.0.tgz", + "integrity": "sha512-kePAitT7aTaOht2lyrHgJlmZjGPWLS6F+E0Qz//ADVgRPEVC8tvw+8EYk7sCT4DqZaXEaRB63KYrnxyBLwo2Mg==", + "dependencies": { + "@aws-crypto/sha256-browser": "3.0.0", + "@aws-crypto/sha256-js": "3.0.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + 
"@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/client-cloudformation": { + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-cloudformation/-/client-cloudformation-3.583.0.tgz", + "integrity": "sha512-jMuxCtJq85I+sXkxS07tGJvJsDb12TFjKCTaR5Q6ucMfmSng9nPSwclkOiCO2xscWUHV4OEHreIDpkB3nz4tGg==", + "dependencies": { + "@aws-crypto/sha256-browser": "3.0.0", + "@aws-crypto/sha256-js": "3.0.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": 
"3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", + "tslib": "^2.6.2", + "uuid": "^9.0.1" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/client-cloudformation/node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/@aws-sdk/client-docdb": { + 
"version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-docdb/-/client-docdb-3.583.0.tgz", + "integrity": "sha512-9hg4VQHfJdW41Xl5Q87xKCzh6ObMw+un6BH8fzcDl3jEO1x2pZh/pLSHX6fySio96zrGI8662YEZ0pQfJzmu0g==", + "dependencies": { + "@aws-crypto/sha256-browser": "3.0.0", + "@aws-crypto/sha256-js": "3.0.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-sdk-rds": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + 
"node": ">=16.0.0" + } + }, "node_modules/@aws-sdk/client-dynamodb": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-dynamodb/-/client-dynamodb-3.552.0.tgz", - "integrity": "sha512-UW7Ud4bYhIQbCBxh4Pychqufr7dOaT6t33639Mi26ShqNlGpyRD5PG6FXyj2s5nJ+DR5VGkSwRSSpWUaAiW1dw==", + "version": "3.585.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-dynamodb/-/client-dynamodb-3.585.0.tgz", + "integrity": "sha512-XQxBBRkJ1Z3OUQl8SDzU3PFAYIqLvLbKEJXBA7PyXwHg/eUBUIEDpRfUzp0ljZvvr0CQxtX8UMsRHlOcJaGMNg==", "dependencies": { "@aws-crypto/sha256-browser": "3.0.0", "@aws-crypto/sha256-js": "3.0.0", - "@aws-sdk/client-sts": "3.552.0", - "@aws-sdk/core": "3.552.0", - "@aws-sdk/credential-provider-node": "3.552.0", - "@aws-sdk/middleware-endpoint-discovery": "3.535.0", - "@aws-sdk/middleware-host-header": "3.535.0", - "@aws-sdk/middleware-logger": "3.535.0", - "@aws-sdk/middleware-recursion-detection": "3.535.0", - "@aws-sdk/middleware-user-agent": "3.540.0", - "@aws-sdk/region-config-resolver": "3.535.0", - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-endpoints": "3.540.0", - "@aws-sdk/util-user-agent-browser": "3.535.0", - "@aws-sdk/util-user-agent-node": "3.535.0", - "@smithy/config-resolver": "^2.2.0", - "@smithy/core": "^1.4.2", - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/hash-node": "^2.2.0", - "@smithy/invalid-dependency": "^2.2.0", - "@smithy/middleware-content-length": "^2.2.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-retry": "^2.3.1", - "@smithy/middleware-serde": "^2.3.0", - "@smithy/middleware-stack": "^2.2.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/node-http-handler": "^2.5.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", - "@smithy/util-base64": "^2.3.0", - "@smithy/util-body-length-browser": "^2.2.0", - "@smithy/util-body-length-node": "^2.3.0", - 
"@smithy/util-defaults-mode-browser": "^2.2.1", - "@smithy/util-defaults-mode-node": "^2.3.1", - "@smithy/util-endpoints": "^1.2.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-retry": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", - "@smithy/util-waiter": "^2.2.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-endpoint-discovery": "3.577.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", "tslib": "^2.6.2", "uuid": "^9.0.1" }, "engines": { - "node": ">=14.0.0" + "node": 
">=16.0.0" } }, "node_modules/@aws-sdk/client-dynamodb/node_modules/uuid": { @@ -259,56 +454,57 @@ } }, "node_modules/@aws-sdk/client-ec2": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-ec2/-/client-ec2-3.552.0.tgz", - "integrity": "sha512-PDJucMlPPLmFMIvShE0sqdnriCjG1wFJLR/5kX9pc0ZTfpINRq0lzjwLN4Klmv/JLHF6B0YK0GQaN7l//7EFaA==", + "version": "3.585.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-ec2/-/client-ec2-3.585.0.tgz", + "integrity": "sha512-mLmwiMyxSBEdYjyy/YenAs8q6X0A+tDkMYw79YpvdsaIJZf/frFiMr9oz16FXaSJb2kb/Cetd7VPR6akgy0s7Q==", "dependencies": { "@aws-crypto/sha256-browser": "3.0.0", "@aws-crypto/sha256-js": "3.0.0", - "@aws-sdk/client-sts": "3.552.0", - "@aws-sdk/core": "3.552.0", - "@aws-sdk/credential-provider-node": "3.552.0", - "@aws-sdk/middleware-host-header": "3.535.0", - "@aws-sdk/middleware-logger": "3.535.0", - "@aws-sdk/middleware-recursion-detection": "3.535.0", - "@aws-sdk/middleware-sdk-ec2": "3.552.0", - "@aws-sdk/middleware-user-agent": "3.540.0", - "@aws-sdk/region-config-resolver": "3.535.0", - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-endpoints": "3.540.0", - "@aws-sdk/util-user-agent-browser": "3.535.0", - "@aws-sdk/util-user-agent-node": "3.535.0", - "@smithy/config-resolver": "^2.2.0", - "@smithy/core": "^1.4.2", - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/hash-node": "^2.2.0", - "@smithy/invalid-dependency": "^2.2.0", - "@smithy/middleware-content-length": "^2.2.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-retry": "^2.3.1", - "@smithy/middleware-serde": "^2.3.0", - "@smithy/middleware-stack": "^2.2.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/node-http-handler": "^2.5.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", - "@smithy/util-base64": "^2.3.0", - "@smithy/util-body-length-browser": "^2.2.0", - "@smithy/util-body-length-node": 
"^2.3.0", - "@smithy/util-defaults-mode-browser": "^2.2.1", - "@smithy/util-defaults-mode-node": "^2.3.1", - "@smithy/util-endpoints": "^1.2.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-retry": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", - "@smithy/util-waiter": "^2.2.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-sdk-ec2": "3.582.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", "tslib": "^2.6.2", "uuid": "^9.0.1" }, "engines": { - "node": ">=14.0.0" + "node": 
">=16.0.0" } }, "node_modules/@aws-sdk/client-ec2/node_modules/uuid": { @@ -323,108 +519,219 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/@aws-sdk/client-lambda": { + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-lambda/-/client-lambda-3.583.0.tgz", + "integrity": "sha512-d8rFjAA3UE+K7rTbp7XuMo2FVkr+S9IztkwyUo9hgWZDbdbUl7vhKSX220byJK/BZOnnIY4IXGNkbGoAT+aKyw==", + "dependencies": { + "@aws-crypto/sha256-browser": "3.0.0", + "@aws-crypto/sha256-js": "3.0.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/eventstream-serde-browser": "^3.0.0", + "@smithy/eventstream-serde-config-resolver": "^3.0.0", + "@smithy/eventstream-serde-node": "^3.0.0", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + 
"@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-stream": "^3.0.1", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/client-neptune": { + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-neptune/-/client-neptune-3.583.0.tgz", + "integrity": "sha512-p/uOg4E1OtmfufQaTt4VPGdWLVNn6SIFZX8WUGOBTPGQ7Da/hghmA/onrGr+nURoiod9Ohmw1PozZ8Zr+pdRYw==", + "dependencies": { + "@aws-crypto/sha256-browser": "3.0.0", + "@aws-crypto/sha256-js": "3.0.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-sdk-rds": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + 
"@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, "node_modules/@aws-sdk/client-rds": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-rds/-/client-rds-3.552.0.tgz", - "integrity": "sha512-rcSGv/F89ernCL1PAR/En0tlxxw2qODnDa3SKtqPZWhpYiZEykOh7ZhDixm9sfdy7c5IsdMVPDIiDC35CrZPsg==", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-rds/-/client-rds-3.583.0.tgz", + "integrity": "sha512-lrTF5MEe7whpffn/s11iWqoRVLKE1/v5K4RV13Jr+7crpTMcBMjsE59L6txxygTb58Xsm3YqDqhNvrQvIT5zNw==", "dependencies": { "@aws-crypto/sha256-browser": "3.0.0", "@aws-crypto/sha256-js": "3.0.0", - "@aws-sdk/client-sts": "3.552.0", - "@aws-sdk/core": "3.552.0", - "@aws-sdk/credential-provider-node": "3.552.0", - "@aws-sdk/middleware-host-header": "3.535.0", - "@aws-sdk/middleware-logger": "3.535.0", - "@aws-sdk/middleware-recursion-detection": "3.535.0", - "@aws-sdk/middleware-sdk-rds": "3.552.0", - "@aws-sdk/middleware-user-agent": "3.540.0", - "@aws-sdk/region-config-resolver": "3.535.0", - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-endpoints": "3.540.0", - "@aws-sdk/util-user-agent-browser": "3.535.0", - "@aws-sdk/util-user-agent-node": "3.535.0", - "@smithy/config-resolver": "^2.2.0", - "@smithy/core": "^1.4.2", - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/hash-node": "^2.2.0", - "@smithy/invalid-dependency": "^2.2.0", - "@smithy/middleware-content-length": "^2.2.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-retry": "^2.3.1", - "@smithy/middleware-serde": "^2.3.0", - 
"@smithy/middleware-stack": "^2.2.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/node-http-handler": "^2.5.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", - "@smithy/util-base64": "^2.3.0", - "@smithy/util-body-length-browser": "^2.2.0", - "@smithy/util-body-length-node": "^2.3.0", - "@smithy/util-defaults-mode-browser": "^2.2.1", - "@smithy/util-defaults-mode-node": "^2.3.1", - "@smithy/util-endpoints": "^1.2.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-retry": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", - "@smithy/util-waiter": "^2.2.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-sdk-rds": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + 
"@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/client-ssm": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-ssm/-/client-ssm-3.552.0.tgz", - "integrity": "sha512-u9oh0RtcnqLkIbNv1hWQ8a3EgPEl0OPKzjTyYUgguUGzGM00BQ7eEPde02uEiQIfIb4a9c77k9Ayjk8VEmcrCg==", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-ssm/-/client-ssm-3.583.0.tgz", + "integrity": "sha512-qdbFmU50EXKIIGvU0KVkgOTlzm6bN92LAJnizDo8Fz0nQUYKLF3GdNBXpD8FIu6Y5JqO6yAvl0mEch0PvyyejA==", "dependencies": { "@aws-crypto/sha256-browser": "3.0.0", "@aws-crypto/sha256-js": "3.0.0", - "@aws-sdk/client-sts": "3.552.0", - "@aws-sdk/core": "3.552.0", - "@aws-sdk/credential-provider-node": "3.552.0", - "@aws-sdk/middleware-host-header": "3.535.0", - "@aws-sdk/middleware-logger": "3.535.0", - "@aws-sdk/middleware-recursion-detection": "3.535.0", - "@aws-sdk/middleware-user-agent": "3.540.0", - "@aws-sdk/region-config-resolver": "3.535.0", - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-endpoints": "3.540.0", - "@aws-sdk/util-user-agent-browser": "3.535.0", - "@aws-sdk/util-user-agent-node": "3.535.0", - "@smithy/config-resolver": "^2.2.0", - "@smithy/core": "^1.4.2", - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/hash-node": "^2.2.0", - "@smithy/invalid-dependency": "^2.2.0", - "@smithy/middleware-content-length": "^2.2.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-retry": "^2.3.1", - "@smithy/middleware-serde": "^2.3.0", - "@smithy/middleware-stack": "^2.2.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/node-http-handler": "^2.5.0", - 
"@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", - "@smithy/util-base64": "^2.3.0", - "@smithy/util-body-length-browser": "^2.2.0", - "@smithy/util-body-length-node": "^2.3.0", - "@smithy/util-defaults-mode-browser": "^2.2.1", - "@smithy/util-defaults-mode-node": "^2.3.1", - "@smithy/util-endpoints": "^1.2.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-retry": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", - "@smithy/util-waiter": "^2.2.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + 
"@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "@smithy/util-waiter": "^3.0.0", "tslib": "^2.6.2", "uuid": "^9.0.1" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/client-ssm/node_modules/uuid": { @@ -440,485 +747,502 @@ } }, "node_modules/@aws-sdk/client-sso": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-sso/-/client-sso-3.552.0.tgz", - "integrity": "sha512-IAjRj5gcuyoPe/OhciMY/UyW8C1kyXSUJFagxvbeSv8q0mEfaPBVjGgz2xSYRFhhZr3gFlGCS9SiukwOL2/VoA==", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-sso/-/client-sso-3.583.0.tgz", + "integrity": "sha512-FNJ2MmiBtZZwgkj4+GLVrzqwmD6D8FBptrFZk7PnGkSf7v1Q8txYNI6gY938RRhYJ4lBW4cNbhPvWoDxAl90Hw==", "dependencies": { "@aws-crypto/sha256-browser": "3.0.0", "@aws-crypto/sha256-js": "3.0.0", - "@aws-sdk/core": "3.552.0", - "@aws-sdk/middleware-host-header": "3.535.0", - "@aws-sdk/middleware-logger": "3.535.0", - "@aws-sdk/middleware-recursion-detection": "3.535.0", - "@aws-sdk/middleware-user-agent": "3.540.0", - "@aws-sdk/region-config-resolver": "3.535.0", - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-endpoints": "3.540.0", - "@aws-sdk/util-user-agent-browser": "3.535.0", - "@aws-sdk/util-user-agent-node": "3.535.0", - "@smithy/config-resolver": "^2.2.0", - "@smithy/core": "^1.4.2", - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/hash-node": "^2.2.0", - "@smithy/invalid-dependency": "^2.2.0", - "@smithy/middleware-content-length": "^2.2.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-retry": "^2.3.1", - "@smithy/middleware-serde": "^2.3.0", - "@smithy/middleware-stack": "^2.2.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/node-http-handler": "^2.5.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", - "@smithy/util-base64": "^2.3.0", - 
"@smithy/util-body-length-browser": "^2.2.0", - "@smithy/util-body-length-node": "^2.3.0", - "@smithy/util-defaults-mode-browser": "^2.2.1", - "@smithy/util-defaults-mode-node": "^2.3.1", - "@smithy/util-endpoints": "^1.2.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-retry": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/client-sso-oidc": { - "version": "3.552.0", - "resolved": 
"https://registry.npmjs.org/@aws-sdk/client-sso-oidc/-/client-sso-oidc-3.552.0.tgz", - "integrity": "sha512-6JYTgN/n4xTm3Z+JhEZq06pyYsgo7heYDmR+0smmauQS02Eu8lvUc2jPs/0GDAmty7J4tq3gS6TRwvf7181C2w==", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-sso-oidc/-/client-sso-oidc-3.583.0.tgz", + "integrity": "sha512-LO3wmrFXPi2kNE46lD1XATfRrvdNxXd4DlTFouoWmr7lvqoUkcbmtkV2r/XChZA2z0HiDauphC1e8b8laJVeSg==", "dependencies": { "@aws-crypto/sha256-browser": "3.0.0", "@aws-crypto/sha256-js": "3.0.0", - "@aws-sdk/client-sts": "3.552.0", - "@aws-sdk/core": "3.552.0", - "@aws-sdk/middleware-host-header": "3.535.0", - "@aws-sdk/middleware-logger": "3.535.0", - "@aws-sdk/middleware-recursion-detection": "3.535.0", - "@aws-sdk/middleware-user-agent": "3.540.0", - "@aws-sdk/region-config-resolver": "3.535.0", - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-endpoints": "3.540.0", - "@aws-sdk/util-user-agent-browser": "3.535.0", - "@aws-sdk/util-user-agent-node": "3.535.0", - "@smithy/config-resolver": "^2.2.0", - "@smithy/core": "^1.4.2", - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/hash-node": "^2.2.0", - "@smithy/invalid-dependency": "^2.2.0", - "@smithy/middleware-content-length": "^2.2.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-retry": "^2.3.1", - "@smithy/middleware-serde": "^2.3.0", - "@smithy/middleware-stack": "^2.2.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/node-http-handler": "^2.5.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", - "@smithy/util-base64": "^2.3.0", - "@smithy/util-body-length-browser": "^2.2.0", - "@smithy/util-body-length-node": "^2.3.0", - "@smithy/util-defaults-mode-browser": "^2.2.1", - "@smithy/util-defaults-mode-node": "^2.3.1", - "@smithy/util-endpoints": "^1.2.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-retry": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", + 
"@aws-sdk/client-sts": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + "@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "@aws-sdk/credential-provider-node": "^3.552.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/client-sts": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-sts/-/client-sts-3.552.0.tgz", - "integrity": "sha512-rOZlAj8GyFgUBESyKezes67A8Kj5+KjRhfBHMXrkcM5h9UOIz5q7QdkSQOmzWwRoPDmmAqb6t+y041/76TnPEg==", + "version": "3.583.0", + "resolved": 
"https://registry.npmjs.org/@aws-sdk/client-sts/-/client-sts-3.583.0.tgz", + "integrity": "sha512-xDMxiemPDWr9dY2Q4AyixkRnk/hvS6fs6OWxuVCz1WO47YhaAfOsEGAgQMgDLLaOfj/oLU5D14uTNBEPGh4rBA==", "dependencies": { "@aws-crypto/sha256-browser": "3.0.0", "@aws-crypto/sha256-js": "3.0.0", - "@aws-sdk/core": "3.552.0", - "@aws-sdk/middleware-host-header": "3.535.0", - "@aws-sdk/middleware-logger": "3.535.0", - "@aws-sdk/middleware-recursion-detection": "3.535.0", - "@aws-sdk/middleware-user-agent": "3.540.0", - "@aws-sdk/region-config-resolver": "3.535.0", - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-endpoints": "3.540.0", - "@aws-sdk/util-user-agent-browser": "3.535.0", - "@aws-sdk/util-user-agent-node": "3.535.0", - "@smithy/config-resolver": "^2.2.0", - "@smithy/core": "^1.4.2", - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/hash-node": "^2.2.0", - "@smithy/invalid-dependency": "^2.2.0", - "@smithy/middleware-content-length": "^2.2.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-retry": "^2.3.1", - "@smithy/middleware-serde": "^2.3.0", - "@smithy/middleware-stack": "^2.2.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/node-http-handler": "^2.5.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", - "@smithy/util-base64": "^2.3.0", - "@smithy/util-body-length-browser": "^2.2.0", - "@smithy/util-body-length-node": "^2.3.0", - "@smithy/util-defaults-mode-browser": "^2.2.1", - "@smithy/util-defaults-mode-node": "^2.3.1", - "@smithy/util-endpoints": "^1.2.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-retry": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", + "@aws-sdk/client-sso-oidc": "3.583.0", + "@aws-sdk/core": "3.582.0", + "@aws-sdk/credential-provider-node": "3.583.0", + "@aws-sdk/middleware-host-header": "3.577.0", + "@aws-sdk/middleware-logger": "3.577.0", + "@aws-sdk/middleware-recursion-detection": "3.577.0", + 
"@aws-sdk/middleware-user-agent": "3.583.0", + "@aws-sdk/region-config-resolver": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@aws-sdk/util-user-agent-browser": "3.577.0", + "@aws-sdk/util-user-agent-node": "3.577.0", + "@smithy/config-resolver": "^3.0.0", + "@smithy/core": "^2.0.1", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/hash-node": "^3.0.0", + "@smithy/invalid-dependency": "^3.0.0", + "@smithy/middleware-content-length": "^3.0.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.1", + "@smithy/util-defaults-mode-node": "^3.0.1", + "@smithy/util-endpoints": "^2.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "@aws-sdk/credential-provider-node": "^3.552.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/core": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/core/-/core-3.552.0.tgz", - "integrity": "sha512-T7ovljf6fCvIHG9SOSZqGmbVbqZPXPywLAcU+onk/fYLZJj6kjfzKZzSAUBI0nO1OKpuP/nCHaCp51NLWNqsnw==", - "dependencies": { - "@smithy/core": "^1.4.2", - "@smithy/protocol-http": "^3.3.0", - "@smithy/signature-v4": "^2.2.1", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", + "version": "3.582.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/core/-/core-3.582.0.tgz", + "integrity": 
"sha512-ofmD96IQc9g1dbyqlCyxu5fCG7kIl9p1NoN5+vGBUyLdbmPCV3Pdg99nRHYEJuv2MgGx5AUFGDPMHcqbJpnZIw==", + "dependencies": { + "@smithy/core": "^2.0.1", + "@smithy/protocol-http": "^4.0.0", + "@smithy/signature-v4": "^3.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", "fast-xml-parser": "4.2.5", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/credential-provider-env": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-env/-/credential-provider-env-3.535.0.tgz", - "integrity": "sha512-XppwO8c0GCGSAvdzyJOhbtktSEaShg14VJKg8mpMa1XcgqzmcqqHQjtDWbx5rZheY1VdpXZhpEzJkB6LpQejpA==", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-env/-/credential-provider-env-3.577.0.tgz", + "integrity": "sha512-Jxu255j0gToMGEiqufP8ZtKI8HW90lOLjwJ3LrdlD/NLsAY0tOQf1fWc53u28hWmmNGMxmCrL2p66IOgMDhDUw==", "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/types": "^2.12.0", + "@aws-sdk/types": "3.577.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/credential-provider-http": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-http/-/credential-provider-http-3.552.0.tgz", - "integrity": "sha512-vsmu7Cz1i45pFEqzVb4JcFmAmVnWFNLsGheZc8SCptlqCO5voETrZZILHYIl4cjKkSDk3pblBOf0PhyjqWW6WQ==", - "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/node-http-handler": "^2.5.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/util-stream": "^2.2.0", + "version": "3.582.0", + "resolved": 
"https://registry.npmjs.org/@aws-sdk/credential-provider-http/-/credential-provider-http-3.582.0.tgz", + "integrity": "sha512-kGOUKw5ryPkDIYB69PjK3SicVLTbWB06ouFN2W1EvqUJpkQGPAUGzYcomKtt3mJaCTf/1kfoaHwARAl6KKSP8Q==", + "dependencies": { + "@aws-sdk/types": "3.577.0", + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/util-stream": "^3.0.1", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/credential-provider-ini": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.552.0.tgz", - "integrity": "sha512-/Z9y+P4M/eZA/5hGH3Kwm6TOIAiVtsIo7sC/x7hZPXn/IMJQ2QmxzeMozVqMWzx8+2zUA/dmgmWnHoVvH4R/jg==", - "dependencies": { - "@aws-sdk/client-sts": "3.552.0", - "@aws-sdk/credential-provider-env": "3.535.0", - "@aws-sdk/credential-provider-process": "3.535.0", - "@aws-sdk/credential-provider-sso": "3.552.0", - "@aws-sdk/credential-provider-web-identity": "3.552.0", - "@aws-sdk/types": "3.535.0", - "@smithy/credential-provider-imds": "^2.3.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/shared-ini-file-loader": "^2.4.0", - "@smithy/types": "^2.12.0", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.583.0.tgz", + "integrity": "sha512-8I0oWNg/yps6ctjhEeL/qJ9BIa/+xXP7RPDQqFKZ2zBkWbmLLOoMWXRvl8uKUBD6qCe+DGmcu9skfVXeXSesEQ==", + "dependencies": { + "@aws-sdk/credential-provider-env": "3.577.0", + "@aws-sdk/credential-provider-process": "3.577.0", + "@aws-sdk/credential-provider-sso": "3.583.0", + "@aws-sdk/credential-provider-web-identity": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@smithy/credential-provider-imds": "^3.0.0", + "@smithy/property-provider": "^3.0.0", + 
"@smithy/shared-ini-file-loader": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-sts": "^3.583.0" } }, "node_modules/@aws-sdk/credential-provider-node": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-node/-/credential-provider-node-3.552.0.tgz", - "integrity": "sha512-GUH5awokiR4FcALeQxOrNZtDKJgzEza6NW9HYxAaHt0LNSHCjG21zMFDPYAXlDjlPP9AIdWmVvYrfJoPJI28AQ==", - "dependencies": { - "@aws-sdk/credential-provider-env": "3.535.0", - "@aws-sdk/credential-provider-http": "3.552.0", - "@aws-sdk/credential-provider-ini": "3.552.0", - "@aws-sdk/credential-provider-process": "3.535.0", - "@aws-sdk/credential-provider-sso": "3.552.0", - "@aws-sdk/credential-provider-web-identity": "3.552.0", - "@aws-sdk/types": "3.535.0", - "@smithy/credential-provider-imds": "^2.3.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/shared-ini-file-loader": "^2.4.0", - "@smithy/types": "^2.12.0", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-node/-/credential-provider-node-3.583.0.tgz", + "integrity": "sha512-yBNypBXny7zJH85SzxDj8s1mbLXv9c/Vbq0qR3R3POj2idZ6ywB/qlIRC1XwBuv49Wvg8kA1wKXk3K3jrpcVIw==", + "dependencies": { + "@aws-sdk/credential-provider-env": "3.577.0", + "@aws-sdk/credential-provider-http": "3.582.0", + "@aws-sdk/credential-provider-ini": "3.583.0", + "@aws-sdk/credential-provider-process": "3.577.0", + "@aws-sdk/credential-provider-sso": "3.583.0", + "@aws-sdk/credential-provider-web-identity": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@smithy/credential-provider-imds": "^3.0.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/shared-ini-file-loader": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/credential-provider-process": { - "version": "3.535.0", - 
"resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-process/-/credential-provider-process-3.535.0.tgz", - "integrity": "sha512-9O1OaprGCnlb/kYl8RwmH7Mlg8JREZctB8r9sa1KhSsWFq/SWO0AuJTyowxD7zL5PkeS4eTvzFFHWCa3OO5epA==", - "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/shared-ini-file-loader": "^2.4.0", - "@smithy/types": "^2.12.0", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-process/-/credential-provider-process-3.577.0.tgz", + "integrity": "sha512-Gin6BWtOiXxIgITrJ3Nwc+Y2P1uVT6huYR4EcbA/DJUPWyO0n9y5UFLewPvVbLkRn15JeEqErBLUrHclkiOKtw==", + "dependencies": { + "@aws-sdk/types": "3.577.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/shared-ini-file-loader": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/credential-provider-sso": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.552.0.tgz", - "integrity": "sha512-h+xyWG4HMqf4SFzilpK1u50fO2aIBRg3nwuXRy9v5E2qdpJgZS2JXibO1jNHd+JXq4qjs2YG1WK2fGcdxZJ2bQ==", - "dependencies": { - "@aws-sdk/client-sso": "3.552.0", - "@aws-sdk/token-providers": "3.552.0", - "@aws-sdk/types": "3.535.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/shared-ini-file-loader": "^2.4.0", - "@smithy/types": "^2.12.0", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.583.0.tgz", + "integrity": "sha512-G/1EvL9tBezSiU+06tG4K/kOvFfPjnheT4JSXqjPM7+vjKzgp2jxp1J9MMd69zs4jVWon932zMeGgjrCplzMEg==", + "dependencies": { + "@aws-sdk/client-sso": "3.583.0", + "@aws-sdk/token-providers": "3.577.0", + "@aws-sdk/types": "3.577.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/shared-ini-file-loader": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - 
"node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/credential-provider-web-identity": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.552.0.tgz", - "integrity": "sha512-6jXfXaLKDy3S4LHR8ZXIIZw5B80uiYjnPp4bmqmY18LGeoZxmkJ/SfkwypVruezCu+GpA+IubmIbc5TQi6BCAw==", - "dependencies": { - "@aws-sdk/client-sts": "3.552.0", - "@aws-sdk/types": "3.535.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/types": "^2.12.0", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.577.0.tgz", + "integrity": "sha512-ZGHGNRaCtJJmszb9UTnC7izNCtRUttdPlLdMkh41KPS32vfdrBDHs1JrpbZijItRj1xKuOXsiYSXLAaHGcLh8Q==", + "dependencies": { + "@aws-sdk/types": "3.577.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-sts": "^3.577.0" } }, "node_modules/@aws-sdk/endpoint-cache": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/endpoint-cache/-/endpoint-cache-3.535.0.tgz", - "integrity": "sha512-sPG2l00iVuporK9AmPWq4UBcJURs2RN+vKA8QLRQANmQS3WFHWHamvGltxCjK79izkeqri882V4XlFpZfWhemA==", + "version": "3.572.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/endpoint-cache/-/endpoint-cache-3.572.0.tgz", + "integrity": "sha512-CzuRWMj/xtN9p9eP915nlPmlyniTzke732Ow/M60++gGgB3W+RtZyFftw3TEx+NzNhd1tH54dEcGiWdiNaBz3Q==", "dependencies": { "mnemonist": "0.38.3", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/middleware-endpoint-discovery": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-endpoint-discovery/-/middleware-endpoint-discovery-3.535.0.tgz", - "integrity": 
"sha512-+EsqJB5A15RoTf0HxUdknF3hp+2WDg0HWc+QERUctzzYXy9l5LIQjmhQ96cWDyFttKmHE+4h6fjMZjJEeWOeYQ==", - "dependencies": { - "@aws-sdk/endpoint-cache": "3.535.0", - "@aws-sdk/types": "3.535.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/types": "^2.12.0", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-endpoint-discovery/-/middleware-endpoint-discovery-3.577.0.tgz", + "integrity": "sha512-duLI1awiBV7xyi+SQQnFy0J2s9Fhk5miHR5LsyEpk4p4M1Zi9hbBMg3wOdoxGCnNGn56PcP70isD79BfrbWwlA==", + "dependencies": { + "@aws-sdk/endpoint-cache": "3.572.0", + "@aws-sdk/types": "3.577.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/middleware-host-header": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-host-header/-/middleware-host-header-3.535.0.tgz", - "integrity": "sha512-0h6TWjBWtDaYwHMQJI9ulafeS4lLaw1vIxRjbpH0svFRt6Eve+Sy8NlVhECfTU2hNz/fLubvrUxsXoThaLBIew==", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-host-header/-/middleware-host-header-3.577.0.tgz", + "integrity": "sha512-9ca5MJz455CODIVXs0/sWmJm7t3QO4EUa1zf8pE8grLpzf0J94bz/skDWm37Pli13T3WaAQBHCTiH2gUVfCsWg==", "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/types": "^2.12.0", + "@aws-sdk/types": "3.577.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/middleware-logger": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-logger/-/middleware-logger-3.535.0.tgz", - "integrity": "sha512-huNHpONOrEDrdRTvSQr1cJiRMNf0S52NDXtaPzdxiubTkP+vni2MohmZANMOai/qT0olmEVX01LhZ0ZAOgmg6A==", + "version": 
"3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-logger/-/middleware-logger-3.577.0.tgz", + "integrity": "sha512-aPFGpGjTZcJYk+24bg7jT4XdIp42mFXSuPt49lw5KygefLyJM/sB0bKKqPYYivW0rcuZ9brQ58eZUNthrzYAvg==", "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/types": "^2.12.0", + "@aws-sdk/types": "3.577.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/middleware-recursion-detection": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.535.0.tgz", - "integrity": "sha512-am2qgGs+gwqmR4wHLWpzlZ8PWhm4ktj5bYSgDrsOfjhdBlWNxvPoID9/pDAz5RWL48+oH7I6SQzMqxXsFDikrw==", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.577.0.tgz", + "integrity": "sha512-pn3ZVEd2iobKJlR3H+bDilHjgRnNrQ6HMmK9ZzZw89Ckn3Dcbv48xOv4RJvu0aU8SDLl/SNCxppKjeLDTPGBNA==", "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/types": "^2.12.0", + "@aws-sdk/types": "3.577.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/middleware-sdk-ec2": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-sdk-ec2/-/middleware-sdk-ec2-3.552.0.tgz", - "integrity": "sha512-/SILaJnX9+NHuDIvFMbBASp1HVy3VxhWcoB4yti4iiwfiitXX0SOWIXWTlMK1EaKyNs5TXeYAUUi1085xkaPEw==", - "dependencies": { - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-format-url": "3.535.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/protocol-http": "^3.3.0", - "@smithy/signature-v4": "^2.2.1", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", + "version": "3.582.0", + "resolved": 
"https://registry.npmjs.org/@aws-sdk/middleware-sdk-ec2/-/middleware-sdk-ec2-3.582.0.tgz", + "integrity": "sha512-0MXufDYUzOJk0K0fLwRk7Sq1L0EQI5qAngkeFuY8V66ZKWlb/lE0OmEei9CY2/fBsI4Aym0grRB6owGTETvpBQ==", + "dependencies": { + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-format-url": "3.577.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/signature-v4": "^3.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/middleware-sdk-rds": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-sdk-rds/-/middleware-sdk-rds-3.552.0.tgz", - "integrity": "sha512-PW5EMwE5YX0nz5uzYxmo1doZR7w5kryy72yDfNL+XgS4YdLUvvoGmGQ4UOUqbyXYczgrGSMCMsTAFAsUwHbMzg==", - "dependencies": { - "@aws-sdk/types": "3.535.0", - "@aws-sdk/util-format-url": "3.535.0", - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/protocol-http": "^3.3.0", - "@smithy/signature-v4": "^2.2.1", - "@smithy/types": "^2.12.0", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-sdk-rds/-/middleware-sdk-rds-3.577.0.tgz", + "integrity": "sha512-pLEi04MbPBX8M5aFuJWl1TOIQkWAshgdLCfFbVB8TVarx2t6WwOCUdq4URYpQilPr4ESY4ls++DfAtkJil/iGQ==", + "dependencies": { + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-format-url": "3.577.0", + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/signature-v4": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/middleware-user-agent": { - "version": "3.540.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.540.0.tgz", - "integrity": "sha512-8Rd6wPeXDnOYzWj1XCmOKcx/Q87L0K1/EHqOBocGjLVbN3gmRxBvpmR1pRTjf7IsWfnnzN5btqtcAkfDPYQUMQ==", - "dependencies": { - "@aws-sdk/types": "3.535.0", - 
"@aws-sdk/util-endpoints": "3.540.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/types": "^2.12.0", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.583.0.tgz", + "integrity": "sha512-xVNXXXDWvBVI/AeVtSdA9SVumqxiZaESk/JpUn9GMkmtTKfter0Cweap+1iQ9j8bRAO0vNhmIkbcvdB1S4WVUw==", + "dependencies": { + "@aws-sdk/types": "3.577.0", + "@aws-sdk/util-endpoints": "3.583.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/region-config-resolver": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/region-config-resolver/-/region-config-resolver-3.535.0.tgz", - "integrity": "sha512-IXOznDiaItBjsQy4Fil0kzX/J3HxIOknEphqHbOfUf+LpA5ugcsxuQQONrbEQusCBnfJyymrldBvBhFmtlU9Wg==", - "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/types": "^2.12.0", - "@smithy/util-config-provider": "^2.3.0", - "@smithy/util-middleware": "^2.2.0", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/region-config-resolver/-/region-config-resolver-3.577.0.tgz", + "integrity": "sha512-4ChCFACNwzqx/xjg3zgFcW8Ali6R9C95cFECKWT/7CUM1D0MGvkclSH2cLarmHCmJgU6onKkJroFtWp0kHhgyg==", + "dependencies": { + "@aws-sdk/types": "3.577.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/types": "^3.0.0", + "@smithy/util-config-provider": "^3.0.0", + "@smithy/util-middleware": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/token-providers": { - "version": "3.552.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/token-providers/-/token-providers-3.552.0.tgz", - "integrity": "sha512-5dNE2KqtgkT+DQXfkSmzmVSB72LpjSIK86lLD9LeQ1T+b0gfEd74MAl/AGC15kQdKLg5I3LlN5q32f1fkmYR8g==", - "dependencies": { - "@aws-sdk/client-sso-oidc": "3.552.0", - 
"@aws-sdk/types": "3.535.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/shared-ini-file-loader": "^2.4.0", - "@smithy/types": "^2.12.0", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/token-providers/-/token-providers-3.577.0.tgz", + "integrity": "sha512-0CkIZpcC3DNQJQ1hDjm2bdSy/Xjs7Ny5YvSsacasGOkNfk+FdkiQy6N67bZX3Zbc9KIx+Nz4bu3iDeNSNplnnQ==", + "dependencies": { + "@aws-sdk/types": "3.577.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/shared-ini-file-loader": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-sso-oidc": "^3.577.0" } }, "node_modules/@aws-sdk/types": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/types/-/types-3.535.0.tgz", - "integrity": "sha512-aY4MYfduNj+sRR37U7XxYR8wemfbKP6lx00ze2M2uubn7mZotuVrWYAafbMSXrdEMSToE5JDhr28vArSOoLcSg==", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/types/-/types-3.577.0.tgz", + "integrity": "sha512-FT2JZES3wBKN/alfmhlo+3ZOq/XJ0C7QOZcDNrpKjB0kqYoKjhVKZ/Hx6ArR0czkKfHzBBEs6y40ebIHx2nSmA==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/util-dynamodb": { + "version": "3.585.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-dynamodb/-/util-dynamodb-3.585.0.tgz", + "integrity": "sha512-VqHZ8atNWaQwJ5tuKYkEwArhcAAZmFeDM/tE5mhUIzQGvXEmU58H6GsBw43ZVFYERMlUwrsp9fR2YxRprH7Iow==", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-dynamodb": "^3.585.0" } }, "node_modules/@aws-sdk/util-endpoints": { - "version": "3.540.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-endpoints/-/util-endpoints-3.540.0.tgz", - "integrity": 
"sha512-1kMyQFAWx6f8alaI6UT65/5YW/7pDWAKAdNwL6vuJLea03KrZRX3PMoONOSJpAS5m3Ot7HlWZvf3wZDNTLELZw==", + "version": "3.583.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-endpoints/-/util-endpoints-3.583.0.tgz", + "integrity": "sha512-ZC9mb2jq6BFXPYsUsD2tmYcnlmd+9PGNwnFNn8jk4abna5Jjk2wDknN81ybktmBR5ttN9W8ugmktuKtvAMIDCQ==", "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/types": "^2.12.0", - "@smithy/util-endpoints": "^1.2.0", + "@aws-sdk/types": "3.577.0", + "@smithy/types": "^3.0.0", + "@smithy/util-endpoints": "^2.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/util-format-url": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-format-url/-/util-format-url-3.535.0.tgz", - "integrity": "sha512-ElbNkm0bddu53CuW44Iuux1ZbTV50fydbSh/4ypW3LrmUvHx193ogj0HXQ7X26kmmo9rXcsrLdM92yIeTjidVg==", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-format-url/-/util-format-url-3.577.0.tgz", + "integrity": "sha512-SyEGC2J+y/krFRuPgiF02FmMYhqbiIkOjDE6k4nYLJQRyS6XEAGxZoG+OHeOVEM+bsDgbxokXZiM3XKGu6qFIg==", "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/querystring-builder": "^2.2.0", - "@smithy/types": "^2.12.0", + "@aws-sdk/types": "3.577.0", + "@smithy/querystring-builder": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@aws-sdk/util-locate-window": { @@ -933,28 +1257,28 @@ } }, "node_modules/@aws-sdk/util-user-agent-browser": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.535.0.tgz", - "integrity": "sha512-RWMcF/xV5n+nhaA/Ff5P3yNP3Kur/I+VNZngog4TEs92oB/nwOdAg/2JL8bVAhUbMrjTjpwm7PItziYFQoqyig==", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.577.0.tgz", + "integrity": 
"sha512-zEAzHgR6HWpZOH7xFgeJLc6/CzMcx4nxeQolZxVZoB5pPaJd3CjyRhZN0xXeZB0XIRCWmb4yJBgyiugXLNMkLA==", "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/types": "^2.12.0", + "@aws-sdk/types": "3.577.0", + "@smithy/types": "^3.0.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "node_modules/@aws-sdk/util-user-agent-node": { - "version": "3.535.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.535.0.tgz", - "integrity": "sha512-dRek0zUuIT25wOWJlsRm97nTkUlh1NDcLsQZIN2Y8KxhwoXXWtJs5vaDPT+qAg+OpcNj80i1zLR/CirqlFg/TQ==", + "version": "3.577.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.577.0.tgz", + "integrity": "sha512-XqvtFjbSMtycZTWVwDe8DRWovuoMbA54nhUoZwVU6rW9OSD6NZWGR512BUGHFaWzW0Wg8++Dj10FrKTG2XtqfA==", "dependencies": { - "@aws-sdk/types": "3.535.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/types": "^2.12.0", + "@aws-sdk/types": "3.577.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" }, "peerDependencies": { "aws-crt": ">=1.0.0" @@ -973,187 +1297,6 @@ "tslib": "^2.3.1" } }, - "node_modules/@aws-solutions-constructs/aws-lambda-dynamodb": { - "version": "2.54.1", - "resolved": "https://registry.npmjs.org/@aws-solutions-constructs/aws-lambda-dynamodb/-/aws-lambda-dynamodb-2.54.1.tgz", - "integrity": "sha512-mVk4dNNRHcPIOxEgTAo6laSoFp20Z+rHaGYuyTbsbihjYQ1T2AMiQIneOik04tA1qoB+L2jMOyjsZxqYCJbxGQ==", - "dependencies": { - "@aws-cdk/integ-tests-alpha": "2.135.0-alpha.0", - "@aws-solutions-constructs/core": "2.54.1", - "constructs": "^10.0.0" - }, - "peerDependencies": { - "@aws-solutions-constructs/core": "2.54.1", - "aws-cdk-lib": "^2.135.0", - "constructs": "^10.0.0" - } - }, - "node_modules/@aws-solutions-constructs/core": { - "version": "2.54.1", - "resolved": "https://registry.npmjs.org/@aws-solutions-constructs/core/-/core-2.54.1.tgz", - 
"integrity": "sha512-e2U9uIPyMtRXe8xSgKvVxlPce70oEzLlmtckVzla1rfBbBsZnPC3E+zU4zwr3HHEPXaJdLgxsbrygBUAwsdFEw==", - "bundleDependencies": [ - "deepmerge", - "npmlog", - "deep-diff" - ], - "dependencies": { - "@aws-cdk/integ-tests-alpha": "2.135.0-alpha.0", - "constructs": "^10.0.0", - "deep-diff": "^1.0.2", - "deepmerge": "^4.0.0", - "npmlog": "^7.0.0" - }, - "peerDependencies": { - "aws-cdk-lib": "^2.135.0", - "constructs": "^10.0.0" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/ansi-regex": { - "version": "5.0.1", - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/aproba": { - "version": "2.0.0", - "inBundle": true, - "license": "ISC" - }, - "node_modules/@aws-solutions-constructs/core/node_modules/are-we-there-yet": { - "version": "4.0.2", - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/color-support": { - "version": "1.1.3", - "inBundle": true, - "license": "ISC", - "bin": { - "color-support": "bin.js" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/console-control-strings": { - "version": "1.1.0", - "inBundle": true, - "license": "ISC" - }, - "node_modules/@aws-solutions-constructs/core/node_modules/deep-diff": { - "version": "1.0.2", - "inBundle": true, - "license": "MIT" - }, - "node_modules/@aws-solutions-constructs/core/node_modules/deepmerge": { - "version": "4.3.1", - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/emoji-regex": { - "version": "8.0.0", - "inBundle": true, - "license": "MIT" - }, - "node_modules/@aws-solutions-constructs/core/node_modules/gauge": { - "version": "5.0.1", - "inBundle": true, - "license": "ISC", - "dependencies": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - 
"console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^4.0.1", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/has-unicode": { - "version": "2.0.1", - "inBundle": true, - "license": "ISC" - }, - "node_modules/@aws-solutions-constructs/core/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/npmlog": { - "version": "7.0.1", - "inBundle": true, - "license": "ISC", - "dependencies": { - "are-we-there-yet": "^4.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^5.0.0", - "set-blocking": "^2.0.0" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/set-blocking": { - "version": "2.0.0", - "inBundle": true, - "license": "ISC" - }, - "node_modules/@aws-solutions-constructs/core/node_modules/signal-exit": { - "version": "4.1.0", - "inBundle": true, - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/string-width": { - "version": "4.2.3", - "inBundle": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/strip-ansi": { - "version": "6.0.1", - "inBundle": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@aws-solutions-constructs/core/node_modules/wide-align": { - "version": "1.1.5", - "inBundle": true, - "license": "ISC", - "dependencies": { - 
"string-width": "^1.0.2 || 2 || 3 || 4" - } - }, "node_modules/@babel/code-frame": { "version": "7.22.13", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", @@ -1753,55 +1896,722 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/traverse": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", - "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "node_modules/@babel/traverse": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", + "debug": "^4.1.0", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse/node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/types": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, + "node_modules/@cdklabs/cdk-ssm-documents": { + "version": "0.0.41", + "resolved": "https://registry.npmjs.org/@cdklabs/cdk-ssm-documents/-/cdk-ssm-documents-0.0.41.tgz", + "integrity": "sha512-eJVq1rr9gmAFn1xqn3qm1p7kgTOZLJdpaFRwJOQLLnAqdp7O6CRj6pWWUH25ZxPR6B6CHA5firo4IAeyNKzN1Q==", + "bundleDependencies": [ + "aws-sdk", + "immutable", + "js-yaml", + "jsonpath", + "python-shell", + "synchronized-promise" + ], + "dev": true, + "dependencies": { + "aws-sdk": "^2.1135.0", + "immutable": "^4.0.0", + "js-yaml": "^4.1.0", + "jsonpath": "^1.1.1", + "python-shell": "^3.0.1", + "synchronized-promise": "^0.3.1" + }, + "peerDependencies": { + "aws-cdk-lib": "^2.87.0", + "constructs": "^10.0.5" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/argparse": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "Python-2.0" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/available-typed-arrays": { + "version": "1.0.5", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/aws-sdk": { + "version": "2.1358.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.16.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "util": "^0.12.4", + "uuid": "8.0.0", + "xml2js": "0.5.0" + }, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/base64-js": { + "version": "1.5.1", + "dev": true, + "funding": [ + { + "type": "github", + "url": 
"https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/bindings": { + "version": "1.5.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "file-uri-to-path": "1.0.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/buffer": { + "version": "4.9.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/buffer/node_modules/ieee754": { + "version": "1.2.1", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "inBundle": true, + "license": "BSD-3-Clause" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/call-bind": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/deasync": { + "version": "0.1.26", + "dev": true, + "hasInstallScript": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "bindings": "^1.5.0", + "node-addon-api": "^1.7.1" + }, + "engines": { + "node": ">=0.11.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/deep-is": { + "version": "0.1.4", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/escodegen": { + "version": "1.14.3", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + 
"dependencies": { + "esprima": "^4.0.1", + "estraverse": "^4.2.0", + "esutils": "^2.0.2", + "optionator": "^0.8.1" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=4.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/escodegen/node_modules/estraverse": { + "version": "4.3.0", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/esprima": { + "version": "4.0.1", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/esutils": { + "version": "2.0.3", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/events": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/fast-levenshtein": { + "version": "2.0.6", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/file-uri-to-path": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/for-each": { + "version": "0.3.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.3" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/for-each/node_modules/is-callable": { + "version": "1.2.7", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/@cdklabs/cdk-ssm-documents/node_modules/function-bind": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/get-intrinsic": { + "version": "1.1.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/gopd": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/gopd/node_modules/get-intrinsic": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/has": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/has-symbols": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/has-tostringtag": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/ieee754": { + "version": "1.1.13", + "dev": true, + 
"inBundle": true, + "license": "BSD-3-Clause" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/immutable": { + "version": "4.1.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/inherits": { + "version": "2.0.4", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/is-arguments": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/is-generator-function": { + "version": "1.0.10", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/is-typed-array": { + "version": "1.1.10", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/isarray": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/jmespath": { + "version": "0.16.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/js-yaml": { + "version": "4.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + 
"js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/jsonpath": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "esprima": "1.2.2", + "static-eval": "2.0.2", + "underscore": "1.12.1" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/jsonpath/node_modules/esprima": { + "version": "1.2.2", + "dev": true, + "inBundle": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/node-addon-api": { + "version": "1.7.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/optionator": { + "version": "0.8.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/optionator/node_modules/levn": { + "version": "0.3.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/prelude-ls": { + "version": "1.1.2", + "dev": true, + "inBundle": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/python-shell": { + "version": "3.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/querystring": { + "version": "0.2.0", + "dev": true, + "inBundle": true, + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/sax": { + "version": "1.2.1", + 
"dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "inBundle": true, + "license": "BSD-3-Clause", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/static-eval": { + "version": "2.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "escodegen": "^1.8.1" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/synchronized-promise": { + "version": "0.3.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "deasync": "^0.1.15" + }, + "engines": { + "node": ">=4.2.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/type-check": { + "version": "0.3.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/underscore": { + "version": "1.12.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/url": { + "version": "0.10.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/url/node_modules/punycode": { + "version": "1.3.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/util": { + "version": "0.12.5", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/uuid": { + "version": "8.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "bin": { + "uuid": 
"dist/bin/uuid" + } + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/which-typed-array": { + "version": "1.1.9", "dev": true, + "inBundle": true, + "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.23.0", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-function-name": "^7.23.0", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.0", - "@babel/types": "^7.23.0", - "debug": "^4.1.0", - "globals": "^11.1.0" + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.0", + "is-typed-array": "^1.1.10" }, "engines": { - "node": ">=6.9.0" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/@babel/traverse/node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/word-wrap": { + "version": "1.2.4", "dev": true, + "inBundle": true, + "license": "MIT", "engines": { - "node": ">=4" + "node": ">=0.10.0" } }, - "node_modules/@babel/types": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", - "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/xml2js": { + "version": "0.5.0", "dev": true, + "inBundle": true, + "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.20", - "to-fast-properties": "^2.0.0" + "sax": ">=0.6.0", + "xmlbuilder": "~11.0.0" }, "engines": { - "node": ">=6.9.0" + "node": ">=4.0.0" } }, - 
"node_modules/@bcoe/v8-coverage": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", - "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", - "dev": true + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/xml2js/node_modules/sax": { + "version": "1.2.4", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/@cdklabs/cdk-ssm-documents/node_modules/xml2js/node_modules/xmlbuilder": { + "version": "11.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4.0" + } }, "node_modules/@cspotcode/source-map-support": { "version": "0.8.1", @@ -1826,9 +2636,9 @@ } }, "node_modules/@esbuild/aix-ppc64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz", - "integrity": "sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.4.tgz", + "integrity": "sha512-Zrm+B33R4LWPLjDEVnEqt2+SLTATlru1q/xYKVn8oVTbiRBGmK2VIMoIYGJDGyftnGaC788IuzGFAlb7IQ0Y8A==", "cpu": [ "ppc64" ], @@ -1842,9 +2652,9 @@ } }, "node_modules/@esbuild/android-arm": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz", - "integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.4.tgz", + "integrity": "sha512-E7H/yTd8kGQfY4z9t3nRPk/hrhaCajfA3YSQSBrst8B+3uTcgsi8N+ZWYCaeIDsiVs6m65JPCaQN/DxBRclF3A==", "cpu": [ "arm" ], @@ -1858,9 +2668,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz", - "integrity": 
"sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.4.tgz", + "integrity": "sha512-fYFnz+ObClJ3dNiITySBUx+oNalYUT18/AryMxfovLkYWbutXsct3Wz2ZWAcGGppp+RVVX5FiXeLYGi97umisA==", "cpu": [ "arm64" ], @@ -1874,9 +2684,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz", - "integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.4.tgz", + "integrity": "sha512-mDqmlge3hFbEPbCWxp4fM6hqq7aZfLEHZAKGP9viq9wMUBVQx202aDIfc3l+d2cKhUJM741VrCXEzRFhPDKH3Q==", "cpu": [ "x64" ], @@ -1890,9 +2700,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz", - "integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.4.tgz", + "integrity": "sha512-72eaIrDZDSiWqpmCzVaBD58c8ea8cw/U0fq/PPOTqE3c53D0xVMRt2ooIABZ6/wj99Y+h4ksT/+I+srCDLU9TA==", "cpu": [ "arm64" ], @@ -1906,9 +2716,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz", - "integrity": "sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.4.tgz", + "integrity": "sha512-uBsuwRMehGmw1JC7Vecu/upOjTsMhgahmDkWhGLWxIgUn2x/Y4tIwUZngsmVb6XyPSTXJYS4YiASKPcm9Zitag==", "cpu": [ "x64" ], @@ -1922,9 +2732,9 @@ } }, 
"node_modules/@esbuild/freebsd-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz", - "integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.4.tgz", + "integrity": "sha512-8JfuSC6YMSAEIZIWNL3GtdUT5NhUA/CMUCpZdDRolUXNAXEE/Vbpe6qlGLpfThtY5NwXq8Hi4nJy4YfPh+TwAg==", "cpu": [ "arm64" ], @@ -1938,9 +2748,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz", - "integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.4.tgz", + "integrity": "sha512-8d9y9eQhxv4ef7JmXny7591P/PYsDFc4+STaxC1GBv0tMyCdyWfXu2jBuqRsyhY8uL2HU8uPyscgE2KxCY9imQ==", "cpu": [ "x64" ], @@ -1954,9 +2764,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz", - "integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.4.tgz", + "integrity": "sha512-2rqFFefpYmpMs+FWjkzSgXg5vViocqpq5a1PSRgT0AvSgxoXmGF17qfGAzKedg6wAwyM7UltrKVo9kxaJLMF/g==", "cpu": [ "arm" ], @@ -1970,9 +2780,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz", - "integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.4.tgz", + 
"integrity": "sha512-/GLD2orjNU50v9PcxNpYZi+y8dJ7e7/LhQukN3S4jNDXCKkyyiyAz9zDw3siZ7Eh1tRcnCHAo/WcqKMzmi4eMQ==", "cpu": [ "arm64" ], @@ -1986,9 +2796,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz", - "integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.4.tgz", + "integrity": "sha512-pNftBl7m/tFG3t2m/tSjuYeWIffzwAZT9m08+9DPLizxVOsUl8DdFzn9HvJrTQwe3wvJnwTdl92AonY36w/25g==", "cpu": [ "ia32" ], @@ -2002,9 +2812,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz", - "integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.4.tgz", + "integrity": "sha512-cSD2gzCK5LuVX+hszzXQzlWya6c7hilO71L9h4KHwqI4qeqZ57bAtkgcC2YioXjsbfAv4lPn3qe3b00Zt+jIfQ==", "cpu": [ "loong64" ], @@ -2018,9 +2828,9 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz", - "integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.4.tgz", + "integrity": "sha512-qtzAd3BJh7UdbiXCrg6npWLYU0YpufsV9XlufKhMhYMJGJCdfX/G6+PNd0+v877X1JG5VmjBLUiFB0o8EUSicA==", "cpu": [ "mips64el" ], @@ -2034,9 +2844,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz", - "integrity": 
"sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.4.tgz", + "integrity": "sha512-yB8AYzOTaL0D5+2a4xEy7OVvbcypvDR05MsB/VVPVA7nL4hc5w5Dyd/ddnayStDgJE59fAgNEOdLhBxjfx5+dg==", "cpu": [ "ppc64" ], @@ -2050,9 +2860,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz", - "integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.4.tgz", + "integrity": "sha512-Y5AgOuVzPjQdgU59ramLoqSSiXddu7F3F+LI5hYy/d1UHN7K5oLzYBDZe23QmQJ9PIVUXwOdKJ/jZahPdxzm9w==", "cpu": [ "riscv64" ], @@ -2066,9 +2876,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz", - "integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.4.tgz", + "integrity": "sha512-Iqc/l/FFwtt8FoTK9riYv9zQNms7B8u+vAI/rxKuN10HgQIXaPzKZc479lZ0x6+vKVQbu55GdpYpeNWzjOhgbA==", "cpu": [ "s390x" ], @@ -2082,9 +2892,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz", - "integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.4.tgz", + "integrity": "sha512-Td9jv782UMAFsuLZINfUpoF5mZIbAj+jv1YVtE58rFtfvoKRiKSkRGQfHTgKamLVT/fO7203bHa3wU122V/Bdg==", "cpu": [ "x64" ], @@ -2098,9 +2908,9 @@ } }, 
"node_modules/@esbuild/netbsd-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz", - "integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.4.tgz", + "integrity": "sha512-Awn38oSXxsPMQxaV0Ipb7W/gxZtk5Tx3+W+rAPdZkyEhQ6968r9NvtkjhnhbEgWXYbgV+JEONJ6PcdBS+nlcpA==", "cpu": [ "x64" ], @@ -2114,9 +2924,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz", - "integrity": "sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.4.tgz", + "integrity": "sha512-IsUmQeCY0aU374R82fxIPu6vkOybWIMc3hVGZ3ChRwL9hA1TwY+tS0lgFWV5+F1+1ssuvvXt3HFqe8roCip8Hg==", "cpu": [ "x64" ], @@ -2130,9 +2940,9 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz", - "integrity": "sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.4.tgz", + "integrity": "sha512-hsKhgZ4teLUaDA6FG/QIu2q0rI6I36tZVfM4DBZv3BG0mkMIdEnMbhc4xwLvLJSS22uWmaVkFkqWgIS0gPIm+A==", "cpu": [ "x64" ], @@ -2146,9 +2956,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz", - "integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.4.tgz", + "integrity": 
"sha512-UUfMgMoXPoA/bvGUNfUBFLCh0gt9dxZYIx9W4rfJr7+hKe5jxxHmfOK8YSH4qsHLLN4Ck8JZ+v7Q5fIm1huErg==", "cpu": [ "arm64" ], @@ -2162,9 +2972,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz", - "integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.4.tgz", + "integrity": "sha512-yIxbspZb5kGCAHWm8dexALQ9en1IYDfErzjSEq1KzXFniHv019VT3mNtTK7t8qdy4TwT6QYHI9sEZabONHg+aw==", "cpu": [ "ia32" ], @@ -2178,9 +2988,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz", - "integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.4.tgz", + "integrity": "sha512-sywLRD3UK/qRJt0oBwdpYLBibk7KiRfbswmWRDabuncQYSlf8aLEEUor/oP6KRz8KEG+HoiVLBhPRD5JWjS8Sg==", "cpu": [ "x64" ], @@ -2788,158 +3598,220 @@ } }, "node_modules/@smithy/abort-controller": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/abort-controller/-/abort-controller-2.2.0.tgz", - "integrity": "sha512-wRlta7GuLWpTqtFfGo+nZyOO1vEvewdNR1R4rTxpC8XU6vG/NDyrFBhwLZsqg1NUoR1noVaXJPC/7ZK47QCySw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-p6GlFGBt9K4MYLu72YuJ523NVR4A8oHlC5M2JO6OmQqN8kAc/uh1JqLE+FizTokrSJGg0CSvC+BrsmGzKtsZKA==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/config-resolver": { - "version": "2.2.0", - "resolved": 
"https://registry.npmjs.org/@smithy/config-resolver/-/config-resolver-2.2.0.tgz", - "integrity": "sha512-fsiMgd8toyUba6n1WRmr+qACzXltpdDkPTAaDqc8QqPBUzO+/JKwL6bUBseHVi8tu9l+3JOK+tSf7cay+4B3LA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/config-resolver/-/config-resolver-3.0.0.tgz", + "integrity": "sha512-2GzOfADwYLQugYkKQhIyZyQlM05K+tMKvRnc6eFfZcpJGRfKoMUMYdPlBKmqHwQFXQKBrGV6cxL9oymWgDzvFw==", "dependencies": { - "@smithy/node-config-provider": "^2.3.0", - "@smithy/types": "^2.12.0", - "@smithy/util-config-provider": "^2.3.0", - "@smithy/util-middleware": "^2.2.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/types": "^3.0.0", + "@smithy/util-config-provider": "^3.0.0", + "@smithy/util-middleware": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/core": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/@smithy/core/-/core-1.4.2.tgz", - "integrity": "sha512-2fek3I0KZHWJlRLvRTqxTEri+qV0GRHrJIoLFuBMZB4EMg4WgeBGfF0X6abnrNYpq55KJ6R4D6x4f0vLnhzinA==", - "dependencies": { - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-retry": "^2.3.1", - "@smithy/middleware-serde": "^2.3.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/util-middleware": "^2.2.0", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@smithy/core/-/core-2.0.1.tgz", + "integrity": "sha512-rcMkjvwxH/bER+oZUPR0yTA0ELD6m3A+d92+CFkdF6HJFCBB1bXo7P5pm21L66XwTN01B6bUhSCQ7cymWRD8zg==", + "dependencies": { + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-retry": "^3.0.1", + "@smithy/middleware-serde": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/util-middleware": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/credential-provider-imds": { - 
"version": "2.3.0", - "resolved": "https://registry.npmjs.org/@smithy/credential-provider-imds/-/credential-provider-imds-2.3.0.tgz", - "integrity": "sha512-BWB9mIukO1wjEOo1Ojgl6LrG4avcaC7T/ZP6ptmAaW4xluhSIPZhY+/PI5YKzlk+jsm+4sQZB45Bt1OfMeQa3w==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/credential-provider-imds/-/credential-provider-imds-3.0.0.tgz", + "integrity": "sha512-lfmBiFQcA3FsDAPxNfY0L7CawcWtbyWsBOHo34nF095728JLkBX4Y9q/VPPE2r7fqMVK+drmDigqE2/SSQeVRA==", "dependencies": { - "@smithy/node-config-provider": "^2.3.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/eventstream-codec": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-codec/-/eventstream-codec-3.0.0.tgz", + "integrity": "sha512-PUtyEA0Oik50SaEFCZ0WPVtF9tz/teze2fDptW6WRXl+RrEenH8UbEjudOz8iakiMl3lE3lCVqYf2Y+znL8QFQ==", + "dependencies": { + "@aws-crypto/crc32": "3.0.0", + "@smithy/types": "^3.0.0", + "@smithy/util-hex-encoding": "^3.0.0", + "tslib": "^2.6.2" + } + }, + "node_modules/@smithy/eventstream-serde-browser": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-browser/-/eventstream-serde-browser-3.0.0.tgz", + "integrity": "sha512-NB7AFiPN4NxP/YCAnrvYR18z2/ZsiHiF7VtG30gshO9GbFrIb1rC8ep4NGpJSWrz6P64uhPXeo4M0UsCLnZKqw==", + "dependencies": { + "@smithy/eventstream-serde-universal": "^3.0.0", + "@smithy/types": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/eventstream-serde-config-resolver": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/@smithy/eventstream-serde-config-resolver/-/eventstream-serde-config-resolver-3.0.0.tgz", + "integrity": "sha512-RUQG3vQ3LX7peqqHAbmayhgrF5aTilPnazinaSGF1P0+tgM3vvIRWPHmlLIz2qFqB9LqFIxditxc8O2Z6psrRw==", + "dependencies": { + "@smithy/types": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/eventstream-serde-node": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-node/-/eventstream-serde-node-3.0.0.tgz", + "integrity": "sha512-baRPdMBDMBExZXIUAoPGm/hntixjt/VFpU6+VmCyiYJYzRHRxoaI1MN+5XE+hIS8AJ2GCHLMFEIOLzq9xx1EgQ==", + "dependencies": { + "@smithy/eventstream-serde-universal": "^3.0.0", + "@smithy/types": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/eventstream-serde-universal": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-universal/-/eventstream-serde-universal-3.0.0.tgz", + "integrity": "sha512-HNFfShmotWGeAoW4ujP8meV9BZavcpmerDbPIjkJbxKbN8RsUcpRQ/2OyIxWNxXNH2GWCAxuSB7ynmIGJlQ3Dw==", + "dependencies": { + "@smithy/eventstream-codec": "^3.0.0", + "@smithy/types": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" } }, "node_modules/@smithy/fetch-http-handler": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@smithy/fetch-http-handler/-/fetch-http-handler-2.5.0.tgz", - "integrity": "sha512-BOWEBeppWhLn/no/JxUL/ghTfANTjT7kg3Ww2rPqTUY9R4yHPXxJ9JhMe3Z03LN3aPwiwlpDIUcVw1xDyHqEhw==", - "dependencies": { - "@smithy/protocol-http": "^3.3.0", - "@smithy/querystring-builder": "^2.2.0", - "@smithy/types": "^2.12.0", - "@smithy/util-base64": "^2.3.0", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@smithy/fetch-http-handler/-/fetch-http-handler-3.0.1.tgz", + "integrity": "sha512-uaH74i5BDj+rBwoQaXioKpI0SHBJFtOVwzrCpxZxphOW0ki5jhj7dXvDMYM2IJem8TpdFvS2iC08sjOblfFGFg==", + "dependencies": { + 
"@smithy/protocol-http": "^4.0.0", + "@smithy/querystring-builder": "^3.0.0", + "@smithy/types": "^3.0.0", + "@smithy/util-base64": "^3.0.0", "tslib": "^2.6.2" } }, "node_modules/@smithy/hash-node": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/hash-node/-/hash-node-2.2.0.tgz", - "integrity": "sha512-zLWaC/5aWpMrHKpoDF6nqpNtBhlAYKF/7+9yMN7GpdR8CzohnWfGtMznPybnwSS8saaXBMxIGwJqR4HmRp6b3g==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/hash-node/-/hash-node-3.0.0.tgz", + "integrity": "sha512-84qXstNemP3XS5jcof0el6+bDfjzuvhJPQTEfro3lgtbCtKgzPm3MgiS6ehXVPjeQ5+JS0HqmTz8f/RYfzHVxw==", "dependencies": { - "@smithy/types": "^2.12.0", - "@smithy/util-buffer-from": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", + "@smithy/types": "^3.0.0", + "@smithy/util-buffer-from": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/invalid-dependency": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/invalid-dependency/-/invalid-dependency-2.2.0.tgz", - "integrity": "sha512-nEDASdbKFKPXN2O6lOlTgrEEOO9NHIeO+HVvZnkqc8h5U9g3BIhWsvzFo+UcUbliMHvKNPD/zVxDrkP1Sbgp8Q==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/invalid-dependency/-/invalid-dependency-3.0.0.tgz", + "integrity": "sha512-F6wBBaEFgJzj0s4KUlliIGPmqXemwP6EavgvDqYwCH40O5Xr2iMHvS8todmGVZtuJCorBkXsYLyTu4PuizVq5g==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" } }, "node_modules/@smithy/is-array-buffer": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/is-array-buffer/-/is-array-buffer-2.2.0.tgz", - "integrity": "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/is-array-buffer/-/is-array-buffer-3.0.0.tgz", + "integrity": 
"sha512-+Fsu6Q6C4RSJiy81Y8eApjEB5gVtM+oFKTffg+jSuwtvomJJrhUJBu2zS8wjXSgH/g1MKEWrzyChTBe6clb5FQ==", "dependencies": { "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/middleware-content-length": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/middleware-content-length/-/middleware-content-length-2.2.0.tgz", - "integrity": "sha512-5bl2LG1Ah/7E5cMSC+q+h3IpVHMeOkG0yLRyQT1p2aMJkSrZG7RlXHPuAgb7EyaFeidKEnnd/fNaLLaKlHGzDQ==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/middleware-content-length/-/middleware-content-length-3.0.0.tgz", + "integrity": "sha512-3C4s4d/iGobgCtk2tnWW6+zSTOBg1PRAm2vtWZLdriwTroFbbWNSr3lcyzHdrQHnEXYCC5K52EbpfodaIUY8sg==", "dependencies": { - "@smithy/protocol-http": "^3.3.0", - "@smithy/types": "^2.12.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/middleware-endpoint": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/@smithy/middleware-endpoint/-/middleware-endpoint-2.5.1.tgz", - "integrity": "sha512-1/8kFp6Fl4OsSIVTWHnNjLnTL8IqpIb/D3sTSczrKFnrE9VMNWxnrRKNvpUHOJ6zpGD5f62TPm7+17ilTJpiCQ==", - "dependencies": { - "@smithy/middleware-serde": "^2.3.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/shared-ini-file-loader": "^2.4.0", - "@smithy/types": "^2.12.0", - "@smithy/url-parser": "^2.2.0", - "@smithy/util-middleware": "^2.2.0", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/middleware-endpoint/-/middleware-endpoint-3.0.0.tgz", + "integrity": "sha512-aXOAWztw/5qAfp0NcA2OWpv6ZI/E+Dh9mByif7i91D/0iyYNUcKvskmXiowKESFkuZ7PIMd3VOR4fTibZDs2OQ==", + "dependencies": { + "@smithy/middleware-serde": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/shared-ini-file-loader": "^3.0.0", + "@smithy/types": "^3.0.0", + "@smithy/url-parser": "^3.0.0", + 
"@smithy/util-middleware": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/middleware-retry": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@smithy/middleware-retry/-/middleware-retry-2.3.1.tgz", - "integrity": "sha512-P2bGufFpFdYcWvqpyqqmalRtwFUNUA8vHjJR5iGqbfR6mp65qKOLcUd6lTr4S9Gn/enynSrSf3p3FVgVAf6bXA==", - "dependencies": { - "@smithy/node-config-provider": "^2.3.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/service-error-classification": "^2.1.5", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-retry": "^2.2.0", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@smithy/middleware-retry/-/middleware-retry-3.0.1.tgz", + "integrity": "sha512-hBhSEuL841FhJBK/19WpaGk5YWSzFk/P2UaVjANGKRv3eYNO8Y1lANWgqnuPWjOyCEWMPr58vELFDWpxvRKANw==", + "dependencies": { + "@smithy/node-config-provider": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/service-error-classification": "^3.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-retry": "^3.0.0", "tslib": "^2.6.2", "uuid": "^9.0.1" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/middleware-retry/node_modules/uuid": { @@ -2955,247 +3827,247 @@ } }, "node_modules/@smithy/middleware-serde": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@smithy/middleware-serde/-/middleware-serde-2.3.0.tgz", - "integrity": "sha512-sIADe7ojwqTyvEQBe1nc/GXB9wdHhi9UwyX0lTyttmUWDJLP655ZYE1WngnNyXREme8I27KCaUhyhZWRXL0q7Q==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/middleware-serde/-/middleware-serde-3.0.0.tgz", + "integrity": "sha512-I1vKG1foI+oPgG9r7IMY1S+xBnmAn1ISqployvqkwHoSb8VPsngHDTOgYGYBonuOKndaWRUGJZrKYYLB+Ane6w==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", 
"tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/middleware-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/middleware-stack/-/middleware-stack-2.2.0.tgz", - "integrity": "sha512-Qntc3jrtwwrsAC+X8wms8zhrTr0sFXnyEGhZd9sLtsJ/6gGQKFzNB+wWbOcpJd7BR8ThNCoKt76BuQahfMvpeA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/middleware-stack/-/middleware-stack-3.0.0.tgz", + "integrity": "sha512-+H0jmyfAyHRFXm6wunskuNAqtj7yfmwFB6Fp37enytp2q047/Od9xetEaUbluyImOlGnGpaVGaVfjwawSr+i6Q==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/node-config-provider": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@smithy/node-config-provider/-/node-config-provider-2.3.0.tgz", - "integrity": "sha512-0elK5/03a1JPWMDPaS726Iw6LpQg80gFut1tNpPfxFuChEEklo2yL823V94SpTZTxmKlXFtFgsP55uh3dErnIg==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/node-config-provider/-/node-config-provider-3.0.0.tgz", + "integrity": "sha512-buqfaSdDh0zo62EPLf8rGDvcpKwGpO5ho4bXS2cdFhlOta7tBkWJt+O5uiaAeICfIOfPclNOndshDNSanX2X9g==", "dependencies": { - "@smithy/property-provider": "^2.2.0", - "@smithy/shared-ini-file-loader": "^2.4.0", - "@smithy/types": "^2.12.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/shared-ini-file-loader": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/node-http-handler": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@smithy/node-http-handler/-/node-http-handler-2.5.0.tgz", - "integrity": "sha512-mVGyPBzkkGQsPoxQUbxlEfRjrj6FPyA3u3u2VXGr9hT8wilsoQdZdvKpMBFMB8Crfhv5dNkKHIW0Yyuc7eABqA==", - "dependencies": { - "@smithy/abort-controller": "^2.2.0", - "@smithy/protocol-http": "^3.3.0", - 
"@smithy/querystring-builder": "^2.2.0", - "@smithy/types": "^2.12.0", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/node-http-handler/-/node-http-handler-3.0.0.tgz", + "integrity": "sha512-3trD4r7NOMygwLbUJo4eodyQuypAWr7uvPnebNJ9a70dQhVn+US8j/lCnvoJS6BXfZeF7PkkkI0DemVJw+n+eQ==", + "dependencies": { + "@smithy/abort-controller": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/querystring-builder": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/property-provider": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/property-provider/-/property-provider-2.2.0.tgz", - "integrity": "sha512-+xiil2lFhtTRzXkx8F053AV46QnIw6e7MV8od5Mi68E1ICOjCeCHw2XfLnDEUHnT9WGUIkwcqavXjfwuJbGlpg==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/property-provider/-/property-provider-3.0.0.tgz", + "integrity": "sha512-LmbPgHBswdXCrkWWuUwBm9w72S2iLWyC/5jet9/Y9cGHtzqxi+GVjfCfahkvNV4KXEwgnH8EMpcrD9RUYe0eLQ==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/protocol-http": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/@smithy/protocol-http/-/protocol-http-3.3.0.tgz", - "integrity": "sha512-Xy5XK1AFWW2nlY/biWZXu6/krgbaf2dg0q492D8M5qthsnU2H+UgFeZLbM76FnH7s6RO/xhQRkj+T6KBO3JzgQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@smithy/protocol-http/-/protocol-http-4.0.0.tgz", + "integrity": "sha512-qOQZOEI2XLWRWBO9AgIYuHuqjZ2csyr8/IlgFDHDNuIgLAMRx2Bl8ck5U5D6Vh9DPdoaVpuzwWMa0xcdL4O/AQ==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/querystring-builder": { - "version": "2.2.0", - "resolved": 
"https://registry.npmjs.org/@smithy/querystring-builder/-/querystring-builder-2.2.0.tgz", - "integrity": "sha512-L1kSeviUWL+emq3CUVSgdogoM/D9QMFaqxL/dd0X7PCNWmPXqt+ExtrBjqT0V7HLN03Vs9SuiLrG3zy3JGnE5A==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/querystring-builder/-/querystring-builder-3.0.0.tgz", + "integrity": "sha512-bW8Fi0NzyfkE0TmQphDXr1AmBDbK01cA4C1Z7ggwMAU5RDz5AAv/KmoRwzQAS0kxXNf/D2ALTEgwK0U2c4LtRg==", "dependencies": { - "@smithy/types": "^2.12.0", - "@smithy/util-uri-escape": "^2.2.0", + "@smithy/types": "^3.0.0", + "@smithy/util-uri-escape": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/querystring-parser": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/querystring-parser/-/querystring-parser-2.2.0.tgz", - "integrity": "sha512-BvHCDrKfbG5Yhbpj4vsbuPV2GgcpHiAkLeIlcA1LtfpMz3jrqizP1+OguSNSj1MwBHEiN+jwNisXLGdajGDQJA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/querystring-parser/-/querystring-parser-3.0.0.tgz", + "integrity": "sha512-UzHwthk0UEccV4dHzPySnBy34AWw3V9lIqUTxmozQ+wPDAO9csCWMfOLe7V9A2agNYy7xE+Pb0S6K/J23JSzfQ==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/service-error-classification": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@smithy/service-error-classification/-/service-error-classification-2.1.5.tgz", - "integrity": "sha512-uBDTIBBEdAQryvHdc5W8sS5YX7RQzF683XrHePVdFmAgKiMofU15FLSM0/HU03hKTnazdNRFa0YHS7+ArwoUSQ==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/service-error-classification/-/service-error-classification-3.0.0.tgz", + "integrity": "sha512-3BsBtOUt2Gsnc3X23ew+r2M71WwtpHfEDGhHYHSDg6q1t8FrWh15jT25DLajFV1H+PpxAJ6gqe9yYeRUsmSdFA==", "dependencies": { - "@smithy/types": "^2.12.0" + "@smithy/types": 
"^3.0.0" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/shared-ini-file-loader": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-2.4.0.tgz", - "integrity": "sha512-WyujUJL8e1B6Z4PBfAqC/aGY1+C7T0w20Gih3yrvJSk97gpiVfB+y7c46T4Nunk+ZngLq0rOIdeVeIklk0R3OA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-3.0.0.tgz", + "integrity": "sha512-REVw6XauXk8xE4zo5aGL7Rz4ywA8qNMUn8RtWeTRQsgAlmlvbJ7CEPBcaXU2NDC3AYBgYAXrGyWD8XrN8UGDog==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/signature-v4": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@smithy/signature-v4/-/signature-v4-2.2.1.tgz", - "integrity": "sha512-j5fHgL1iqKTsKJ1mTcw88p0RUcidDu95AWSeZTgiYJb+QcfwWU/UpBnaqiB59FNH5MiAZuSbOBnZlwzeeY2tIw==", - "dependencies": { - "@smithy/is-array-buffer": "^2.2.0", - "@smithy/types": "^2.12.0", - "@smithy/util-hex-encoding": "^2.2.0", - "@smithy/util-middleware": "^2.2.0", - "@smithy/util-uri-escape": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/signature-v4/-/signature-v4-3.0.0.tgz", + "integrity": "sha512-kXFOkNX+BQHe2qnLxpMEaCRGap9J6tUGLzc3A9jdn+nD4JdMwCKTJ+zFwQ20GkY+mAXGatyTw3HcoUlR39HwmA==", + "dependencies": { + "@smithy/is-array-buffer": "^3.0.0", + "@smithy/types": "^3.0.0", + "@smithy/util-hex-encoding": "^3.0.0", + "@smithy/util-middleware": "^3.0.0", + "@smithy/util-uri-escape": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/smithy-client": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/@smithy/smithy-client/-/smithy-client-2.5.1.tgz", - "integrity": 
"sha512-jrbSQrYCho0yDaaf92qWgd+7nAeap5LtHTI51KXqmpIFCceKU3K9+vIVTUH72bOJngBMqa4kyu1VJhRcSrk/CQ==", - "dependencies": { - "@smithy/middleware-endpoint": "^2.5.1", - "@smithy/middleware-stack": "^2.2.0", - "@smithy/protocol-http": "^3.3.0", - "@smithy/types": "^2.12.0", - "@smithy/util-stream": "^2.2.0", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@smithy/smithy-client/-/smithy-client-3.0.1.tgz", + "integrity": "sha512-KAiFY4Y4jdHxR+4zerH/VBhaFKM8pbaVmJZ/CWJRwtM/CmwzTfXfvYwf6GoUwiHepdv+lwiOXCuOl6UBDUEINw==", + "dependencies": { + "@smithy/middleware-endpoint": "^3.0.0", + "@smithy/middleware-stack": "^3.0.0", + "@smithy/protocol-http": "^4.0.0", + "@smithy/types": "^3.0.0", + "@smithy/util-stream": "^3.0.1", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/types": { - "version": "2.12.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-2.12.0.tgz", - "integrity": "sha512-QwYgloJ0sVNBeBuBs65cIkTbfzV/Q6ZNPCJ99EICFEdJYG50nGIY/uYXp+TbsdJReIuPr0a0kXmCvren3MbRRw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/types/-/types-3.0.0.tgz", + "integrity": "sha512-VvWuQk2RKFuOr98gFhjca7fkBS+xLLURT8bUjk5XQoV0ZLm7WPwWPPY3/AwzTLuUBDeoKDCthfe1AsTUWaSEhw==", "dependencies": { "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/url-parser": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/url-parser/-/url-parser-2.2.0.tgz", - "integrity": "sha512-hoA4zm61q1mNTpksiSWp2nEl1dt3j726HdRhiNgVJQMj7mLp7dprtF57mOB6JvEk/x9d2bsuL5hlqZbBuHQylQ==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/url-parser/-/url-parser-3.0.0.tgz", + "integrity": "sha512-2XLazFgUu+YOGHtWihB3FSLAfCUajVfNBXGGYjOaVKjLAuAxx3pSBY3hBgLzIgB17haf59gOG3imKqTy8mcrjw==", "dependencies": { - "@smithy/querystring-parser": "^2.2.0", - "@smithy/types": "^2.12.0", + "@smithy/querystring-parser": "^3.0.0", + 
"@smithy/types": "^3.0.0", "tslib": "^2.6.2" } }, "node_modules/@smithy/util-base64": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@smithy/util-base64/-/util-base64-2.3.0.tgz", - "integrity": "sha512-s3+eVwNeJuXUwuMbusncZNViuhv2LjVJ1nMwTqSA0XAC7gjKhqqxRdJPhR8+YrkoZ9IiIbFk/yK6ACe/xlF+hw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-base64/-/util-base64-3.0.0.tgz", + "integrity": "sha512-Kxvoh5Qtt0CDsfajiZOCpJxgtPHXOKwmM+Zy4waD43UoEMA+qPxxa98aE/7ZhdnBFZFXMOiBR5xbcaMhLtznQQ==", "dependencies": { - "@smithy/util-buffer-from": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", + "@smithy/util-buffer-from": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-body-length-browser": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-body-length-browser/-/util-body-length-browser-2.2.0.tgz", - "integrity": "sha512-dtpw9uQP7W+n3vOtx0CfBD5EWd7EPdIdsQnWTDoFf77e3VUf05uA7R7TGipIo8e4WL2kuPdnsr3hMQn9ziYj5w==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-body-length-browser/-/util-body-length-browser-3.0.0.tgz", + "integrity": "sha512-cbjJs2A1mLYmqmyVl80uoLTJhAcfzMOyPgjwAYusWKMdLeNtzmMz9YxNl3/jRLoxSS3wkqkf0jwNdtXWtyEBaQ==", "dependencies": { "tslib": "^2.6.2" } }, "node_modules/@smithy/util-body-length-node": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@smithy/util-body-length-node/-/util-body-length-node-2.3.0.tgz", - "integrity": "sha512-ITWT1Wqjubf2CJthb0BuT9+bpzBfXeMokH/AAa5EJQgbv9aPMVfnM76iFIZVFf50hYXGbtiV71BHAthNWd6+dw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-body-length-node/-/util-body-length-node-3.0.0.tgz", + "integrity": "sha512-Tj7pZ4bUloNUP6PzwhN7K386tmSmEET9QtQg0TgdNOnxhZvCssHji+oZTUIuzxECRfG8rdm2PMw2WCFs6eIYkA==", "dependencies": { "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": 
">=16.0.0" } }, "node_modules/@smithy/util-buffer-from": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-buffer-from/-/util-buffer-from-2.2.0.tgz", - "integrity": "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-buffer-from/-/util-buffer-from-3.0.0.tgz", + "integrity": "sha512-aEOHCgq5RWFbP+UDPvPot26EJHjOC+bRgse5A8V3FSShqd5E5UN4qc7zkwsvJPPAVsf73QwYcHN1/gt/rtLwQA==", "dependencies": { - "@smithy/is-array-buffer": "^2.2.0", + "@smithy/is-array-buffer": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-config-provider": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@smithy/util-config-provider/-/util-config-provider-2.3.0.tgz", - "integrity": "sha512-HZkzrRcuFN1k70RLqlNK4FnPXKOpkik1+4JaBoHNJn+RnJGYqaa3c5/+XtLOXhlKzlRgNvyaLieHTW2VwGN0VQ==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-config-provider/-/util-config-provider-3.0.0.tgz", + "integrity": "sha512-pbjk4s0fwq3Di/ANL+rCvJMKM5bzAQdE5S/6RL5NXgMExFAi6UgQMPOm5yPaIWPpr+EOXKXRonJ3FoxKf4mCJQ==", "dependencies": { "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-defaults-mode-browser": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-2.2.1.tgz", - "integrity": "sha512-RtKW+8j8skk17SYowucwRUjeh4mCtnm5odCL0Lm2NtHQBsYKrNW0od9Rhopu9wF1gHMfHeWF7i90NwBz/U22Kw==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-3.0.1.tgz", + "integrity": "sha512-nW5kEzdJn1Bn5TF+gOPHh2rcPli8JU9vSSXLbfg7uPnfR1TMRQqs9zlYRhIb87NeSxIbpdXOI94tvXSy+fvDYg==", "dependencies": { - "@smithy/property-provider": "^2.2.0", - "@smithy/smithy-client": "^2.5.1", - 
"@smithy/types": "^2.12.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", "bowser": "^2.11.0", "tslib": "^2.6.2" }, @@ -3204,16 +4076,16 @@ } }, "node_modules/@smithy/util-defaults-mode-node": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-2.3.1.tgz", - "integrity": "sha512-vkMXHQ0BcLFysBMWgSBLSk3+leMpFSyyFj8zQtv5ZyUBx8/owVh1/pPEkzmW/DR/Gy/5c8vjLDD9gZjXNKbrpA==", - "dependencies": { - "@smithy/config-resolver": "^2.2.0", - "@smithy/credential-provider-imds": "^2.3.0", - "@smithy/node-config-provider": "^2.3.0", - "@smithy/property-provider": "^2.2.0", - "@smithy/smithy-client": "^2.5.1", - "@smithy/types": "^2.12.0", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-3.0.1.tgz", + "integrity": "sha512-TFk+Qb+elLc/MOhtSp+50fstyfZ6avQbgH2d96xUBpeScu+Al9elxv+UFAjaTHe0HQe5n+wem8ZLpXvU8lwV6Q==", + "dependencies": { + "@smithy/config-resolver": "^3.0.0", + "@smithy/credential-provider-imds": "^3.0.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/property-provider": "^3.0.0", + "@smithy/smithy-client": "^3.0.1", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { @@ -3221,106 +4093,106 @@ } }, "node_modules/@smithy/util-endpoints": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-endpoints/-/util-endpoints-1.2.0.tgz", - "integrity": "sha512-BuDHv8zRjsE5zXd3PxFXFknzBG3owCpjq8G3FcsXW3CykYXuEqM3nTSsmLzw5q+T12ZYuDlVUZKBdpNbhVtlrQ==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-endpoints/-/util-endpoints-2.0.0.tgz", + "integrity": "sha512-+exaXzEY3DNt2qtA2OtRNSDlVrE4p32j1JSsQkzA5AdP0YtJNjkYbYhJxkFmPYcjI1abuwopOZCwUmv682QkiQ==", "dependencies": { - "@smithy/node-config-provider": "^2.3.0", - "@smithy/types": "^2.12.0", + "@smithy/node-config-provider": "^3.0.0", + "@smithy/types": "^3.0.0", 
"tslib": "^2.6.2" }, "engines": { - "node": ">= 14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-hex-encoding": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-hex-encoding/-/util-hex-encoding-2.2.0.tgz", - "integrity": "sha512-7iKXR+/4TpLK194pVjKiasIyqMtTYJsgKgM242Y9uzt5dhHnUDvMNb+3xIhRJ9QhvqGii/5cRUt4fJn3dtXNHQ==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-hex-encoding/-/util-hex-encoding-3.0.0.tgz", + "integrity": "sha512-eFndh1WEK5YMUYvy3lPlVmYY/fZcQE1D8oSf41Id2vCeIkKJXPcYDCZD+4+xViI6b1XSd7tE+s5AmXzz5ilabQ==", "dependencies": { "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-middleware": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-middleware/-/util-middleware-2.2.0.tgz", - "integrity": "sha512-L1qpleXf9QD6LwLCJ5jddGkgWyuSvWBkJwWAZ6kFkdifdso+sk3L3O1HdmPvCdnCK3IS4qWyPxev01QMnfHSBw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-middleware/-/util-middleware-3.0.0.tgz", + "integrity": "sha512-q5ITdOnV2pXHSVDnKWrwgSNTDBAMHLptFE07ua/5Ty5WJ11bvr0vk2a7agu7qRhrCFRQlno5u3CneU5EELK+DQ==", "dependencies": { - "@smithy/types": "^2.12.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-retry": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-retry/-/util-retry-2.2.0.tgz", - "integrity": "sha512-q9+pAFPTfftHXRytmZ7GzLFFrEGavqapFc06XxzZFcSIGERXMerXxCitjOG1prVDR9QdjqotF40SWvbqcCpf8g==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-retry/-/util-retry-3.0.0.tgz", + "integrity": "sha512-nK99bvJiziGv/UOKJlDvFF45F00WgPLKVIGUfAK+mDhzVN2hb/S33uW2Tlhg5PVBoqY7tDVqL0zmu4OxAHgo9g==", "dependencies": { - "@smithy/service-error-classification": "^2.1.5", - "@smithy/types": "^2.12.0", + "@smithy/service-error-classification": "^3.0.0", + 
"@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">= 14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-stream": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-stream/-/util-stream-2.2.0.tgz", - "integrity": "sha512-17faEXbYWIRst1aU9SvPZyMdWmqIrduZjVOqCPMIsWFNxs5yQQgFrJL6b2SdiCzyW9mJoDjFtgi53xx7EH+BXA==", - "dependencies": { - "@smithy/fetch-http-handler": "^2.5.0", - "@smithy/node-http-handler": "^2.5.0", - "@smithy/types": "^2.12.0", - "@smithy/util-base64": "^2.3.0", - "@smithy/util-buffer-from": "^2.2.0", - "@smithy/util-hex-encoding": "^2.2.0", - "@smithy/util-utf8": "^2.3.0", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@smithy/util-stream/-/util-stream-3.0.1.tgz", + "integrity": "sha512-7F7VNNhAsfMRA8I986YdOY5fE0/T1/ZjFF6OLsqkvQVNP3vZ/szYDfGCyphb7ioA09r32K/0qbSFfNFU68aSzA==", + "dependencies": { + "@smithy/fetch-http-handler": "^3.0.1", + "@smithy/node-http-handler": "^3.0.0", + "@smithy/types": "^3.0.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-buffer-from": "^3.0.0", + "@smithy/util-hex-encoding": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-uri-escape": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-uri-escape/-/util-uri-escape-2.2.0.tgz", - "integrity": "sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-uri-escape/-/util-uri-escape-3.0.0.tgz", + "integrity": "sha512-LqR7qYLgZTD7nWLBecUi4aqolw8Mhza9ArpNEQ881MJJIU2sE5iHCK6TdyqqzcDLy0OPe10IY4T8ctVdtynubg==", "dependencies": { "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-utf8": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@smithy/util-utf8/-/util-utf8-2.3.0.tgz", - 
"integrity": "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-utf8/-/util-utf8-3.0.0.tgz", + "integrity": "sha512-rUeT12bxFnplYDe815GXbq/oixEGHfRFFtcTF3YdDi/JaENIM6aSYYLJydG83UNzLXeRI5K8abYd/8Sp/QM0kA==", "dependencies": { - "@smithy/util-buffer-from": "^2.2.0", + "@smithy/util-buffer-from": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@smithy/util-waiter": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-waiter/-/util-waiter-2.2.0.tgz", - "integrity": "sha512-IHk53BVw6MPMi2Gsn+hCng8rFA3ZmR3Rk7GllxDUW9qFJl/hiSvskn7XldkECapQVkIg/1dHpMAxI9xSTaLLSA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@smithy/util-waiter/-/util-waiter-3.0.0.tgz", + "integrity": "sha512-+fEXJxGDLCoqRKVSmo0auGxaqbiCo+8oph+4auefYjaNxjOLKSY2MxVQfRzo65PaZv4fr+5lWg+au7vSuJJ/zw==", "dependencies": { - "@smithy/abort-controller": "^2.2.0", - "@smithy/types": "^2.12.0", + "@smithy/abort-controller": "^3.0.0", + "@smithy/types": "^3.0.0", "tslib": "^2.6.2" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.0.0" } }, "node_modules/@tsconfig/node10": { @@ -3444,9 +4316,9 @@ "dev": true }, "node_modules/@types/node": { - "version": "18.19.31", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.31.tgz", - "integrity": "sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==", + "version": "18.19.33", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.33.tgz", + "integrity": "sha512-NR9+KrpSajr2qBVp/Yt5TU/rp+b5Mayi3+OlMlcg2cVCfRmcG5PWZ7S4+MG9PZ5gWBoc9Pd0BKSRViuBCRPu0A==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -3464,6 +4336,12 @@ "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", "dev": true }, + 
"node_modules/@types/uuid": { + "version": "9.0.8", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.8.tgz", + "integrity": "sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==", + "dev": true + }, "node_modules/@types/yargs": { "version": "17.0.26", "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.26.tgz", @@ -4159,9 +5037,9 @@ } }, "node_modules/aws-cdk": { - "version": "2.137.0", - "resolved": "https://registry.npmjs.org/aws-cdk/-/aws-cdk-2.137.0.tgz", - "integrity": "sha512-3pf3SVDwNZvo3EfhO3yl1B+KbRHz7T4UmPifUEKfOwk7ABAFLRSNddZuUlF560XSBTFLkrZoeBDa0/MLJT6F4g==", + "version": "2.130.0", + "resolved": "https://registry.npmjs.org/aws-cdk/-/aws-cdk-2.130.0.tgz", + "integrity": "sha512-MsjGzQ2kZv0FEfXvpW7FTJRnefew0GrYt9M2SMN2Yn45+yjugGl2X8to416kABeFz1OFqW56hq8Y5BiLuFDVLQ==", "dev": true, "bin": { "cdk": "bin/cdk" @@ -4174,9 +5052,9 @@ } }, "node_modules/aws-cdk-lib": { - "version": "2.137.0", - "resolved": "https://registry.npmjs.org/aws-cdk-lib/-/aws-cdk-lib-2.137.0.tgz", - "integrity": "sha512-pD3AGdKBa8q1+vVWRabiDHuecVMlP8ERGPHc9Pb0dVlpbC/ODC6XXC1S0TAMsr0JI5Lh6pk4vL5cC+spsMeotw==", + "version": "2.130.0", + "resolved": "https://registry.npmjs.org/aws-cdk-lib/-/aws-cdk-lib-2.130.0.tgz", + "integrity": "sha512-yK7ibePipdjlI4AFM94fwwtsCkmpWJ0JFZTMPahahC/3Pxe/BA/nnI/4Namvl5QPxW5QlU0xQYU7cywioq3RQg==", "bundleDependencies": [ "@balena/dockerignore", "case", @@ -4203,7 +5081,7 @@ "minimatch": "^3.1.2", "punycode": "^2.3.1", "semver": "^7.6.0", - "table": "^6.8.2", + "table": "^6.8.1", "yaml": "1.10.2" }, "engines": { @@ -4491,7 +5369,7 @@ } }, "node_modules/aws-cdk-lib/node_modules/table": { - "version": "6.8.2", + "version": "6.8.1", "inBundle": true, "license": "BSD-3-Clause", "dependencies": { @@ -4793,9 +5671,9 @@ ] }, "node_modules/cdk-nag": { - "version": "2.28.87", - "resolved": "https://registry.npmjs.org/cdk-nag/-/cdk-nag-2.28.87.tgz", - "integrity": 
"sha512-LHfHglpFcL4Mz6YVxbBbh6JsjJuNN6qqliQ+PkZ1r2g18uQunfbwR2GjJ5HWeylYLRCzeo4NnRay3A+y7ybbvg==", + "version": "2.28.127", + "resolved": "https://registry.npmjs.org/cdk-nag/-/cdk-nag-2.28.127.tgz", + "integrity": "sha512-kDOxm5E6j2Cp9vh911/06aiVB0eFB+QDuA9HDbZlq4oXdtmG0r3roZIom8SvoJkkzoL2HrNrHXs2veFmn9qJew==", "peerDependencies": { "aws-cdk-lib": "^2.116.0", "constructs": "^10.0.5" @@ -5228,9 +6106,9 @@ } }, "node_modules/esbuild": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz", - "integrity": "sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==", + "version": "0.21.4", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.4.tgz", + "integrity": "sha512-sFMcNNrj+Q0ZDolrp5pDhH0nRPN9hLIM3fRPwgbLYJeSHHgnXSnbV3xYgSVuOeLWH9c73VwmEverVzupIv5xuA==", "dev": true, "hasInstallScript": true, "bin": { @@ -5240,29 +6118,29 @@ "node": ">=12" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.20.2", - "@esbuild/android-arm": "0.20.2", - "@esbuild/android-arm64": "0.20.2", - "@esbuild/android-x64": "0.20.2", - "@esbuild/darwin-arm64": "0.20.2", - "@esbuild/darwin-x64": "0.20.2", - "@esbuild/freebsd-arm64": "0.20.2", - "@esbuild/freebsd-x64": "0.20.2", - "@esbuild/linux-arm": "0.20.2", - "@esbuild/linux-arm64": "0.20.2", - "@esbuild/linux-ia32": "0.20.2", - "@esbuild/linux-loong64": "0.20.2", - "@esbuild/linux-mips64el": "0.20.2", - "@esbuild/linux-ppc64": "0.20.2", - "@esbuild/linux-riscv64": "0.20.2", - "@esbuild/linux-s390x": "0.20.2", - "@esbuild/linux-x64": "0.20.2", - "@esbuild/netbsd-x64": "0.20.2", - "@esbuild/openbsd-x64": "0.20.2", - "@esbuild/sunos-x64": "0.20.2", - "@esbuild/win32-arm64": "0.20.2", - "@esbuild/win32-ia32": "0.20.2", - "@esbuild/win32-x64": "0.20.2" + "@esbuild/aix-ppc64": "0.21.4", + "@esbuild/android-arm": "0.21.4", + "@esbuild/android-arm64": "0.21.4", + "@esbuild/android-x64": "0.21.4", + "@esbuild/darwin-arm64": "0.21.4", + 
"@esbuild/darwin-x64": "0.21.4", + "@esbuild/freebsd-arm64": "0.21.4", + "@esbuild/freebsd-x64": "0.21.4", + "@esbuild/linux-arm": "0.21.4", + "@esbuild/linux-arm64": "0.21.4", + "@esbuild/linux-ia32": "0.21.4", + "@esbuild/linux-loong64": "0.21.4", + "@esbuild/linux-mips64el": "0.21.4", + "@esbuild/linux-ppc64": "0.21.4", + "@esbuild/linux-riscv64": "0.21.4", + "@esbuild/linux-s390x": "0.21.4", + "@esbuild/linux-x64": "0.21.4", + "@esbuild/netbsd-x64": "0.21.4", + "@esbuild/openbsd-x64": "0.21.4", + "@esbuild/sunos-x64": "0.21.4", + "@esbuild/win32-arm64": "0.21.4", + "@esbuild/win32-ia32": "0.21.4", + "@esbuild/win32-x64": "0.21.4" } }, "node_modules/escalade": { @@ -6118,9 +6996,9 @@ } }, "node_modules/hasown": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.1.tgz", - "integrity": "sha512-1/th4MHjnwncwXsIW6QMzlvYL9kG5e/CpVvLRZe4XPa8TOUNbCELqmvhDmnkNsAjwaG4+I8gJJL0JBvTTLO9qA==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", + "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", "dev": true, "dependencies": { "function-bind": "^1.1.2" @@ -7869,9 +8747,9 @@ } }, "node_modules/projen": { - "version": "0.80.20", - "resolved": "https://registry.npmjs.org/projen/-/projen-0.80.20.tgz", - "integrity": "sha512-XVQBMdBilS/x0KmFyUH3xBIE3ZDXInAErPIAg56dGYQ0epl21kbuCYvsWdPwPQ8wdNi+D/gQUEaf/uenidH5kg==", + "version": "0.82.1", + "resolved": "https://registry.npmjs.org/projen/-/projen-0.82.1.tgz", + "integrity": "sha512-Qa0lALlR9XjcugOuLZbWYMO4NaGu964LavudwC993I2NLpYj3DFv/TdRJOfwxvYsIJI7609luI162Ts5Sz69sw==", "bundleDependencies": [ "@iarna/toml", "case", @@ -7898,7 +8776,7 @@ "fast-json-patch": "^3.1.1", "glob": "^8", "ini": "^2.0.0", - "semver": "^7.6.0", + "semver": "^7.6.2", "shx": "^0.3.4", "xmlbuilder2": "^3.1.1", "yaml": "^2.2.2", @@ -8300,18 +9178,6 @@ "js-yaml": "bin/js-yaml.js" } }, - 
"node_modules/projen/node_modules/lru-cache": { - "version": "6.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/projen/node_modules/minimatch": { "version": "3.1.2", "dev": true, @@ -8414,13 +9280,10 @@ } }, "node_modules/projen/node_modules/semver": { - "version": "7.6.0", + "version": "7.6.2", "dev": true, "inBundle": true, "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, "bin": { "semver": "bin/semver.js" }, @@ -8584,14 +9447,8 @@ "node": ">=10" } }, - "node_modules/projen/node_modules/yallist": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC" - }, "node_modules/projen/node_modules/yaml": { - "version": "2.4.1", + "version": "2.4.2", "dev": true, "inBundle": true, "license": "ISC", @@ -9221,9 +10078,9 @@ } }, "node_modules/ts-jest": { - "version": "29.1.2", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", - "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", + "version": "29.1.4", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.4.tgz", + "integrity": "sha512-YiHwDhSvCiItoAgsKtoLFCuakDzDsJ1DLDnSouTaTmdOcOwIkSzbLXduaQ6M5DRVhuZC/NYaaZ/mtHbWMv/S6Q==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -9239,10 +10096,11 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^16.10.0 || ^18.0.0 || >=20.0.0" + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0", "@jest/types": "^29.0.0", "babel-jest": "^29.0.0", "jest": "^29.0.0", @@ -9252,6 +10110,9 @@ "@babel/core": { "optional": true }, + "@jest/transform": { + "optional": true + }, "@jest/types": { "optional": true }, @@ -9443,9 +10304,9 @@ } }, "node_modules/typescript": { - "version": "5.1.6", - "resolved": 
"https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", - "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -9519,7 +10380,6 @@ "version": "8.3.2", "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "dev": true, "bin": { "uuid": "dist/bin/uuid" } diff --git a/package.json b/package.json index c6360364..882288cd 100644 --- a/package.json +++ b/package.json @@ -22,6 +22,8 @@ "test:app": "npx projen test:app", "test:app:ci": "npx projen test:app:ci", "test:cdk": "npx projen test:cdk", + "test:cdk-tests": "npx projen test:cdk-tests", + "test:cdk-tests:ci": "npx projen test:cdk-tests:ci", "test:cdk:ci": "npx projen test:cdk:ci", "test:ci": "npx projen test:ci", "test:cli": "npx projen test:cli", @@ -29,6 +31,7 @@ "test:eslint": "npx projen test:eslint", "test:prettier": "npx projen test:prettier", "test:update": "npx projen test:update", + "test:update-snapshots": "npx projen test:update-snapshots", "test:watch": "npx projen test:watch", "upgrade": "npx projen upgrade", "watch": "npx projen watch", @@ -40,11 +43,13 @@ "organization": true }, "devDependencies": { + "@cdklabs/cdk-ssm-documents": "^0.0.41", "@types/jest": "^29.5.12", "@types/node": "^18", + "@types/uuid": "^9.0.8", "@typescript-eslint/eslint-plugin": "^6.21.0", - "aws-cdk": "^2.102.0", - "esbuild": "^0.20.2", + "aws-cdk": "2.130.0", + "esbuild": "^0.21.4", "eslint": "^8.57.0", "eslint-config-prettier": "^9.1.0", "eslint-plugin-header": "^3.1.1", @@ -53,28 +58,38 @@ "jest": "^29.7.0", "jest-extended": "^4.0.2", "jest-junit": "^16.0.0", - "projen": "^0.80.20", - 
"ts-jest": "^29.1.2", + "projen": "^0.82.1", + "ts-jest": "^29.1.4", "ts-node": "^10.9.2", - "typescript": "~5.1.6" + "typescript": "~5.2.x" }, "dependencies": { - "@aws-cdk/aws-lambda-python-alpha": "^2.102.0-alpha.0", - "@aws-cdk/aws-servicecatalogappregistry-alpha": "^2.102.0-alpha.0", - "@aws-sdk/client-dynamodb": "^3.552.0", - "@aws-sdk/client-ec2": "^3.552.0", - "@aws-sdk/client-rds": "^3.552.0", - "@aws-sdk/client-ssm": "^3.552.0", - "@aws-solutions-constructs/aws-lambda-dynamodb": "^2.54.1", - "aws-cdk-lib": "^2.102.0", - "cdk-nag": "^2.28.87", + "@aws-cdk/aws-lambda-python-alpha": "2.130.0-alpha.0", + "@aws-cdk/aws-neptune-alpha": "2.130.0-alpha.0", + "@aws-cdk/aws-servicecatalogappregistry-alpha": "2.130.0-alpha.0", + "@aws-sdk/client-auto-scaling": "^3.583.0", + "@aws-sdk/client-cloudformation": "^3.583.0", + "@aws-sdk/client-docdb": "^3.583.0", + "@aws-sdk/client-dynamodb": "^3.585.0", + "@aws-sdk/client-ec2": "^3.585.0", + "@aws-sdk/client-lambda": "^3.583.0", + "@aws-sdk/client-neptune": "^3.583.0", + "@aws-sdk/client-rds": "^3.583.0", + "@aws-sdk/client-ssm": "^3.583.0", + "@aws-sdk/util-dynamodb": "^3.585.0", + "aws-cdk-lib": "2.130.0", + "cdk-nag": "^2.28.127", "constructs": "^10.0.5", - "source-map-support": "^0.5.21" + "source-map-support": "^0.5.21", + "uuid": "^8.3.2" }, "engines": { "node": ">= 18.0.0" }, "license": "Apache-2.0", - "version": "1.5.6", + "publishConfig": { + "access": "public" + }, + "version": "3.0.0", "//": "~~ Generated by projen. To modify, edit .projenrc.ts and run \"npx projen\"." } diff --git a/source/app/.gitattributes b/source/app/.gitattributes new file mode 100644 index 00000000..f7738878 --- /dev/null +++ b/source/app/.gitattributes @@ -0,0 +1,9 @@ +# ~~ Generated by projen. To modify, edit .projenrc.js and run "npx projen". 
+ +/.gitattributes linguist-generated +/.gitignore linguist-generated +/.projen/** linguist-generated +/.projen/deps.json linguist-generated +/.projen/files.json linguist-generated +/.projen/tasks.json linguist-generated +/pyproject.toml linguist-generated \ No newline at end of file diff --git a/source/app/.gitignore b/source/app/.gitignore new file mode 100644 index 00000000..96b13f89 --- /dev/null +++ b/source/app/.gitignore @@ -0,0 +1,83 @@ +# ~~ Generated by projen. To modify, edit .projenrc.js and run "npx projen". +node_modules/ +!/.gitattributes +!/.projen/tasks.json +!/.projen/deps.json +!/.projen/files.json +!/pyproject.toml +/poetry.toml +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +*.manifest +*.spec +pip-log.txt +pip-delete-this-directory.txt +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ +*.mo +*.pot +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal +instance/ +.webassets-cache +.scrapy +docs/_build/ +.pybuilder/ +target/ +.ipynb_checkpoints +profile_default/ +ipython_config.py +__pypackages__/ +celerybeat-schedule +celerybeat.pid +*.sage.py +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +.spyderproject +.spyproject +.ropeproject +/site +.mypy_cache/ +.dmypy.json +dmypy.json +.pyre/ +.pytype/ +cython_debug/ diff --git a/source/app/.projen/deps.json b/source/app/.projen/deps.json new file mode 100644 index 00000000..6eff90ed --- /dev/null +++ b/source/app/.projen/deps.json @@ -0,0 +1,145 @@ +{ + "dependencies": [ + { + "name": "black", + "version": "^24.3.0", + "type": "devenv" + }, + { + "name": "boto3-stubs-lite", + "version": "{version = \"^1.34.1\", extras = 
[\"autoscaling\",\"cloudwatch\",\"dynamodb\",\"ec2\",\"ecs\",\"lambda\",\"logs\",\"rds\",\"resourcegroupstaggingapi\",\"sns\",\"ssm\",\"sts\"]}", + "type": "devenv" + }, + { + "name": "boto3", + "version": "^1.34.1", + "type": "devenv" + }, + { + "name": "botocore-stubs", + "version": "^1.31.66", + "type": "devenv" + }, + { + "name": "botocore", + "version": "^1.34.1", + "type": "devenv" + }, + { + "name": "flake8", + "version": "^6.1.0", + "type": "devenv" + }, + { + "name": "freezegun", + "version": "^1.3.1", + "type": "devenv" + }, + { + "name": "isort", + "version": "^5.12.0", + "type": "devenv" + }, + { + "name": "jmespath", + "version": "1.0.1", + "type": "devenv" + }, + { + "name": "moto", + "version": "{version = \"^5.0.2\", extras = [\"autoscaling\",\"dynamodb\",\"ec2\",\"logs\",\"rds\",\"resourcegroupstaggingapi\",\"ssm\"]}", + "type": "devenv" + }, + { + "name": "mypy", + "version": "^1.7.1", + "type": "devenv" + }, + { + "name": "pytest-cov", + "version": "^4.1.0", + "type": "devenv" + }, + { + "name": "pytest-mock", + "version": "^3.12.0", + "type": "devenv" + }, + { + "name": "pytest-runner", + "version": "^6.0.1", + "type": "devenv" + }, + { + "name": "pytest-xdist", + "version": "^3.5.0", + "type": "devenv" + }, + { + "name": "pytest", + "version": "^7.4.3", + "type": "devenv" + }, + { + "name": "python-dateutil", + "version": "2.8.2", + "type": "devenv" + }, + { + "name": "tox", + "version": "^4.11.4", + "type": "devenv" + }, + { + "name": "types-freezegun", + "version": "^1.1.10", + "type": "devenv" + }, + { + "name": "types-jmespath", + "version": "1.0.1", + "type": "devenv" + }, + { + "name": "types-python-dateutil", + "version": "2.8.2", + "type": "devenv" + }, + { + "name": "types-requests", + "version": "2.31.0.6", + "type": "devenv" + }, + { + "name": "types-urllib3", + "version": "^1.26.15", + "type": "devenv" + }, + { + "name": "tzdata", + "version": "^2023.3", + "type": "devenv" + }, + { + "name": "urllib3", + "version": "^1.26.15", + 
"type": "devenv" + }, + { + "name": "aws-lambda-powertools", + "version": "^2.26.0", + "type": "runtime" + }, + { + "name": "packaging", + "version": "^24.0", + "type": "runtime" + }, + { + "name": "python", + "version": "^3.11", + "type": "runtime" + } + ], + "//": "~~ Generated by projen. To modify, edit .projenrc.js and run \"npx projen\"." +} diff --git a/source/app/.projen/files.json b/source/app/.projen/files.json new file mode 100644 index 00000000..ca22e0c3 --- /dev/null +++ b/source/app/.projen/files.json @@ -0,0 +1,12 @@ +{ + "files": [ + ".gitattributes", + ".gitignore", + ".projen/deps.json", + ".projen/files.json", + ".projen/tasks.json", + "poetry.toml", + "pyproject.toml" + ], + "//": "~~ Generated by projen. To modify, edit .projenrc.js and run \"npx projen\"." +} diff --git a/source/app/.projen/tasks.json b/source/app/.projen/tasks.json new file mode 100644 index 00000000..15242f7e --- /dev/null +++ b/source/app/.projen/tasks.json @@ -0,0 +1,101 @@ +{ + "tasks": { + "build": { + "name": "build", + "description": "Full release build", + "steps": [ + { + "spawn": "pre-compile" + }, + { + "spawn": "compile" + }, + { + "spawn": "post-compile" + }, + { + "spawn": "test" + }, + { + "spawn": "package" + } + ] + }, + "compile": { + "name": "compile", + "description": "Only compile" + }, + "default": { + "name": "default", + "description": "Synthesize project files", + "steps": [ + { + "exec": "npx projen default", + "cwd": "../.." 
+ } + ] + }, + "install": { + "name": "install", + "description": "Install dependencies and update lockfile", + "steps": [ + { + "exec": "poetry lock --no-update && poetry install" + } + ] + }, + "install:ci": { + "name": "install:ci", + "description": "Install dependencies with frozen lockfile", + "steps": [ + { + "exec": "poetry check --lock && poetry install" + } + ] + }, + "package": { + "name": "package", + "description": "Creates the distribution package", + "steps": [ + { + "exec": "poetry build" + } + ] + }, + "post-compile": { + "name": "post-compile", + "description": "Runs after successful compilation" + }, + "pre-compile": { + "name": "pre-compile", + "description": "Prepare the project for compilation" + }, + "publish": { + "name": "publish", + "description": "Uploads the package to PyPI.", + "steps": [ + { + "exec": "poetry publish" + } + ] + }, + "publish:test": { + "name": "publish:test", + "description": "Uploads the package against a test PyPI endpoint.", + "steps": [ + { + "exec": "poetry publish -r testpypi" + } + ] + }, + "test": { + "name": "test", + "description": "Run tests" + } + }, + "env": { + "VIRTUAL_ENV": "$(poetry env info -p || poetry run poetry env info -p)", + "PATH": "$(echo $(poetry env info -p)/bin:$PATH)" + }, + "//": "~~ Generated by projen. To modify, edit .projenrc.js and run \"npx projen\"." +} diff --git a/source/app/README.md b/source/app/README.md new file mode 100644 index 00000000..18443ae2 --- /dev/null +++ b/source/app/README.md @@ -0,0 +1 @@ +# Instance Scheduler on AWS diff --git a/source/app/instance_scheduler/__init__.py b/source/app/instance_scheduler/__init__.py index 07045752..660ada2c 100644 --- a/source/app/instance_scheduler/__init__.py +++ b/source/app/instance_scheduler/__init__.py @@ -1,19 +1,11 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -from typing import Literal +import tomllib +from pathlib import Path -UnknownState = Literal["unknown"] -AnyState = Literal["any"] -StoppedState = Literal["stopped"] -StoppedForResizeState = Literal["stopped_for_resize"] -RunningState = Literal["running"] -RetainRunningState = Literal["retain-running"] +__version__ = "unknown" -ScheduleState = ( - UnknownState - | AnyState - | StoppedState - | StoppedForResizeState - | RunningState - | RetainRunningState -) +pyproject_toml_file_path = Path(__file__, "../../pyproject.toml").resolve() +if pyproject_toml_file_path.exists() and pyproject_toml_file_path.is_file(): + with open(pyproject_toml_file_path, "rb") as file: + __version__ = tomllib.load(file)["tool"]["poetry"]["version"] diff --git a/source/app/instance_scheduler/configuration/__init__.py b/source/app/instance_scheduler/configuration/__init__.py index 7ad091bd..5a765433 100644 --- a/source/app/instance_scheduler/configuration/__init__.py +++ b/source/app/instance_scheduler/configuration/__init__.py @@ -1,18 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -from typing import Optional - -from instance_scheduler.configuration.config_dynamodb_adapter import ( - ConfigDynamodbAdapter, -) -from instance_scheduler.configuration.scheduler_config import GlobalConfig -from instance_scheduler.configuration.scheduler_config_builder import ( - SchedulerConfigBuilder, -) -from instance_scheduler.util.app_env import get_app_env -from instance_scheduler.util.logger import Logger - -ENV_STACK = "STACK_NAME" # name of months MONTH_NAMES = [ @@ -50,9 +37,6 @@ # enable SSM maintenance windows ENABLE_SSM_MAINTENANCE_WINDOWS = "enable_ssm_maintenance_windows" -# metrics flag -METRICS = "use_metrics" - # regions REGIONS = "regions" @@ -90,8 +74,6 @@ CREATE_RDS_SNAPSHOT = "create_rds_snapshot" # stop new instances STOP_NEW_INSTANCES = "stop_new_instances" -# use maintenance windows -USE_MAINTENANCE_WINDOW = "use_maintenance_window" # ssm maiantenance windows to use for EC2 SSM_MAINTENANCE_WINDOW = "ssm_maintenance_window" # name of timezone setting for a schedule @@ -140,28 +122,3 @@ TAG_VAL_MONTH = "month" TAG_VAL_DAY = "day" TAG_VAL_TIMEZONE = "timezone" - -__configuration: Optional[GlobalConfig] = None - - -def get_global_configuration(logger: Optional[Logger]) -> GlobalConfig: - """ - Returns the scheduler configuration - :return: scheduler configuration - """ - global __configuration - if __configuration is None: - configdata = ConfigDynamodbAdapter(get_app_env().config_table_name).config - __configuration = SchedulerConfigBuilder(logger=logger).build(configdata) - if logger is not None: - logger.debug("Configuration loaded\n{}", str(__configuration)) - return __configuration - - -def unload_global_configuration() -> None: - """ - Force the configuration to unload - :return: - """ - global __configuration - __configuration = None diff --git a/source/app/instance_scheduler/configuration/config_admin.py b/source/app/instance_scheduler/configuration/config_admin.py deleted file mode 100644 index 
25a02269..00000000 --- a/source/app/instance_scheduler/configuration/config_admin.py +++ /dev/null @@ -1,1032 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import json -import re -from datetime import datetime, timedelta -from typing import TYPE_CHECKING, Any, Literal, Optional, TypedDict, TypeVar -from zoneinfo import ZoneInfo - -from boto3.dynamodb.conditions import Key - -from instance_scheduler import ScheduleState, configuration -from instance_scheduler.configuration.config_dynamodb_adapter import ( - ConfigDynamodbAdapter, -) -from instance_scheduler.configuration.instance_schedule import Instance -from instance_scheduler.configuration.scheduler_config import GlobalConfig -from instance_scheduler.configuration.scheduler_config_builder import ( - SchedulerConfigBuilder, -) -from instance_scheduler.configuration.setbuilders.month_setbuilder import ( - MonthSetBuilder, -) -from instance_scheduler.configuration.setbuilders.monthday_setbuilder import ( - MonthdaySetBuilder, -) -from instance_scheduler.configuration.setbuilders.weekday_setbuilder import ( - WeekdaySetBuilder, -) -from instance_scheduler.util.app_env import get_app_env -from instance_scheduler.util.dynamodb_utils import DynamoDBUtils -from instance_scheduler.util.logger import Logger - -if TYPE_CHECKING: - from aws_lambda_powertools.utilities.typing import LambdaContext - - T = TypeVar("T") -else: - LambdaContext = object - T = object - -ERR_PERIOD_BEGIN_LATER_THAN_END = ( - "error: period begintime {} can not be later than endtime {}" -) -ERR_SCHEDULE_INVALID_OVERRIDE = "{} is not a valid value for {}, possible values are {}" -ERR_SCHEDULE_OVERWRITE_OVERRIDE_EXCLUSIVE = ( - "{} option is mutually exclusive with {} option" -) -ERR_CREATE_PERIOD_EXISTS = "error: period {} already exists" -ERR_CREATE_SCHEDULE_EXISTS = "error: schedule {} already exists" -ERR_DEL_PERIOD_IN_USE = ( - "error: period {} can not be deleted because it 
is still used in schedule(s) {}" -) -ERR_PERIOD_NOT_FOUND = "not found: period {} does not exist" -ERR_DEL_SCHEDULE_NAME_EMPTY = "error: schedule name parameter can not be empty" -ERR_SCHEDULE_NOT_FOUND = "not found: schedule {} does not exist" -ERR_EMPTY_PERIOD_NAME = "error: period name parameter can not be empty" -ERR_GET_SCHEDULE_NAME_EMPTY = "error: error schedule name parameter can not be empty" -ERR_GET_USAGE_INVALID_END_DATE = ( - "error: invalid enddate {}, must be a valid date in format yyyymmdd {}" -) -ERR_GET_USAGE_INVALID_START_DATE = ( - "error: invalid startdate {}, must be a valid date in format yyyymmdd {}" -) -ERR_GET_USAGE_SCHEDULE_NAME_EMPTY = ( - "error: error schedule name parameter can not be empty" -) -ERR_GET_USAGE_START_MUST_BE_LESS_OR_EQUAL_STOP = ( - "stop_date must be equal or later than start_date" -) -ERR_NAME_PARAM_MISSING = "error: name parameter is missing" -ERR_NO_PERIODS = "error: at least one period condition must be specified" -ERR_PERIOD_INVALID_MONTHDAYS = "error: {} is not a valid month days specification" -ERR_PERIOD_INVALID_MONTHS = "error: {} is not a valid months specification" -ERR_PERIOD_INVALID_TIME = "error: {} {} is not a valid time" -ERR_PERIOD_INVALID_WEEKDAYS = "error: {} is not a valid weekdays specification {}" -ERR_PERIOD_UNKNOWN_PARAMETER = ( - "error: {} is not a valid parameter, valid parameters are {}" -) -ERR_SCHEDULE_INVALID_BOOLEAN = "error: {} for parameter {} is not a valid boolean value" -ERR_SCHEDULE_INVALID_TIMEZONE = "error: {} is not a valid time zone for parameter {}" -ERR_SCHEDULE_NAME_MISSING = "error: name parameter is missing" -ERR_SCHEDULE_NO_PERIOD = "error: at least one period must be specified for a schedule" -ERR_SCHEDULE_PERIOD_DOES_NOT_EXISTS = "error: not found: period {} does not exist" -ERR_SCHEDULE_UNKNOWN_PARAMETER = ( - "error: {} is not a valid parameter, valid parameters are {}" -) -ERR_UPDATE_INVALID_BOOL_PARAM = ( - "error: {} for parameter {} is not a valid boolean value" 
-) -ERR_UPDATE_INVALID_TZ_PARAMETER = "error: {} is not a valid time zone for parameter {}" -ERR_UPDATE_SCHEDULE_NAME_EMPTY = "error: schedule name parameter can not be empty" -ERR_UPDATE_TAGNAME_EMPTY = "error: tagname parameter must be specified" -ERR_UPDATE_UNKNOWN_PARAMETER = "error: {} is not a valid parameter" -ERR_UPDATE_UNKNOWN_SERVICE = "{} is not a supported service" -ERR_STOP_MUST_BE_LATER_OR_EQUAL_TO_START = ( - "stop_date must be equal or later than start_date" -) - -INF_ADD_ACCOUNT_EVENT_PERMISSION = ( - "Add permission for account {} to put events on message bus, sid is {}" -) -INF_REMOVE_EVENT_PERMISSION = ( - "Remove permission for account {} to put events on event bus, sid = {}" -) - -ConfigTableConfigItemType = Literal["config"] -ConfigTableScheduleItemType = Literal["schedule"] -ConfigTablePeriodItemType = Literal["period"] - -ConfigTableItemType = ( - ConfigTableConfigItemType | ConfigTableScheduleItemType | ConfigTablePeriodItemType -) - - -class ConfigTableItem(TypedDict, total=False): - type: ConfigTableItemType - name: str - - -class ConfigTableConfigItem(ConfigTableItem): - use_metrics: bool - remote_account_ids: set[str] - organization_id: str - scheduler_role_name: str - namespace: str - aws_partition: str - default_timezone: str - regions: set[str] | list[str] - schedule_lambda_account: bool - tagname: str - trace: bool - enable_ssm_maintenance_windows: bool - scheduled_services: set[str] - schedule_clusters: bool - create_rds_snapshot: bool - started_tags: str - stopped_tags: str - - -OverrideStatusRunning = Literal["running"] -OverrideStatusStopped = Literal["stopped"] - -OverrideStatus = OverrideStatusRunning | OverrideStatusStopped - - -class ConfigTableScheduleItem(ConfigTableItem, total=False): - timezone: str - periods: set[str] - description: str - overwrite: bool - use_metrics: bool - stop_new_instances: bool - use_maintenance_window: bool - ssm_maintenance_window: str - retain_running: bool - enforced: bool - hibernate: bool - 
override_status: OverrideStatus - configured_in_stack: str - - -class ConfigTablePeriodItem(ConfigTableItem, total=False): - begintime: str - endtime: str - weekdays: set[str] - monthdays: set[str] - months: set[str] - description: str - - -class ConfigAdmin: - """ - Implements admin api for Scheduler - """ - - TYPE_ATTR = "type" - # regex for checking time formats H:MM and HH:MM - TIME_REGEX = "^([0|1]?[0-9]|2[0-3]):[0-5][0-9]$" - - SUPPORTED_SERVICES = ["ec2", "rds"] - - class CustomEncoder(json.JSONEncoder): - """ - Custom encoding to handle unsupported data types - """ - - def default(self, o: Any) -> Any: - if isinstance(o, set): - return list(o) - if isinstance(o, datetime): - return o.replace(second=0).strftime("%x %X") - - return json.JSONEncoder.default(self, o) - - def __init__( - self, logger: Optional[Logger], context: Optional[LambdaContext] - ) -> None: - """ - Initializes the config API - :param logger: logger for the admin api - :param context: Lambda context - """ - self._table_name = self.table_name - self._table: Any = DynamoDBUtils.get_dynamodb_table_resource_ref( - self._table_name - ) - self._configuration: Optional[GlobalConfig] = None - self._logger = logger - self._context = context - - @property - def configuration(self) -> GlobalConfig: - """ - Returns and cached configuration - :return: scheduler configuration - """ - if self._configuration is None: - configdata = ConfigDynamodbAdapter(self._table.name).config - self._configuration = SchedulerConfigBuilder(logger=self._logger).build( - configdata - ) - return self._configuration - - @property - def table_name(self) -> str: - return get_app_env().config_table_name - - def update_config(self, settings: ConfigTableConfigItem) -> Any: - """ - Updates configuration, validates new values - :param settings: settings values - :return: updated values - """ - valid_attributes = [ - configuration.METRICS, - configuration.REMOTE_ACCOUNT_IDS, - configuration.ORGANIZATION_ID, - 
configuration.SCHEDULER_ROLE_NAME, - configuration.NAMESPACE, - configuration.AWS_PARTITION, - configuration.DEFAULT_TIMEZONE, - configuration.REGIONS, - configuration.SCHEDULE_LAMBDA_ACCOUNT, - configuration.TAGNAME, - configuration.TRACE, - configuration.ENABLE_SSM_MAINTENANCE_WINDOWS, - ConfigAdmin.TYPE_ATTR, - configuration.SCHEDULED_SERVICES, - configuration.SCHEDULE_CLUSTERS, - configuration.CREATE_RDS_SNAPSHOT, - configuration.STARTED_TAGS, - configuration.STOPPED_TAGS, - ] - - checked_settings: dict[str, str | bool | set[str]] = {} - - for attr in settings: - if attr in [ConfigAdmin.TYPE_ATTR, configuration.NAME]: - continue - - # only valid fields - if attr not in valid_attributes: - raise ValueError(ERR_UPDATE_UNKNOWN_PARAMETER.format(attr)) - - # remove None fields - if settings[attr] is None: # type: ignore[literal-required] - continue - - # remove empty strings - if len(str(settings[attr])) == 0: # type: ignore[literal-required] - continue - - # make sure these fields are set as sets - if attr in [ - configuration.REGIONS, - configuration.REMOTE_ACCOUNT_IDS, - configuration.SCHEDULED_SERVICES, - ]: - temp = self._ensure_set(settings[attr]) # type: ignore[literal-required] - if len(settings[attr]) > 0: # type: ignore[literal-required] - checked_settings[attr] = temp - - continue - - # make sure these fields are valid booleans - if attr in [ - configuration.METRICS, - configuration.TRACE, - configuration.ENABLE_SSM_MAINTENANCE_WINDOWS, - configuration.SCHEDULE_LAMBDA_ACCOUNT, - configuration.CREATE_RDS_SNAPSHOT, - configuration.SCHEDULE_CLUSTERS, - ]: - bool_value = self._ensure_bool(settings[attr]) # type: ignore[literal-required] - if bool_value is None: - raise ValueError( - ERR_UPDATE_INVALID_BOOL_PARAM.format(settings[attr], attr) # type: ignore[literal-required] - ) - checked_settings[attr] = bool_value - continue - - # validate timezone - if attr == configuration.DEFAULT_TIMEZONE: - default_tz = settings[configuration.DEFAULT_TIMEZONE] # type: 
ignore[literal-required] - if not SchedulerConfigBuilder.is_valid_timezone(default_tz): - raise ValueError( - ERR_UPDATE_INVALID_TZ_PARAMETER.format( - default_tz, configuration.DEFAULT_TIMEZONE - ) - ) - checked_settings[attr] = default_tz - continue - - checked_settings[attr] = settings[attr] # type: ignore[literal-required] - - if configuration.TAGNAME not in settings: - raise ValueError(ERR_UPDATE_TAGNAME_EMPTY) - - for service in settings.get("scheduled_services", []): - if service not in ConfigAdmin.SUPPORTED_SERVICES: - raise ValueError(ERR_UPDATE_UNKNOWN_SERVICE.format(service)) - - # keys for config item - checked_settings[ConfigAdmin.TYPE_ATTR] = "config" - checked_settings[configuration.NAME] = "scheduler" - - self._table.put_item(Item=checked_settings) - - return ConfigAdmin._for_output(checked_settings) - - def list_periods(self) -> dict[Literal["periods"], Any]: - """ - Lists all periods - :return: all configured periods - """ - periods = self._list_periods() - for period in periods: - ConfigAdmin._for_output(period) - return {"periods": ConfigAdmin._for_output(periods)} - - def get_period( - self, name: str, exception_if_not_exists: bool = True - ) -> Optional[dict[Literal["period"], Any]]: - """ - Gets a specific period - :param name: name of the period - :param exception_if_not_exists: set to True to raise an exception if it does not exist - :return: - """ - if name is None or len(name) == 0: - raise ValueError(ERR_EMPTY_PERIOD_NAME) - period = self._get_period(name) - if period is None: - if exception_if_not_exists: - raise ValueError(ERR_PERIOD_NOT_FOUND.format(name)) - return None - return {"period": ConfigAdmin._for_output(period)} - - def create_period(self, kwargs: ConfigTablePeriodItem) -> dict[str, Any]: - """ - Creates a new period - :param kwargs: period parameters, see validate_period for allowed parameters - :return: Validated and created period - """ - period = self._validate_period(kwargs) - name = period["name"] - if 
self._get_period(name) is not None: - raise ValueError(ERR_CREATE_PERIOD_EXISTS.format(name)) - self._table.put_item(Item=period) - return {"period": ConfigAdmin._for_output(period)} - - def update_period(self, kwargs: ConfigTablePeriodItem) -> dict[str, Any]: - """ - Updates an existing period - :param kwargs: period data, see validate_period for allowed parameters - :return: validated and updated period - """ - period = self._validate_period(kwargs) - name = period["name"] - if name is None or len(name) == 0: - raise ValueError(ERR_EMPTY_PERIOD_NAME) - if self._get_period(name) is None: - raise ValueError(ERR_PERIOD_NOT_FOUND.format(name)) - self._table.put_item(Item=period) - return {"period": ConfigAdmin._for_output(period)} - - def delete_period( - self, name: str, exception_if_not_exists: bool = False - ) -> Optional[dict[Literal["period"], str]]: - """ - Deletes a period. Note that a period can ony be deleted when not longer used in any schedule - :param name: Name of the period - :param exception_if_not_exists: Set to true is an exception should be raised if the period did not exist - :return: - """ - if name is None or len(name) == 0: - raise ValueError(ERR_EMPTY_PERIOD_NAME) - - # test if period is used in any schedule - schedules_using_period = [] - for s in self._list_schedules(): - for p in s.get(configuration.PERIODS, []): - # period can be a combination of name and instance type - if p.split(configuration.INSTANCE_TYPE_SEP)[0] == name: - schedules_using_period.append(s[configuration.NAME]) - - if len(schedules_using_period) > 0: - raise ValueError( - ERR_DEL_PERIOD_IN_USE.format(name, ", ".join(schedules_using_period)) - ) - - if self._get_period(name) is not None: - self._table.delete_item(Key={"name": name, "type": "period"}) - return {"period": name} - else: - if exception_if_not_exists: - raise ValueError(ERR_PERIOD_NOT_FOUND.format(name)) - return None - - def list_schedules(self) -> dict[Literal["schedules"], Any]: - """ - List all configured 
schedules - :return: all schedules - """ - schedules = self._list_schedules() - return {"schedules": ConfigAdmin._for_output(schedules)} - - def get_schedule( - self, name: str, exception_if_not_exists: bool = True - ) -> Optional[dict[Literal["schedule"], Any]]: - """ - Gets the information for a specific schedule - :param name: name of the schedule - :param exception_if_not_exists: set to True if an exception should be raised if the schedule does not exist - :return: schedule data, Non if schedule does not exists and exception_if_not_exists is set to False - """ - if name is None or len(name) == 0: - raise ValueError(ERR_GET_SCHEDULE_NAME_EMPTY) - schedule = self._get_schedule(name) - if schedule is None: - if exception_if_not_exists: - raise ValueError(ERR_SCHEDULE_NOT_FOUND.format(name)) - return None - return {"schedule": ConfigAdmin._for_output(schedule)} - - def create_schedule( - self, kwargs: ConfigTableScheduleItem - ) -> dict[Literal["schedule"], Any]: - """ - Creates a new schedule - :param kwargs: schedule data, see validate_schedule for allowed parameters - :return: Validated data of created schedule - """ - schedule = self._validate_schedule(kwargs) - name = schedule["name"] - if self._get_schedule(name) is not None: - raise ValueError(ERR_CREATE_SCHEDULE_EXISTS.format(name)) - self._table.put_item(Item=schedule) - return {"schedule": ConfigAdmin._for_output(schedule)} - - def update_schedule( - self, kwargs: ConfigTableScheduleItem - ) -> dict[Literal["schedule"], Any]: - """ - Updates an existing schedule - :param kwargs: schedule data, see validate_schedule for allowed parameters - :return: Validated updated schedule - """ - schedule = self._validate_schedule(kwargs) - name = schedule["name"] - if name is None or len(name) == 0: - raise ValueError(ERR_UPDATE_SCHEDULE_NAME_EMPTY) - if self._get_schedule(name) is None: - raise ValueError(ERR_SCHEDULE_NOT_FOUND.format(name)) - self._table.put_item(Item=schedule) - return {"schedule": 
ConfigAdmin._for_output(schedule)} - - def delete_schedule( - self, name: str, exception_if_not_exists: bool = True - ) -> Optional[dict[Literal["schedule"], str]]: - """ - Deletes a schedule - :param name: name of the schedule - :param exception_if_not_exists: True if an exception should be raised if the schedule does not exists - :return: Deleted schedule, None if it did not exist - """ - if name is None or len(name) == 0: - raise ValueError(ERR_DEL_SCHEDULE_NAME_EMPTY) - if self._get_schedule(name) is None: - if exception_if_not_exists: - raise ValueError(ERR_SCHEDULE_NOT_FOUND.format(name)) - return None - self._table.delete_item(Key={"name": name, "type": "schedule"}) - return {"schedule": name} - - def get_schedule_usage( - self, - name: str, - startdate: Optional[datetime] = None, - enddate: Optional[datetime] = None, - ) -> Any: - """ - Get running periods for a schedule in a period - :param name: name of the schedule - :param startdate: start date of the period, None is today - :param enddate: end date of the period, None is today - :return: dictionary containing the periods in the specified in which instances are running as well as the % saving - in running hours - """ - if name is None or len(name) == 0: - raise ValueError(ERR_GET_USAGE_SCHEDULE_NAME_EMPTY) - - schedule = self.configuration.get_schedule(name) - if schedule is None: - raise ValueError(ERR_SCHEDULE_NOT_FOUND.format(name)) - - if startdate: - if not isinstance(startdate, datetime): - try: - start = datetime.strptime(startdate, "%Y%m%d").replace( - tzinfo=ZoneInfo(schedule.timezone) - ) - except ValueError as ex: - raise ValueError( - ERR_GET_USAGE_INVALID_START_DATE.format(startdate, str(ex)) - ) - else: - start = startdate - else: - tz = ZoneInfo(schedule.timezone) - start = startdate or datetime.now(tz) - - if enddate: - if not isinstance(enddate, datetime): - try: - end = datetime.strptime(enddate, "%Y%m%d").replace( - tzinfo=ZoneInfo(schedule.timezone) - ) - except ValueError as ex: - 
raise ValueError( - ERR_GET_USAGE_INVALID_END_DATE.format(enddate, str(ex)) - ) - else: - end = enddate - else: - end = start - - if start > end: - raise ValueError(ERR_GET_USAGE_START_MUST_BE_LESS_OR_EQUAL_STOP) - - periods = self.calculate_schedule_usage_for_period( - name, start_dt=start, stop_dt=end - ) - - # to json and back again using custom encoder to convert datetimes - return ConfigAdmin._for_output(periods) - - @staticmethod - def _for_output(item: Any) -> Any: - # to anf from json using custom encoder to convert datetime and set type data into string and lists - return json.loads(json.dumps(item, cls=ConfigAdmin.CustomEncoder)) - - @staticmethod - def _ensure_set(s: list[str] | set[str] | str) -> set[str]: - if isinstance(s, list): - return set(s) - if isinstance(s, str): - return set(s.split(",")) - return s - - @staticmethod - def _ensure_bool(b: Any) -> Optional[bool]: - s = str(b).lower() - if s == "true": - return True - if s == "false": - return False - return None - - def _validate_period(self, period: ConfigTablePeriodItem) -> ConfigTablePeriodItem: - result: ConfigTablePeriodItem = {} - - def is_valid_time(s: Any) -> bool: - return re.match(ConfigAdmin.TIME_REGEX, s) is not None - - # allowed and validated parameters - valid_parameters = [ - configuration.BEGINTIME, - configuration.ENDTIME, - configuration.WEEKDAYS, - configuration.MONTHDAYS, - configuration.MONTHS, - configuration.NAME, - configuration.DESCRIPTION, - ] - - for attr in period: - # indicates type for config entry - if attr == ConfigAdmin.TYPE_ATTR: - continue - - # parameter is allowed? 
- if attr not in valid_parameters: - raise ValueError( - ERR_PERIOD_UNKNOWN_PARAMETER.format(attr, str(valid_parameters)) - ) - - # remove None values - if period[attr] is None or len(str(period[attr])) == 0: # type: ignore[literal-required] - continue - - # period name - if attr == configuration.NAME: - result[attr] = period[attr] # type: ignore[literal-required] - continue - - # description - if attr == configuration.DESCRIPTION: - result[attr] = period[attr] # type: ignore[literal-required] - continue - - # validate start and end types times - if attr in [configuration.BEGINTIME, configuration.ENDTIME]: - time_str = period[attr] # type: ignore[literal-required] - if not is_valid_time(time_str): - raise ValueError(ERR_PERIOD_INVALID_TIME.format(attr, time_str)) - result[attr] = str( # type: ignore[literal-required] - datetime.strptime(time_str, configuration.TIME_FORMAT_STRING).time() - )[0 : len(configuration.TIME_FORMAT_STRING)] - if ( - configuration.BEGINTIME in result - and configuration.ENDTIME in result - ): - begintime = datetime.strptime( - result["begintime"], - configuration.TIME_FORMAT_STRING, - ).time() - endtime = datetime.strptime( - result["endtime"], configuration.TIME_FORMAT_STRING - ).time() - if begintime > endtime: - raise ValueError( - ERR_PERIOD_BEGIN_LATER_THAN_END.format( - result["begintime"], - result["endtime"], - ) - ) - - continue - - # check weekdays, monthdays and month sets - if attr in [ - configuration.WEEKDAYS, - configuration.MONTHDAYS, - configuration.MONTHS, - ]: - temp = self._ensure_set(period[attr]) # type: ignore[literal-required] - - if len(temp) == 0: - continue - - # validate month - if attr == configuration.MONTHS: - try: - MonthSetBuilder().build(temp) - result[attr] = temp # type: ignore[literal-required] - continue - except Exception: - raise ValueError( - ERR_PERIOD_INVALID_MONTHS.format(str(period[attr])) # type: ignore[literal-required] - ) - - # validate weekdays - if attr == configuration.WEEKDAYS: - try: - 
wdb = WeekdaySetBuilder(year=2016, month=12, day=31) - wdb.build(temp) - result[attr] = temp # type: ignore[literal-required] - continue - except Exception as ex: - raise ValueError( - ERR_PERIOD_INVALID_WEEKDAYS.format(str(period[attr]), ex) # type: ignore[literal-required] - ) - - # validate monthdays - if attr == configuration.MONTHDAYS: - try: - MonthdaySetBuilder(year=2016, month=12).build(temp) - result[attr] = temp # type: ignore[literal-required] - continue - except Exception: - raise ValueError( - ERR_PERIOD_INVALID_MONTHDAYS.format(str(period[attr])) # type: ignore[literal-required] - ) - - if configuration.NAME not in result: - raise ValueError(ERR_NAME_PARAM_MISSING) - - for condition in [ - configuration.BEGINTIME, - configuration.ENDTIME, - configuration.WEEKDAYS, - configuration.MONTHS, - configuration.MONTHDAYS, - ]: - if condition in result: - break - else: - raise ValueError(ERR_NO_PERIODS) - - result["type"] = "period" - - return result - - # check schedule before writing it to the database - def _validate_schedule( - self, schedule: ConfigTableScheduleItem - ) -> ConfigTableScheduleItem: - validated_schedule: ConfigTableScheduleItem = {} - - # allowed parameters - valid_parameters = [ - configuration.TIMEZONE, - configuration.PERIODS, - configuration.NAME, - configuration.DESCRIPTION, - configuration.OVERWRITE, - configuration.METRICS, - configuration.STOP_NEW_INSTANCES, - configuration.USE_MAINTENANCE_WINDOW, - configuration.SSM_MAINTENANCE_WINDOW, - configuration.RETAINED_RUNNING, - configuration.ENFORCED, - configuration.HIBERNATE, - configuration.OVERRIDE_STATUS, - configuration.SCHEDULE_CONFIG_STACK, - ] - - for attr in schedule: - if attr == ConfigAdmin.TYPE_ATTR: - continue - - if attr not in valid_parameters: - raise ValueError( - ERR_SCHEDULE_UNKNOWN_PARAMETER.format(attr, valid_parameters) - ) - - # skip None values - if schedule[attr] is None or len(str(schedule[attr])) == 0: # type: ignore[literal-required] - continue - - # check 
periods set - if attr == configuration.PERIODS: - temp = self._ensure_set(schedule[attr]) # type: ignore[literal-required] - if len(temp) > 0: - validated_schedule[attr] = temp # type: ignore[literal-required] - continue - - if attr in [configuration.NAME, configuration.SSM_MAINTENANCE_WINDOW]: - validated_schedule[attr] = schedule[attr] # type: ignore[literal-required] - continue - - # make sure these fields are valid booleans - if attr in [ - configuration.METRICS, - configuration.STOP_NEW_INSTANCES, - configuration.USE_MAINTENANCE_WINDOW, - configuration.RETAINED_RUNNING, - configuration.HIBERNATE, - configuration.ENFORCED, - ]: - bool_value = self._ensure_bool(schedule[attr]) # type: ignore[literal-required] - if bool_value is None: - raise ValueError( - ERR_SCHEDULE_INVALID_BOOLEAN.format(schedule[attr], attr) # type: ignore[literal-required] - ) - validated_schedule[attr] = bool_value # type: ignore[literal-required] - continue - - # overwrite status, now deprecated, use PROP_OVERRIDE_STATUS instead - if attr == configuration.OVERWRITE: - if configuration.OVERRIDE_STATUS in schedule: - raise ValueError( - ERR_SCHEDULE_OVERWRITE_OVERRIDE_EXCLUSIVE.format( - configuration.OVERWRITE, configuration.OVERRIDE_STATUS - ) - ) - - bool_value = self._ensure_bool(schedule[attr]) # type: ignore[literal-required] - if bool_value is None: - raise ValueError( - ERR_SCHEDULE_INVALID_BOOLEAN.format(schedule[attr], attr) # type: ignore[literal-required] - ) - validated_schedule["override_status"] = ( - "running" if bool_value else "stopped" - ) - continue - - if attr == configuration.OVERRIDE_STATUS: - if configuration.OVERWRITE in schedule: - raise ValueError( - ERR_SCHEDULE_OVERWRITE_OVERRIDE_EXCLUSIVE.format( - configuration.OVERWRITE, configuration.OVERRIDE_STATUS - ) - ) - if schedule[attr] not in configuration.OVERRIDE_STATUS_VALUES: # type: ignore[literal-required] - raise ValueError( - ERR_SCHEDULE_INVALID_OVERRIDE.format( - schedule[attr], # type: 
ignore[literal-required] - attr, - ",".join(configuration.OVERRIDE_STATUS_VALUES), - ) - ) - validated_schedule[attr] = schedule[attr] # type: ignore[literal-required] - continue - - # description - if attr in [configuration.DESCRIPTION, configuration.SCHEDULE_CONFIG_STACK]: - validated_schedule[attr] = schedule[attr] # type: ignore[literal-required] - continue - - # validate timezone - if attr == configuration.TIMEZONE: - timezone = schedule[configuration.TIMEZONE] # type: ignore[literal-required] - if not SchedulerConfigBuilder.is_valid_timezone(timezone): - raise ValueError( - ERR_SCHEDULE_INVALID_TIMEZONE.format( - timezone, configuration.TIMEZONE - ) - ) - validated_schedule[attr] = timezone # type: ignore[literal-required] - - # name is mandatory - if configuration.NAME not in validated_schedule: - raise ValueError(ERR_SCHEDULE_NAME_MISSING) - - # if there is no overwrite there must be at least one period - if configuration.OVERRIDE_STATUS not in schedule and ( - configuration.PERIODS not in schedule - or len(schedule[configuration.PERIODS]) == 0 # type: ignore[literal-required] - ): - raise ValueError(ERR_SCHEDULE_NO_PERIOD) - - # validate if periods are in configuration - if configuration.PERIODS in validated_schedule: - # get list of all configured periods - periods_from_db = [p[configuration.NAME] for p in self._list_periods()] - - configured_periods = validated_schedule["periods"] - if not isinstance( - configured_periods, set - ): # should be impossible, but mypy cannot currently prove it - raise ValueError( - "Expected configuration periods to be a string set but received {} instead", - type(configured_periods), - ) - - for configured_period in configured_periods: - # todo: this behavior of splitting period names to get Instance_type is too widely known and needs - # todo: to be centralized somewhere - if ( - configured_period.split(configuration.INSTANCE_TYPE_SEP)[0] - not in periods_from_db - ): - raise ValueError( - 
ERR_SCHEDULE_PERIOD_DOES_NOT_EXISTS.format(configured_period) - ) - - # indicates this is a schedule - validated_schedule["type"] = "schedule" - - return validated_schedule - - def _items_of_type(self, config_type: ConfigTableItemType) -> list[Any]: - result = [] - - args = {"FilterExpression": Key("type").eq(config_type), "ConsistentRead": True} - - while True: - resp = self._table.scan(**args) # todo: why are we doing a scan here? - result += resp.get("Items", []) - if "LastEvaluatedKey" in resp: - args["ExclusiveStartKey"] = resp["LastEvaluatedKey"] - else: - break - - return result - - def _list_schedules(self) -> list[Any]: - return self._items_of_type("schedule") - - def _list_periods(self) -> list[Any]: - return self._items_of_type("period") - - def _get_schedule(self, schedule_name: str) -> Any: - resp = self._table.get_item( - Key={"name": schedule_name, "type": "schedule"}, ConsistentRead=True - ) - return resp.get("Item", None) - - def _get_period(self, period_name: str) -> Any: - resp = self._table.get_item( - Key={"name": period_name, "type": "period"}, ConsistentRead=True - ) - return resp.get("Item", None) - - def calculate_schedule_usage_for_period( - self, - schedule_name: str, - start_dt: datetime, - stop_dt: Optional[datetime] = None, - logger: Optional[Logger] = None, - ) -> dict[str, Any]: - result = {} - - def running_seconds(startdt: datetime, stopdt: datetime) -> int: - return max(int((stopdt - startdt).total_seconds()), 60) - - def running_hours(startdt: datetime, stopdt: datetime) -> int: - return int(((stopdt - startdt).total_seconds() - 1) / 3600) + 1 - - def make_period(started_dt: Any, stopped_dt: Any) -> dict[str, Any]: - running_period = { - "begin": started_dt, - "end": stopped_dt, - "billing_hours": running_hours(started_dt, stopped_dt), - "billing_seconds": running_seconds(started_dt, stopped_dt), - } - return running_period - - self._logger = logger - - stop = stop_dt or start_dt - if start_dt > stop: - raise 
ValueError(ERR_STOP_MUST_BE_LATER_OR_EQUAL_TO_START) - - dt = ( - start_dt - if isinstance(start_dt, datetime) - else datetime(start_dt.year, start_dt.month, start_dt.day) - ) - - config_data = ConfigDynamodbAdapter(self._table.name).config - - while dt <= stop: - self._configuration = SchedulerConfigBuilder(logger=self._logger).build( - config_data - ) - conf = SchedulerConfigBuilder(self._logger).build(config=config_data, dt=dt) - schedule = conf.get_schedule(schedule_name) - if schedule is None: - raise ValueError - - timeline = {dt.replace(hour=0, minute=0)} - for p in schedule.periods: - begintime = p["period"].begintime - endtime = p["period"].endtime - if begintime is None and endtime is None: - timeline.add(dt.replace(hour=0, minute=0)) - timeline.add(dt.replace(hour=23, minute=59)) - else: - if begintime: - timeline.add( - dt.replace(hour=begintime.hour, minute=begintime.minute) - ) - if endtime: - timeline.add( - dt.replace(hour=endtime.hour, minute=endtime.minute) - ) - - running_periods = {} - started = None - starting_period = None - current_state: Optional[ScheduleState] = None - inst = Instance( - instance_str="instance", - allow_resize=False, - id="", - hibernate=False, - state="", - state_name="", - is_running=False, - is_terminated=False, - current_state="stopped", - instancetype="", - maintenance_window=None, - tags={}, - name="", - schedule_name="", - ) - for tm in sorted(list(timeline)): - desired_state, _, period = schedule.get_desired_state( - inst, tm, self._logger, False - ) - - if current_state != desired_state: - if desired_state == "running": - started = tm - current_state = "running" - starting_period = period - elif desired_state == "stopped": - stopped = tm - ( - desired_state_with_adj_check, - __, - ___, - ) = schedule.get_desired_state(inst, tm, self._logger, True) - if desired_state_with_adj_check == "running": - stopped += timedelta(minutes=1) - if current_state == "running": - current_state = "stopped" - 
running_periods[starting_period] = make_period( - started, stopped - ) - - if current_state == "running": - stopped = dt.replace(hour=23, minute=59) + timedelta(minutes=1) - running_periods[starting_period] = make_period(started, stopped) - - result[str(dt.date())] = { - "running_periods": running_periods, - "billing_seconds": sum( - [running_periods[ps]["billing_seconds"] for ps in running_periods] - ), - "billing_hours": sum( - [running_periods[ph]["billing_hours"] for ph in running_periods] - ), - } - - dt += timedelta(days=1) - - return {"schedule": schedule_name, "usage": result} diff --git a/source/app/instance_scheduler/configuration/config_dynamodb_adapter.py b/source/app/instance_scheduler/configuration/config_dynamodb_adapter.py deleted file mode 100644 index 089762a5..00000000 --- a/source/app/instance_scheduler/configuration/config_dynamodb_adapter.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -from boto3.dynamodb.conditions import Key - -from instance_scheduler import configuration -from instance_scheduler.util.dynamodb_utils import DynamoDBUtils - - -class ConfigDynamodbAdapter: - """ - Adapter to load configuration from a DynamoDB storage type. 
- """ - - def __init__(self, tablename): - self._tablename = tablename - self._config = None - - @property - def config(self): - """ - Returns and cached the configuration data - :return: - """ - if self._config is None: - self._config = self._get_config() - return self._config - - def _get_config(self): - dynamodb_table = DynamoDBUtils.get_dynamodb_table_resource_ref(self._tablename) - - resp = dynamodb_table.get_item( - Key={"name": "scheduler", "type": "config"}, ConsistentRead=True - ) - config = resp.get("Item", {}) - resp = dynamodb_table.query(KeyConditionExpression=Key("type").eq("period")) - config[configuration.PERIODS] = resp.get("Items") - resp = dynamodb_table.query(KeyConditionExpression=Key("type").eq("schedule")) - config[configuration.SCHEDULES] = resp.get("Items") - - return config diff --git a/source/app/instance_scheduler/configuration/global_config_builder.py b/source/app/instance_scheduler/configuration/global_config_builder.py new file mode 100644 index 00000000..e69de29b diff --git a/source/app/instance_scheduler/configuration/instance_schedule.py b/source/app/instance_scheduler/configuration/instance_schedule.py index 6c461f8a..96b770a8 100644 --- a/source/app/instance_scheduler/configuration/instance_schedule.py +++ b/source/app/instance_scheduler/configuration/instance_schedule.py @@ -1,19 +1,20 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -from collections.abc import Callable from dataclasses import dataclass, field from datetime import datetime, timedelta -from typing import Any, ClassVar, Optional, TypedDict +from typing import Optional, Sequence, TypedDict from zoneinfo import ZoneInfo -from typing_extensions import NotRequired +from aws_lambda_powertools import Logger as PowerToolsLogger -from instance_scheduler import ScheduleState, configuration +from instance_scheduler import configuration from instance_scheduler.configuration.running_period import RunningPeriod from instance_scheduler.configuration.running_period_dict_element import ( RunningPeriodDictElement, ) +from instance_scheduler.schedulers.states import ScheduleState from instance_scheduler.util.logger import Logger +from instance_scheduler.util.time import is_aware DEBUG_ACTIVE_PERIOD_IN_SCHEDULE = 'Active period{} in schedule "{}": {}' DEBUG_NO_RUNNING_PERIODS = 'No running periods at this time found in schedule "{}" for this time, desired state is {}' @@ -30,30 +31,6 @@ DEBUG_USED_TIME_FOR_SCHEDULE = "Time used to determine desired for instance is {}" -class Instance(TypedDict): - id: str - arn: NotRequired[str] - allow_resize: bool - hibernate: bool - state: Any - state_name: str - is_running: bool - is_terminated: bool - current_state: ScheduleState - instancetype: str - engine_type: NotRequired[str] - maintenance_window: Optional["InstanceSchedule"] - tags: dict[str, str] - name: str - schedule_name: Optional[str] - is_cluster: NotRequired[bool] - resized: NotRequired[bool] - account: NotRequired[str] - region: NotRequired[str] - service: NotRequired[str] - instance_str: NotRequired[str] - - class PeriodWithDesiredState(TypedDict): period: RunningPeriod instancetype: Optional[str] @@ -62,41 +39,28 @@ class PeriodWithDesiredState(TypedDict): @dataclass class InstanceSchedule: - STATE_UNKNOWN: ClassVar[str] = "unknown" - STATE_ANY: ClassVar[str] = "any" - STATE_STOPPED: ClassVar[str] = 
"stopped" - STATE_STOPPED_FOR_RESIZE: ClassVar[str] = "stopped_for_resize" - STATE_RUNNING: ClassVar[str] = "running" - STATE_RETAIN_RUNNING: ClassVar[str] = "retain-running" - - # todo: reduce the number of optionals here, it complicates all downstream dependencies name: str + timezone: ZoneInfo periods: list[RunningPeriodDictElement] = field(default_factory=list) - # todo: UTC was defined as the default in the original comments but we need to confirm - # exactly how default tz is loaded from cfn input parameters, test it, and then decide if we should remove this - # fallback entirely - timezone: str = "UTC" override_status: Optional[str] = None description: Optional[str] = None - use_metrics: Optional[bool] = None - stop_new_instances: Optional[bool] = None - use_maintenance_window: Optional[bool] = False - ssm_maintenance_window: Optional[str] = None - enforced: Optional[bool] = False - hibernate: Optional[bool] = False - retain_running: Optional[bool] = False - # todo: this value is loaded in global_config but is not respected by scheduling_context. 
- # when these are unified, this may be a behavioral change to consider + stop_new_instances: bool = True + ssm_maintenance_window: Optional[Sequence[str]] = None + enforced: bool = False + hibernate: bool = False + retain_running: bool = False configured_in_stack: Optional[str] = None def __post_init__(self) -> None: - self._logger: Optional[Logger] = None + self._logger: Optional[Logger | PowerToolsLogger] = None def _log_debug(self, msg: str, *args: Optional[str]) -> None: if self._logger is not None: self._logger.debug(msg, *args) - def __str__(self) -> str: + def __str__( # NOSONAR -- (cog-complexity) is just a string-formatting function + self, + ) -> str: s = 'Schedule "{}": '.format(self.name) attributes = [] if self.description: @@ -115,15 +79,9 @@ def __str__(self) -> str: "" if self.stop_new_instances else "not " ) ) - if self.use_maintenance_window is not None: - attributes.append( - "maintenance windows are {} used to start instances".format( - "" if self.use_maintenance_window else "not " - ) - ) - if self.ssm_maintenance_window is not None and self.use_maintenance_window: + if self.ssm_maintenance_window: attributes.append( - "SSM maintenance window is {} used to start EC2 instances".format( + "SSM maintenance windows configured for this scheduler are {}".format( self.ssm_maintenance_window ) ) @@ -157,195 +115,128 @@ def __str__(self) -> str: s += "\n".join(attributes) return s + def _localize_time(self, time: datetime) -> datetime: + if not is_aware(time): + raise ValueError("Attempted to localize non-timezone-aware datetime") + return time.astimezone(self.timezone) + def get_desired_state( self, - instance: Instance, dt: datetime, - logger: Optional[Logger] = None, + logger: Optional[Logger | PowerToolsLogger] = None, check_adjacent_periods: bool = True, ) -> tuple[ScheduleState, Optional[str], Optional[str]]: """ Test if an instance should be running at a specific moment in this schedule - :param instance: the instance to test :param logger: 
logger for logging output of scheduling logic :param dt: date time to use for scheduling, THIS MUST BE A TIMEZONE-AWARE DATETIME :param check_adjacent_periods: check for adjacent periods in a schedule :return: desired state, instance type and name of the active period of the schedule if the state is running """ + self._logger = logger - # gets the local time using the configured timezone - def get_check_time(time: datetime) -> datetime: - return time.astimezone(ZoneInfo(self.timezone)) - - # actions for desired state is running - def handle_running_state( - inst: Instance, periods: list[PeriodWithDesiredState] - ) -> tuple[ScheduleState, Optional[str], str]: - # used to determining most nearest period if more than one period returns a running state in a schedule - def latest_starttime( - p1: PeriodWithDesiredState, p2: PeriodWithDesiredState - ) -> PeriodWithDesiredState: - if p1["period"].begintime is None: - return p2 - if p2["period"].begintime is None: - return p1 - return p1 if p1["period"].begintime > p2["period"].begintime else p2 - - # test if we need to change the type of the instance - def requires_adjust_instance_size( - desired_instance_type: Optional[str], checked_instance: Instance - ) -> bool: - return ( - checked_instance["allow_resize"] - and desired_instance_type is not None - and checked_instance["is_running"] - and desired_instance_type != checked_instance["instancetype"] - ) - - # reduce is removed from python3, replace by minimal implementation for python3 compatibility - def _reduce( - fn: Callable[ - [PeriodWithDesiredState, PeriodWithDesiredState], - PeriodWithDesiredState, - ], - items: list[PeriodWithDesiredState], - ) -> Optional[PeriodWithDesiredState]: - if items is None or len(list(items)) == 0: - return None - else: - result = items[0] - i = 1 - while i < len(items): - result = fn(result, items[i]) - i += 1 - return result - - # nearest period in schedule with running state - current_running_period = _reduce(latest_starttime, 
periods) - - if not current_running_period: - raise ValueError( - "Tried to find the latest start time of an empty list of periods" - ) - - multiple_active_periods = len(list(periods)) > 1 - - self._log_debug( - DEBUG_ACTIVE_PERIOD_IN_SCHEDULE.format( - "s" if multiple_active_periods else "", - self.name, - ",".join('"' + per["period"].name + '"' for per in periods), - ) - ) - if multiple_active_periods: - self._log_debug( - DEBUG_USED_PERIOD.format(current_running_period["period"].name) - ) - - desired_state: ScheduleState = "running" - desired_type: Optional[str] = ( - current_running_period["instancetype"] if inst["allow_resize"] else None - ) + # must localize the scheduling time to the timezone of the schedule + localized_time = self._localize_time(dt) + self._log_debug( + f"Time used to determine desired_state for schedule {self.name}" + f" is {localized_time.strftime('%c')} {localized_time.tzname()}" + ) + desired_state, desired_type, period_name = self._get_desired_state_at_time( + localized_time + ) - # check if the instance type matches the desired type, if not set the status to stopped if the instance is currently - # and the instance will be started with the desired type at the next invocation - if requires_adjust_instance_size(desired_type, inst): - desired_state = "stopped_for_resize" + """ + when returning a stopped state, it is possible that we are immediately between 2 adjacent running periods + (4:00-12:00, 12:01-5:00). In this scenario if we ran at 12:00 we would get a "stopped" state because we are + at the end of the first period but the second period has not started yet. 
To overcome this, we check + 1 minute in the past and 1 minute in the future (11:59 and 12:01) and if both checks return a "running" + state we return the desired state (running) and type of the new period about to be entered, + """ + if ( + len(self.periods) > 1 + and desired_state == ScheduleState.STOPPED + and check_adjacent_periods + ): + self._log_debug("Checking for adjacent running periods at current time") + ( + prev_desired_state, + prev_desired_type, + prev_period_name, + ) = self._get_desired_state_at_time(localized_time - timedelta(minutes=1)) + ( + future_desired_state, + future_desired_type, + future_period_name, + ) = self._get_desired_state_at_time(localized_time + timedelta(minutes=1)) + + if ( + prev_desired_state == ScheduleState.RUNNING + and future_desired_state == ScheduleState.RUNNING + ): self._log_debug( - DEBUG_SET_DESIRED_INSTANCE_TYPE, - inst["instancetype"], - desired_type, - desired_state, + "Adjacent periods found, keep instance in running state" ) - return desired_state, desired_type, current_running_period["period"].name + return future_desired_state, future_desired_type, future_period_name - # actions for desired state is any state - def handle_any_state() -> tuple[ScheduleState, None, None]: - desired_state: ScheduleState = "any" - self._log_debug(DEBUG_STATE_ANY, self.name, desired_state) - return desired_state, None, None + return desired_state, desired_type, period_name - # actions for desired state is stopped - def handle_stopped_state() -> tuple[ScheduleState, None, None]: - desired_state: ScheduleState = "stopped" - self._log_debug(DEBUG_NO_RUNNING_PERIODS, self.name, desired_state) - return desired_state, None, None + def _get_desired_state_at_time( + self, + localized_time: datetime, + ) -> tuple[ScheduleState, Optional[str], Optional[str]]: + """ + core logic for determining the desired state of a schedule at a specific instant in time + :param localized_time: a datetime object that MUST BE LOCALIZED to the schedule's 
current timezone using + _localalize_time(). Failure to localize this datetime to the correct timezone will cause incorrect timezone + behavior + :return: a tuple of (desired_state, desired_type, period_name) where period_name is the name of the + "most authoritative" period that determined the other 2 values + """ - # actions if there is an override value set for the schema - def handle_override_status() -> tuple[ScheduleState, None, str]: + # override_status flag short-circuit override + if self.override_status: desired_state: ScheduleState = ( - "running" + ScheduleState.RUNNING if self.override_status == configuration.OVERRIDE_STATUS_RUNNING - else "stopped" + else ScheduleState.STOPPED ) self._log_debug(DEBUG_OVERRIDE_STATUS, self.override_status, desired_state) return desired_state, None, "override_status" - self._logger = logger - - # always on or off - if self.override_status is not None: - return handle_override_status() - - # test if time is withing any period of the schedule - localized_time = get_check_time(dt) - - self._log_debug(DEBUG_USED_TIME_FOR_SCHEDULE, localized_time.strftime("%c")) - - # get the desired state for every period in the schedule + # get a list of all period schedules along with their desired states at the specified time periods_with_desired_states = self.get_periods_with_desired_states( localized_time ) - # get periods from the schema that have a running state - periods_with_running_state = [ - p for p in periods_with_desired_states if p["state"] == "running" - ] - - if any(periods_with_running_state): - return handle_running_state(instance, periods_with_running_state) - - period_with_any_state = filter( - lambda period: period["state"] == "any", - periods_with_desired_states, - ) - if any(period_with_any_state): - return handle_any_state() - - if len(periods_with_desired_states) > 1 and check_adjacent_periods: - self._log_debug("Checking for adjacent running periods at current time") - self._log_debug("Checking states for previous 
minute") - last_minute_running_periods = [ - p - for p in self.get_periods_with_desired_states( - localized_time - timedelta(minutes=1) - ) - if p["state"] == "running" + # desired states have a relative priority of running > any > stopped. The desired state of a schedule is + # the highest priority state of any period within that schedule + if any(period["state"] == "running" for period in periods_with_desired_states): + """ + in the event that we have multiple overlapping periods that are all in the "running" state we must pick + a specific period from which to get the "desired_type" from. This "most_authoritative" period + is defined as the period that most recently started. + + For example: + Period1 = 6am-6pm, Period2 = 10am-4pm, CurrentTime = 1pm + most authoritative period = Period2 because it is the most recent one to have started + """ + running_periods = [ + period + for period in periods_with_desired_states + if period["state"] == "running" ] - self._log_debug( - "Running period(s) for previous minute {}", - ",".join([p["period"].name for p in last_minute_running_periods]), - ) - if len(last_minute_running_periods) > 0: - self._log_debug("Checking states for next minute") - next_minute_running_periods = [ - p - for p in self.get_periods_with_desired_states( - localized_time + timedelta(minutes=1) - ) - if p["state"] == "running" - ] - self._log_debug( - "Running period(s) for next minute {}", - ",".join([p["period"].name for p in next_minute_running_periods]), - ) - if len(next_minute_running_periods): - self._log_debug( - "Adjacent periods found, keep instance in running state" - ) - return handle_running_state(instance, last_minute_running_periods) + current_running_period = get_nearest_running_period(running_periods) + desired_type: Optional[str] = current_running_period["instancetype"] - return handle_stopped_state() + return ( + ScheduleState.RUNNING, + desired_type, + current_running_period["period"].name, + ) + elif any(period["state"] == "any" 
for period in periods_with_desired_states): + return ScheduleState.ANY, None, None + else: + return ScheduleState.STOPPED, None, None def get_periods_with_desired_states( self, time: datetime @@ -359,3 +250,26 @@ def get_periods_with_desired_states( for p in self.periods ] return periods_with_desired_states + + +def get_nearest_running_period( + periods: list[PeriodWithDesiredState], +) -> PeriodWithDesiredState: + """given a list of running periods, get "nearest period" which is defined as + the running period with the latest start time""" + + if not periods: + raise ValueError( + "Tried to find the latest start time of an empty list of periods" + ) + + latest_period = periods[0] + for period in periods: + if period["period"].begintime is None: + continue # begintime of none cannot possibly be more recent than latest period + elif latest_period["period"].begintime is None: + latest_period = period + elif period["period"].begintime > latest_period["period"].begintime: + latest_period = period + + return latest_period diff --git a/source/app/instance_scheduler/configuration/running_period.py b/source/app/instance_scheduler/configuration/running_period.py index 4c96909c..524a26d5 100644 --- a/source/app/instance_scheduler/configuration/running_period.py +++ b/source/app/instance_scheduler/configuration/running_period.py @@ -4,18 +4,16 @@ from datetime import datetime, time from typing import Literal, Optional -from instance_scheduler import ScheduleState, configuration -from instance_scheduler.util.display_helper import set_str, time_str +from aws_lambda_powertools import Logger as PowerToolsLogger + +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.schedulers.states import ScheduleState +from instance_scheduler.util.display_helper import time_str from instance_scheduler.util.logger import Logger -DEBUG_CHECK_DT_START_TIME = "{} Time {} is {} starttime {}, returned state is {}" 
-DEBUG_CHECK_DT_START_AND_STOP = "{} Time {} is {} {}-{}, returned state is {}" -DEBUG_CHECK_DT = 'Checking conditions for period "{}"' -DEBUG_CHECK_DT_STOP_TIME = "{} Time {} is {} stoptime {}, returned state is {}" -DEBUG_CHECK_DT_UNDEFINED_START_STOP = "{} No start or stop time, returned state is {}" -DEBUG_CHECK_MONTH = '{} Month "{}" {}in months ({})' -DEBUG_CHECK_MONTH_DAY = "{} Day of month {} {}in month days ({})" -DEBUG_CHECK_WEEKDAYS = '{} Weekday "{}" {}in weekdays ({})' + +class RunningPeriodValidationException(Exception): + """An error occurred while validating the consistency of a running period""" @dataclass @@ -23,44 +21,30 @@ class RunningPeriod: name: str begintime: Optional[time] = None endtime: Optional[time] = None - weekdays: Optional[set[int]] = None - months: Optional[set[int]] = None - monthdays: Optional[set[int]] = None + cron_recurrence: CronRecurrenceExpression = CronRecurrenceExpression() def __post_init__(self) -> None: - self._logger: Optional[Logger] = None - - def __str__(self) -> str: - s = 'Period "{}": '.format(self.name) - conditions = [] - if self.begintime: - conditions.append("starts at {}".format(time_str(self.begintime))) - if self.endtime: - conditions.append("ends at {}".format(time_str(self.endtime))) - if self.weekdays is not None: - conditions.append( - "on weekdays ({})".format( - set_str(self.weekdays, configuration.WEEKDAY_NAMES) - ) - ) - if self.monthdays: - conditions.append("on monthdays ({})".format(set_str(self.monthdays))) - if self.months: - conditions.append( - "in months ({})".format( - set_str(self.months, configuration.MONTH_NAMES, offset=1) - ) + self._logger: Optional[Logger | PowerToolsLogger] = None + self._validate() + + def _validate(self) -> None: + if self.name is None or self.name == "": + raise RunningPeriodValidationException( + "Constraint Violation: name cannot be none or empty" ) - s += ", ".join(conditions) - return s + if self.begintime and self.endtime and self.begintime > self.endtime: 
+ raise RunningPeriodValidationException( + f"Constraint Violation: begintime {self.begintime.strftime('%H:%M')} " + f"must be before endtime {self.endtime.strftime('%H:%M')}" + ) def _log_debug(self, msg: str, *args: str) -> None: if self._logger is not None: self._logger.debug(msg, *args) def get_desired_state( - self, logger: Optional[Logger], current_dt: datetime + self, logger: Optional[Logger | PowerToolsLogger], current_dt: datetime ) -> ScheduleState: """ Test if the instance should be running at the specified dt, all conditions configured a period should be true @@ -68,126 +52,79 @@ def get_desired_state( :param current_dt: time to test :return: desired state for the instance in the period """ - - def state_str(checked: bool) -> Literal["[running]", "[stopped]"]: - return "[running]" if checked else "[stopped]" - - def check_running_state_str( - checked_state: str, - ) -> Literal["[running]", "[stopped]"]: - return state_str(checked_state != "stopped") - - def not_str(is_not_not: bool) -> str: - return "" if is_not_not else "not " - - # check day of month - def check_monthday(dt: datetime) -> bool: - result = self.monthdays is None or dt.day in self.monthdays - if self.monthdays: - self._log_debug( - DEBUG_CHECK_MONTH_DAY, - state_str(result), - str(dt.day), - "" if result else "not ", - set_str(self.monthdays), - ) - return result - - # check month - def check_month(dt: datetime) -> bool: - result = self.months is None or dt.month in self.months - if self.months: - self._log_debug( - DEBUG_CHECK_MONTH, - state_str(result), - configuration.MONTH_NAMES[dt.month - 1], - not_str(result), - set_str( - self.months, displaynames=configuration.MONTH_NAMES, offset=1 - ), - ) - return result - - # check weekday - def check_weekday(dt: datetime) -> bool: - result = self.weekdays is None or dt.weekday() in self.weekdays - if self.weekdays is not None: - self._log_debug( - DEBUG_CHECK_WEEKDAYS, - state_str(result), - configuration.WEEKDAY_NAMES[dt.weekday()], - 
not_str(result), - set_str(self.weekdays, displaynames=configuration.WEEKDAY_NAMES), - ) - return result - - # check time - def check_time(dt: datetime) -> ScheduleState: - t = time(dt.hour, dt.minute, dt.second) - ts = time_str(t) - - # no start and stop time, means running all day - if self.begintime is None and self.endtime is None: - desired_state: ScheduleState = "running" - self._log_debug( - DEBUG_CHECK_DT_UNDEFINED_START_STOP, state_str(True), desired_state - ) - return desired_state - elif self.begintime is None and self.endtime is not None: - # just the end time, stopped if later than that time - desired_state = "stopped" if t >= self.endtime else "any" - self._log_debug( - DEBUG_CHECK_DT_STOP_TIME, - check_running_state_str(desired_state), - ts, - "before" if desired_state == "any" else "after", - time_str(self.endtime), - desired_state, - ) - return desired_state - - elif self.begintime is not None and self.endtime is None: - # just the start time, running if later that that time - desired_state = "running" if t >= self.begintime else "any" - self._log_debug( - DEBUG_CHECK_DT_START_TIME, - check_running_state_str(desired_state), - ts, - "before" if desired_state == "any" else "after", - time_str(self.begintime), - desired_state, - ) - return desired_state - elif self.begintime is not None and self.endtime is not None: - # start and stop time, test if time falls in the period defined by these times - desired_state = ( - "running" if self.begintime <= t < self.endtime else "stopped" - ) - - self._log_debug( - DEBUG_CHECK_DT_START_AND_STOP, - check_running_state_str(desired_state), - ts, - "within" if desired_state == "running" else "outside", - time_str(self.begintime), - time_str(self.endtime), - desired_state, - ) - else: - assert False, "unreachable" - # the above defines all 4 possible combinations of none/not_none on begintime and endtime - # so this should be impossible to reach - - return desired_state - self._logger = logger + 
self._log_debug('Checking conditions for period "{}"', self.name) + if not self.cron_recurrence.contains(current_dt): + return ScheduleState.STOPPED + return self.check_time(current_dt) - state: ScheduleState = "stopped" - - self._log_debug(DEBUG_CHECK_DT, self.name) - for check in [check_weekday, check_month, check_monthday]: - if not check(current_dt): - return state - - state = check_time(current_dt) - return state + def check_time(self, dt: datetime) -> ScheduleState: + """ + check the given time against the begin and end times of this period + :param dt: + :return: + """ + time_to_check = time(dt.hour, dt.minute, dt.second) + + # debug msg + period_type: str + + # no start and stop time, means running all day + if self.begintime is None and self.endtime is None: + desired_state: ScheduleState = ScheduleState.RUNNING + period_type = "all-day" + elif self.begintime is None and self.endtime is not None: + # just the end time, stopped if later than that time + desired_state = ( + ScheduleState.STOPPED + if time_to_check >= self.endtime + else ScheduleState.ANY + ) + period_type = f"1-sided stop ({time_str(self.endtime)})" + elif self.begintime is not None and self.endtime is None: + # just the start time, running if later that that time + desired_state = ( + ScheduleState.RUNNING + if time_to_check >= self.begintime + else ScheduleState.ANY + ) + period_type = f"1-sided start ({time_str(self.begintime)})" + elif self.begintime is not None and self.endtime is not None: + # start and stop time, test if time falls in the period defined by these times + desired_state = ( + ScheduleState.RUNNING + if self.begintime <= time_to_check < self.endtime + else ScheduleState.STOPPED + ) + period_type = f"range ({time_str(self.begintime)}-{time_str(self.endtime)})" + else: + assert False, "unreachable" + # the above defines all 4 possible combinations of none/not_none on begintime and endtime + # so this should be impossible to reach + + self._log_debug( + "Period CheckTime 
Result:\n" + " PeriodType: {}\n" + " timeChecked: {} ({})\n" + " desiredState: {}", + period_type, + dt.isoformat(), + time_str(time_to_check), + desired_state, + ) + return desired_state + + +# string format helpers for debug messages +def _not_str(is_not_not: bool) -> str: + return "" if is_not_not else "not " + + +def _state_str(checked: bool) -> Literal["[running]", "[stopped]"]: + return "[running]" if checked else "[stopped]" + + +def _check_running_state_str( + checked_state: str, +) -> Literal["[running]", "[stopped]"]: + return _state_str(checked_state != "stopped") diff --git a/source/app/instance_scheduler/configuration/running_period_dict_element.py b/source/app/instance_scheduler/configuration/running_period_dict_element.py index 692dba7c..d65534f1 100644 --- a/source/app/instance_scheduler/configuration/running_period_dict_element.py +++ b/source/app/instance_scheduler/configuration/running_period_dict_element.py @@ -1,8 +1,6 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -from typing import Optional, TypedDict - -from typing_extensions import NotRequired +from typing import NotRequired, Optional, TypedDict from instance_scheduler.configuration.running_period import RunningPeriod diff --git a/source/app/instance_scheduler/configuration/scheduler_config.py b/source/app/instance_scheduler/configuration/scheduler_config.py deleted file mode 100644 index a3e63fa5..00000000 --- a/source/app/instance_scheduler/configuration/scheduler_config.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -import copy -import os -from datetime import datetime, timezone -from typing import Any, Optional -from zoneinfo import ZoneInfo - -from instance_scheduler import configuration -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.scheduling_context import TagTemplate - -# class to hold the configuration for the instance scheduler -INF_SCHEDULE_DISPLAY = ( - "Configuration:\n" - 'Scheduled services = "{}"\n' - 'Schedule clusters = "{}"\n' - 'Create RDS instance snapshot = "{}"\n' - 'Tagname = "{}"\n' - 'Default timezone = "{}"\n' - 'Trace = "{}"\n' - 'Enable SSM Maintenance Windows = "{}"\n' - 'Use metrics = "{}"\n' - 'Regions = "{}"\n' - 'Started tags = "{}"\n' - 'Stopped tags = "{}"\n' - 'Process Lambda account = "{}"\n' - 'Scheduler Role Name = "{}"\n' - 'Namespace = "{}"\n' - 'Organization Id = "{}"\n' - 'Aws Partition = "{}"\n' - 'Remote Account Ids = "{}"' -) - -TAG_VAL_STR = "{{{}}}" - - -class GlobalConfig: - """ - Implements scheduler configuration - """ - - def __init__( - self, - scheduled_services: list[str], - schedule_clusters: bool, - tag_name: str, - regions: list[str], - default_timezone: ZoneInfo, - schedules: dict[str, InstanceSchedule], - trace: bool, - enable_ssm_maintenance_windows: bool, - use_metrics: bool, - remote_account_ids: list[str], - namespace: str, - aws_partition: str, - scheduler_role_name: str, - organization_id: str, - schedule_lambda_account: bool, - create_rds_snapshot: bool, - started_tags: str = "", - stopped_tags: str = "", - ) -> None: - """ - Initializes schedule configuration instance - :param scheduled_services: services handled by the scheduler - :param schedule_clusters schedule RDS multi-AZ and Aurora clusters - :param tag_name: name of the tag to define schedule for instances - :param regions: regions to handle - :param default_timezone: default timezone for schedules - :param schedules: instance running 
schedules - :param trace: set to true for detailed logging - :param enable_ssm_maintenance_windows: set to true for enable solution to retrieve SSM Maintenance Windows. - :param use_metrics: global flag to enable metrics collection - :param remote_account_ids: remote account ids - :param namespace: namespace for the stack - :param aws_partition: the aws partition where the solution is installed. - :param scheduler_role_name: scheduler role name - :param organization_id: organization id - :param schedule_lambda_account: set to true to schedule instances in account in which scheduler is installed - :param create_rds_snapshot create snapshot before stopping non-cluster rds instances - :param started_tags: start tags in string format - :param stopped_tags: stop tags in string format - """ - self.tag_name = tag_name - self.schedules = schedules - self.default_timezone = default_timezone - self.trace = trace - self.enable_ssm_maintenance_windows = enable_ssm_maintenance_windows - self.use_metrics = use_metrics - self.regions = regions - self.remote_account_ids = remote_account_ids - self.namespace = namespace - self.aws_partition = aws_partition - self.organization_id = organization_id - self.scheduler_role_name = scheduler_role_name - self.schedule_lambda_account = schedule_lambda_account - self.scheduled_services = scheduled_services - self.schedule_clusters = schedule_clusters - self.create_rds_snapshot = create_rds_snapshot - self._service_settings = None - self.started_tags = ( - [] - if started_tags in ["" or None] - else self.tag_list(self.build_tags_from_template(started_tags)) - ) - self.stopped_tags = ( - [] - if stopped_tags in ["" or None] - else self.tag_list(self.build_tags_from_template(stopped_tags)) - ) - - def get_schedule(self, name: str) -> Optional[InstanceSchedule]: - """ - Get a schedule by its name - :param name: name of the schedule - :return: Schedule, None f it does not exist - """ - return self.schedules[name] if name in self.schedules else 
None - - @classmethod - def build_tags_from_template( - cls, tags_str: Any, tag_variables: Optional[Any] = None - ) -> dict[str, str]: - lastkey = None - tags = {} - for tag in tags_str.split(","): - if "=" in tag: - t = tag.partition("=") - tags[t[0]] = t[2] - lastkey = t[0] - elif lastkey is not None: - tags[lastkey] = ",".join([tags[lastkey], tag]) - - tag_vars = {} if tag_variables is None else copy.copy(tag_variables) - - dt = datetime.now(timezone.utc) - tag_vars.update( - { - configuration.TAG_VAL_SCHEDULER: os.getenv(configuration.ENV_STACK, ""), - configuration.TAG_VAL_YEAR: "{:0>4d}".format(dt.year), - configuration.TAG_VAL_MONTH: "{:0>2d}".format(dt.month), - configuration.TAG_VAL_DAY: "{:0>2d}".format(dt.day), - configuration.TAG_VAL_HOUR: "{:0>2d}".format(dt.hour), - configuration.TAG_VAL_MINUTE: "{:0>2d}".format(dt.minute), - configuration.TAG_VAL_TIMEZONE: "UTC", - } - ) - - for tag in tags: - value = tags[tag] - if value not in ["", None]: - for v in tag_vars: - tags[tag] = tags[tag].replace(TAG_VAL_STR.format(v), tag_vars[v]) - return tags - - @classmethod - def tag_list(cls, tags_dict: dict[str, str]) -> list[TagTemplate]: - return [{"Key": t, "Value": tags_dict[t]} for t in tags_dict] - - def __str__(self) -> str: - s = INF_SCHEDULE_DISPLAY.format( - ", ".join(self.scheduled_services), - str(self.schedule_clusters), - str(self.create_rds_snapshot), - self.tag_name, - str(self.default_timezone), - str(self.trace), - str(self.enable_ssm_maintenance_windows), - str(self.use_metrics), - ", ".join(self.regions), - str(self.started_tags), - str(self.stopped_tags), - str(self.schedule_lambda_account), - str(self.scheduler_role_name), - str(self.namespace), - str(self.organization_id), - str(self.aws_partition), - ", ".join(self.remote_account_ids), - ) - - return s diff --git a/source/app/instance_scheduler/configuration/scheduler_config_builder.py b/source/app/instance_scheduler/configuration/scheduler_config_builder.py deleted file mode 100644 index 
79c54c69..00000000 --- a/source/app/instance_scheduler/configuration/scheduler_config_builder.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import datetime -import re as regex -import time -import zoneinfo -from typing import TYPE_CHECKING, Any, Optional -from zoneinfo import ZoneInfo - -from boto3.session import Session - -from instance_scheduler import boto_retry, configuration -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.configuration.scheduler_config import GlobalConfig -from instance_scheduler.configuration.setbuilders.month_setbuilder import ( - MonthSetBuilder, -) -from instance_scheduler.configuration.setbuilders.monthday_setbuilder import ( - MonthdaySetBuilder, -) -from instance_scheduler.configuration.setbuilders.weekday_setbuilder import ( - WeekdaySetBuilder, -) -from instance_scheduler.util.app_env import get_app_env -from instance_scheduler.util.logger import Logger - -if TYPE_CHECKING: - from mypy_boto3_ssm.client import SSMClient -else: - SSMClient = object - -REGEX_SSM_PARAM = "{param:(.+?)}" - - -MSG_BEGIN_MUST_BEFORE_END = "Begin time {} must be earlier than end time in {}" -MSG_DUPLICATE_PERIOD_NAME_FOUND = 'Duplicate period name "{}" found' -MSG_DUPLICATE_SCHEDULE_NAME_FOUND = 'Duplicate schedule name "{}" found' -MSG_INVALID_DEFAULT_TIMEZONE = '"{}" is not a valid timezone, use zoneinfo.available_timezones() to list all valid zones' -MSG_INVALID_OVERRIDE_STATUS = "{} is not a valid value for {}, possible values are {}" -MSG_NAME_MISSING_IN_PERIOD = 'Name missing in period "{}"' -MSG_NAME_MISSING_IN_SCHEDULE = "Name missing in schedule {}" -MSG_OVERWRITE_OVERRIDE_MUTUAL_EXCLUSIVE = "{} and {} are mutually exclusive options" -MSG_SCHEDULE_IS_NOT_DEFINED = 'Period "{}" used in schedule "{}" is not defined' 
-MSG_TAGNAME_MISSING_IN_CONFIGURATION = "tagname is missing in configuration" -MSG_INVALID_SCHEDULE_TIMEZONE = ( - '"{}" is in schedule config "{}" is not a valid timezone, check zoneinfo.available_timezones() for ' - "valid zones" -) - - -class SchedulerConfigBuilder: - """ - Class that implements logic for building the configuration from the raw stored configuration data. - """ - - _checked_timezones: dict[str, Any] = dict() - _invalid_timezones: set[str] = set() - _all_timezones = {tz.lower(): tz for tz in zoneinfo.available_timezones()} - - def __init__(self, logger: Optional[Logger]) -> None: - self._logger = logger - self._config: Any = None - self._ssm: Optional[SSMClient] = None - - @property - def ssm(self) -> SSMClient: - if self._ssm is None: - self._ssm = boto_retry.get_client_with_standard_retry("ssm") - return self._ssm - - def build( - self, config: dict[Any, Any], dt: Optional[datetime.datetime] = None - ) -> GlobalConfig: - self._config = config - - config_date = dt if dt is not None else datetime.datetime.now() - - try: - scheduler_metrics = config.get(configuration.METRICS, False) - - remote_account_ids_from_ssm = self.get_remote_account_ids_from_ssm(config) - - app_env = get_app_env() - - session = Session() - aws_partition = session.get_partition_for_region(session.region_name) - - return GlobalConfig( - scheduled_services=app_env.scheduled_services(), - schedule_clusters=app_env.enable_rds_clusters, - create_rds_snapshot=app_env.enable_rds_snapshots, - tag_name=app_env.schedule_tag_key, - regions=app_env.schedule_regions, - default_timezone=app_env.default_timezone, - schedules=self._build_schedules( - config, - str(app_env.default_timezone), - scheduler_metrics, - config_date, - ), - trace=app_env.enable_debug_logging, - enable_ssm_maintenance_windows=app_env.enable_ec2_ssm_maintenance_windows, - use_metrics=app_env.enable_cloudwatch_metrics, - remote_account_ids=remote_account_ids_from_ssm, - aws_partition=aws_partition, - 
namespace=app_env.app_namespace, - scheduler_role_name=app_env.scheduler_role_name, - organization_id=config.get(configuration.ORGANIZATION_ID, ""), - schedule_lambda_account=app_env.enable_schedule_hub_account, - started_tags=",".join(app_env.start_tags), - stopped_tags=",".join(app_env.stop_tags), - ) - except ValueError as ex: - if self._logger is not None: - self._logger.error(str(ex)) - raise ex - - def get_remote_account_ids_from_ssm(self, config: dict[Any, Any]) -> list[str]: - remote_account_ids_from_ssm = [] - for account_id in config.get(configuration.REMOTE_ACCOUNT_IDS, []) or []: - if regex.match(REGEX_SSM_PARAM, account_id): - account = regex.findall(REGEX_SSM_PARAM, account_id) - if len(account) > 0: - resp = self.ssm.get_parameters(Names=list(set(account))) - for p in resp.get("Parameters", []): - if p["Type"] == "StringList": - remote_account_ids_from_ssm += p["Value"].split(",") - else: - remote_account_ids_from_ssm.append(p["Value"]) - else: - remote_account_ids_from_ssm.append(account_id) - return remote_account_ids_from_ssm - - # build the schedules from the configuration - def _build_schedules( - self, - conf: Any, - dflt_tz: str, - scheduler_use_metrics: bool, - dt: datetime.datetime, - ) -> dict[str, InstanceSchedule]: - schedules = {} - - # use the periods to build the schedules that can be assigned to the instances - for schedule_config in conf.get(configuration.SCHEDULES, []): - schedule = self._build_schedule( - schedule_config, dflt_tz, scheduler_use_metrics, dt - ) - if schedule is None: - continue - if schedule.name in schedules: - if self._logger is not None: - self._logger.error(MSG_DUPLICATE_SCHEDULE_NAME_FOUND, schedule.name) - continue - - schedules[schedule.name] = schedule - - return schedules - - def _build_schedule( - self, - schedule_config: Any, - dflt_tz: str, - scheduler_use_config: bool, - dt: datetime.datetime, - ) -> Optional[InstanceSchedule]: - # gets the timezone - def get_timezone(schedule_configuration: Any) -> 
str: - schedule_timezone = schedule_configuration.get(configuration.TIMEZONE) - if not schedule_timezone: - schedule_timezone = dflt_tz - - validated = SchedulerConfigBuilder.validated_timezone(schedule_timezone) - if validated is None: - raise ValueError( - MSG_INVALID_SCHEDULE_TIMEZONE.format( - schedule_timezone, schedule_configuration - ) - ) - return validated - - def get_schedule_name(config): - schedule_name = config.get(configuration.NAME, None) - if not schedule_name: - raise ValueError(MSG_NAME_MISSING_IN_SCHEDULE.format(config)) - return schedule_name - - def get_override_status(config): - if configuration.OVERWRITE in config: - if configuration.OVERRIDE_STATUS in config: - raise ValueError( - MSG_OVERWRITE_OVERRIDE_MUTUAL_EXCLUSIVE.format( - configuration.OVERWRITE, configuration.OVERRIDE_STATUS - ) - ) - overwrite = config[configuration.OVERWRITE] - return ( - configuration.OVERRIDE_STATUS_RUNNING - if overwrite - else configuration.OVERRIDE_STATUS_STOPPED - ) - status = config.get(configuration.OVERRIDE_STATUS, None) - if ( - status is not None - and status not in configuration.OVERRIDE_STATUS_VALUES - ): - raise ValueError( - MSG_INVALID_OVERRIDE_STATUS.format( - status, - configuration.OVERRIDE_STATUS, - ",".join(configuration.OVERRIDE_STATUS_VALUES), - ) - ) - return status - - try: - timezone = get_timezone(schedule_config) - override_status = get_override_status(schedule_config) - periods_for_schedule = [] - - # ignore periods if there is an always on or if override_status option is used - if not override_status: - # use current date and time for timezone of schedule - current_schema_dt = dt.now( - SchedulerConfigBuilder._get_timezone(timezone) - ) - periods_for_schedule = self._get_schedule_periods( - schedule_config, current_schema_dt - ) - - return InstanceSchedule( - name=get_schedule_name(schedule_config), - periods=periods_for_schedule, - timezone=timezone, - override_status=override_status, - 
description=schedule_config.get(configuration.DESCRIPTION, ""), - use_metrics=schedule_config.get( - configuration.METRICS, scheduler_use_config - ), - stop_new_instances=schedule_config.get( - configuration.STOP_NEW_INSTANCES, True - ), - use_maintenance_window=schedule_config.get( - configuration.USE_MAINTENANCE_WINDOW, False - ), - ssm_maintenance_window=schedule_config.get( - configuration.SSM_MAINTENANCE_WINDOW, None - ), - enforced=schedule_config.get(configuration.ENFORCED, False), - hibernate=schedule_config.get(configuration.HIBERNATE, False), - retain_running=schedule_config.get(configuration.RETAINED_RUNNING), - configured_in_stack=schedule_config.get("configured_in_stack", None), - ) - - except ValueError as ex: - if self._logger is not None: - self._logger.error(str(ex)) - return None - - def _get_schedule_periods(self, schedule_config, schema_dt): - def get_config_for_period(period_name): - for cfg in self._config.get(configuration.PERIODS, []): - if configuration.NAME in cfg and cfg[configuration.NAME] == period_name: - return cfg - return None - - schedule_periods = [] - - for period_config in schedule_config.get(configuration.PERIODS, []): - instancetype = None - if configuration.INSTANCE_TYPE_SEP in period_config: - name, instancetype = period_config.rsplit( - configuration.INSTANCE_TYPE_SEP, 1 - ) - else: - name = period_config - - period_config = get_config_for_period(name) - if period_config is None: - raise ValueError( - MSG_SCHEDULE_IS_NOT_DEFINED.format(name, schedule_config) - ) - - new_period = self._build_period(period_config, schema_dt) - - schedule_periods.append( - {"period": new_period, "instancetype": instancetype} - ) - - return schedule_periods - - @staticmethod - def get_time_from_string(timestr: str) -> Optional[datetime.time]: - """ - Standardised method to build time object instance from time string - :param timestr: string in format as defined in configuration.TIME_FORMAT_STRING - :return: time object from time string, None if 
the time is invalid - """ - if not timestr: - return None - try: - tm = time.strptime(timestr, configuration.TIME_FORMAT_STRING) - except ValueError: - return None - return datetime.time(tm.tm_hour, tm.tm_min, 0) - - @classmethod - def _build_period(cls, period_configuration, dt): - def get_periodname(config): - period_name = config.get(configuration.NAME) - if not period_name: - raise ValueError(MSG_NAME_MISSING_IN_PERIOD.format(config)) - return period_name - - # reads a start and end time from config - def get_begin_and_end(period_name, config): - begin = SchedulerConfigBuilder.get_time_from_string( - config.get(configuration.BEGINTIME) - ) - end = SchedulerConfigBuilder.get_time_from_string( - config.get(configuration.ENDTIME) - ) - if (begin and end) and begin > end: - raise ValueError( - MSG_BEGIN_MUST_BEFORE_END.format(begin, end, period_name) - ) - return begin, end - - def build_period_config_set(period_config, set_builder, config_name): - config_set = period_config.get(config_name) - if config_set: - return set_builder.build(config_set) - else: - return None - - name = get_periodname(period_configuration) - begin_time, end_time = get_begin_and_end(name, period_configuration) - - week_days = build_period_config_set( - period_configuration, - WeekdaySetBuilder(year=dt.year, month=dt.month, day=dt.day), - configuration.WEEKDAYS, - ) - months = build_period_config_set( - period_configuration, MonthSetBuilder(), configuration.MONTHS - ) - monthdays = build_period_config_set( - period_configuration, - MonthdaySetBuilder(year=dt.year, month=dt.month), - configuration.MONTHDAYS, - ) - - return RunningPeriod( - name=name, - begintime=begin_time, - endtime=end_time, - weekdays=week_days, - months=months, - monthdays=monthdays, - ) - - @staticmethod - def is_valid_timezone(tz: str) -> bool: - """ - Generic and optimized method to test the validity of a timezone name - :param tz: - :return: True if the timezone is valid, else False - """ - return 
SchedulerConfigBuilder.validated_timezone(tz) is not None - - @staticmethod - def validated_timezone(tz: str) -> Optional[str]: - """ - Generic and optimized method to get a timezone from a timezone name - :param tz: name of the timezone - :return: timezone instance, None if it not valid - """ - tz_lower = str(tz).lower() - # -----------cache----------------# - if tz_lower in SchedulerConfigBuilder._checked_timezones: - return str(SchedulerConfigBuilder._checked_timezones[tz_lower]) - - if tz_lower in SchedulerConfigBuilder._invalid_timezones: - return None - - # -----------check----------------# - validated = SchedulerConfigBuilder._all_timezones.get(tz_lower, None) - if validated is not None: - # keep list off approved timezones to make next checks much faster - SchedulerConfigBuilder._checked_timezones[tz_lower] = ZoneInfo(validated) - return validated - else: - SchedulerConfigBuilder._invalid_timezones.add(tz_lower) - return None - - @staticmethod - def _get_timezone(tz_name): - tz_lower = str(tz_name).lower() - # did we use this one before, reuse it - tz = SchedulerConfigBuilder._checked_timezones.get(tz_lower) - if tz is not None: - return tz - - # avoid repeated lookup for invalid timezones - if tz_lower not in SchedulerConfigBuilder._invalid_timezones: - # case insensitive lookup for timezone name - tz_str = SchedulerConfigBuilder._all_timezones.get(tz_lower) - if tz_str is not None: - # found it, no need to check for invalid timezone here because of lookup - tz = ZoneInfo(tz_str) - SchedulerConfigBuilder._checked_timezones[tz_lower] = tz - return tz - - # not a valid timezone - SchedulerConfigBuilder._invalid_timezones.add(tz_lower) - - raise ValueError("Timezone {} is not a valid timezone".format(tz_name)) diff --git a/source/app/instance_scheduler/configuration/scheduling_context.py b/source/app/instance_scheduler/configuration/scheduling_context.py index a30556fe..912f9f10 100644 --- a/source/app/instance_scheduler/configuration/scheduling_context.py 
+++ b/source/app/instance_scheduler/configuration/scheduling_context.py @@ -3,14 +3,15 @@ import copy import datetime import time +from collections.abc import Mapping from dataclasses import dataclass, field from typing import Any, Optional, TypedDict from zoneinfo import ZoneInfo from instance_scheduler import configuration from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod from instance_scheduler.util.app_env import get_app_env +from instance_scheduler.util.time import is_aware class TagTemplate(TypedDict): @@ -24,234 +25,28 @@ class SchedulingContext: service: str region: str current_dt: datetime.datetime - tag_name: str default_timezone: ZoneInfo - schedules: dict[str, InstanceSchedule] - schedule_clusters: bool - trace: bool - enable_ssm_maintenance_windows: bool - use_metrics: bool - namespace: str - aws_partition: str - scheduler_role_name: str - organization_id: str - schedule_lambda_account: bool - create_rds_snapshot: bool + schedules: Mapping[str, InstanceSchedule] + scheduling_interval_minutes: int started_tags: list[TagTemplate] = field(default_factory=list) stopped_tags: list[TagTemplate] = field(default_factory=list) + def __post_init__(self) -> None: + if not is_aware(self.current_dt): + raise ValueError( + f"SchedulingContext datetime must be timezone-Aware. 
Received: {self.current_dt}" + ) + def get_schedule(self, name: Optional[str]) -> Optional[InstanceSchedule]: """ Get a schedule by its name :param name: name of the schedule :return: Schedule, None f it does not exist """ + if not name: + return None return self.schedules[name] if name in self.schedules else None - def to_dict(self) -> dict[Any, Any]: - """ - build a dictionary from a context instance to be passed safely in the event of a lambda function - - note - adapted from original code, much refactoring is almost certainly possible - :param config: input SchedulerConfig - :return: the schedule as a dict - """ - result = {} - - # include values only if set? - for attr in [ - "tag_name", - "trace", - "namespace", - "scheduler_role_name", - "organization_id", - "aws_partition", - "enable_ssm_maintenance_windows", - "use_metrics", - "schedule_clusters", - "create_rds_snapshot", - "schedule_lambda_account", - "started_tags", - "stopped_tags", - ]: - if attr in self.__dict__ and self.__dict__[attr] is not None: - result[attr] = self.__dict__[attr] - - result["default_timezone"] = str(self.default_timezone) - result["current_dt"] = self.current_dt.isoformat() - - for attr in ["started_tags", "stopped_tags"]: - if attr in self.__dict__ and self.__dict__[attr] is not None: - result[attr] = ",".join( - [ - "{}={}".format(t.get("Key"), t.get("Value")) - for t in self.__dict__[attr] - ] - ) - - for attr in ["region", "account_id", "service"]: - result[attr] = self.__dict__[attr] - - # initialize schedules/periods - result["schedules"] = {} - result["periods"] = {} - - # putting schedule object into list of schedules by key - # this could be done recursively - for schedule_name in self.schedules: - result["schedules"][schedule_name] = {} - schedule = self.schedules[schedule_name] - for attr in [ - "name", - "timezone", - "override_status", - "stop_new_instances", - "use_metrics", - "enforced", - "hibernate", - "use_maintenance_window", - "ssm_maintenance_window", - 
"retain_running", - ]: - if attr in schedule.__dict__ and schedule.__dict__[attr] is not None: - result["schedules"][schedule_name][attr] = schedule.__dict__[attr] - - if schedule.override_status is not None: - continue - - result["schedules"][schedule_name]["periods"] = [] - - for p in schedule.periods: - period = p["period"] - instance_type = p.get("instancetype", None) - result["schedules"][schedule_name]["periods"].append( - period.name - + ( - ("{}{}".format(configuration.INSTANCE_TYPE_SEP, instance_type)) - if instance_type - else "" - ) - ) - if period.name in result["periods"]: - continue - - result["periods"][period.name] = {} - for attr in ["begintime", "endtime"]: - tm = period.__dict__[attr] - if tm is not None: - result["periods"][period.name][attr] = "{:0>2d}:{:0>2d}".format( - tm.hour, tm.minute - ) - - for attr in ["weekdays", "monthdays", "months"]: - s = period.__dict__[attr] - if s is None: - continue - result["periods"][period.name][attr] = list(s) - return result - - -def from_dict(config_dict: dict[Any, Any]) -> SchedulingContext: - """ - build a configuration object instance that is passed as a dictionary in the event of a lambda function - :param config_dict: a dictionary representation of a schedule - :return: a SchedulerConfig built from the dict - """ - - config_args = {} - for attr in [ - "tag_name", - "trace", - "namespace", - "scheduler_role_name", - "organization_id", - "aws_partition", - "enable_ssm_maintenance_windows", - "use_metrics", - "schedule_clusters", - "create_rds_snapshot", - "schedule_lambda_account", - ]: - config_args[attr] = config_dict.get(attr, None) - - for attr in ["region", "account_id", "service"]: - config_args[attr] = config_dict.get(attr, "") - - config_args["current_dt"] = datetime.datetime.fromisoformat( - config_dict.get("current_dt", "") - ) - config_args["default_timezone"] = ZoneInfo(config_dict["default_timezone"]) - - for attr in ["started_tags", "stopped_tags"]: - config_args[attr] = 
build_tags_from_template(config_dict.get(attr, "")) - periods = {} - - for period_name in config_dict.get("periods", {}): - period_data = config_dict["periods"][period_name] - period_args = {"name": period_name} - - for attr in ["begintime", "endtime"]: - if attr in period_data: - period_args[attr] = get_time_from_string(period_data[attr]) - - for attr in ["weekdays", "monthdays", "months"]: - if attr in period_data: - period_args[attr] = set(period_data.get(attr, None)) - - period = RunningPeriod(**period_args) - periods[period_name] = period - - config_args["schedules"] = {} - - for schedule_name in config_dict.get("schedules", {}): - schedule_args = {} - schedule_data = config_dict["schedules"][schedule_name] - for attr in [ - "name", - "timezone", - "override_status", - "stop_new_instances", - "use_metrics", - "enforced", - "hibernate", - "use_maintenance_window", - "ssm_maintenance_window", - "retain_running", - ]: - schedule_args[attr] = schedule_data.get(attr, None) - - if schedule_args["override_status"] is None: - schedule_args["periods"] = [] - - for schedule_period in schedule_data.get("periods"): - temp = schedule_period.split(configuration.INSTANCE_TYPE_SEP) - if len(temp) > 1: - name = temp[0] - instance_type = temp[1] - else: - name = schedule_period - instance_type = None - schedule_args["periods"].append( - {"period": periods[name], "instancetype": instance_type} - ) - - schedule = InstanceSchedule(**schedule_args) - config_args["schedules"][schedule_name] = schedule - - config = SchedulingContext(**config_args) - - if ( - config.current_dt.tzinfo is None - or config.current_dt.tzinfo.utcoffset(config.current_dt) is None - ): - raise ValueError( - "Attempted to build scheduling_context with timezone unaware scheduling time! 
time received: " - "{}".format(config_dict.get("current_dt")) - ) - # https://docs.python.org/3/library/datetime.html#determining-if-an-object-is-aware-or-naive - - return config - def get_time_from_string(timestr: Optional[str]) -> Optional[datetime.time]: """ diff --git a/source/app/instance_scheduler/configuration/setbuilders/month_setbuilder.py b/source/app/instance_scheduler/configuration/setbuilders/month_setbuilder.py deleted file mode 100644 index 014e6e4e..00000000 --- a/source/app/instance_scheduler/configuration/setbuilders/month_setbuilder.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - - -import calendar - -from instance_scheduler.configuration.setbuilders.setbuilder import SetBuilder - - -class MonthSetBuilder(SetBuilder): - """ - Class for building month sets, 1-12 ans jan-dec - """ - - def __init__(self, wrap: bool = True, ignorecase: bool = True) -> None: - """ - Initializes set builder for month sets - :param wrap: Set to True to allow wrapping at last month of the year - :param ignorecase: Set to True to ignore case when mapping month names - """ - SetBuilder.__init__( - self, - names=calendar.month_abbr[1:], - significant_name_characters=3, - offset=1, - ignorecase=ignorecase, - wrap=wrap, - ) diff --git a/source/app/instance_scheduler/configuration/setbuilders/monthday_setbuilder.py b/source/app/instance_scheduler/configuration/setbuilders/monthday_setbuilder.py deleted file mode 100644 index 3711cd67..00000000 --- a/source/app/instance_scheduler/configuration/setbuilders/monthday_setbuilder.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -import calendar -from typing import Optional - -from instance_scheduler.configuration.setbuilders.setbuilder import SetBuilder - - -class MonthdaySetBuilder(SetBuilder): - """ - Class for building sets of monthdays, 1-(28-31), ',', '-', '/", "*", W for nearest weekday, L for last day of month - """ - - WILDCARD_WEEKDAY = "W" - WILDCARD_LAST_WEEKDAY = "L" - - def __init__(self, year: int, month: int) -> None: - """ - Initializes monthday set builder. - :param year: Year of month to build sets for, only required for month aware 'W' and 'L' features in expressions - :param month: Month to build sets for, only required for month aware 'W' and 'L' features in expressions - """ - self.year = year - self.month = month - self._firstweekday, self._lastday = calendar.monthrange(year, month) - - SetBuilder.__init__( - self, - min_value=1, - max_value=self._lastday, - offset=1, - ignorecase=False, - wrap=False, - last_item_wildcard=MonthdaySetBuilder.WILDCARD_LAST_WEEKDAY, - ) - - self._post_custom_parsers = [self._parse_weekday] - - def _parse_weekday(self, day_str: str) -> Optional[list[int]]: - # dayW return working day nearest to day - return self._get_weekday(day_str) - - def _parse_unknown(self, item: str) -> Optional[list[int]]: - return [] if item in [str(d) for d in range(self.last, 32)] else None - - def _seperator_characters(self) -> str: - # adding W to separator characters, it should not be formatted - return SetBuilder._seperator_characters(self) + self.WILDCARD_WEEKDAY - - def _get_weekday(self, day_str: str) -> Optional[list[int]]: - # returns working day nearest to day in month, string is in format dayW - if (1 < len(day_str) <= 3) and day_str.endswith(self.WILDCARD_WEEKDAY): - day = self._get_value_by_str(day_str[0:-1]) - if day is not None: - # calculated day of week based on first weekday of month - weekday = ((day % 7) + self._firstweekday - 1) % 7 - # for Saturdays use Friday, or Monday if it is the first day of the 
month - if weekday == 5: - day = day - 1 if day > 1 else day + 2 - # for Sundays use next Monday, or Saturday if it is the last day of the month - elif weekday == 6: - day = day + 1 if day < self._lastday else day - 2 - # for other days just return the specified day - return [day] - - return None diff --git a/source/app/instance_scheduler/configuration/setbuilders/setbuilder.py b/source/app/instance_scheduler/configuration/setbuilders/setbuilder.py deleted file mode 100644 index 66cb43c1..00000000 --- a/source/app/instance_scheduler/configuration/setbuilders/setbuilder.py +++ /dev/null @@ -1,502 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import logging -from collections.abc import Callable, Sequence -from typing import Optional - -Parser = Callable[[str], Optional[list[int]]] - - -class SetBuilder: - """ - # class for building set of values from values names or value strings - """ - - # all values in set - WILDCARD_ALL = "*?" 
- - # first value in set - WILDCARD_FIRST = "^" - - # last value in set - WILDCARD_LAST = "$" - - # increment operator - INCREMENT_CHARACTER = "/" - - # range operator - RANGE_CHARACTER = "-" - - def __init__( - self, - names: Optional[Sequence[str]] = None, - min_value: Optional[int] = None, - max_value: Optional[int] = None, - offset: Optional[int] = None, - wrap: bool = False, - ignorecase: bool = True, - significant_name_characters: Optional[int] = None, - first_item_wildcard: str = WILDCARD_FIRST, - all_items_wildcards: str = WILDCARD_ALL, - last_item_wildcard: str = WILDCARD_LAST, - ) -> None: - """ - - :param names: Names for values - :param min_value: Value for first item in set created from min and max value - :param max_value: Max value for value in set created from min and max value - :param offset: Offset for first value in set created from names - :param wrap: Set to True to let sets wrap at max value - :param ignorecase: Set to True to ignore case when mapping values from their names - :param significant_name_characters: Number of significant characters to use when mapping values from their names - :param first_item_wildcard: First item wildcard - :param all_items_wildcards: All Items wildcard - :param last_item_wildcard: Last item wildcard - """ - - # use value names to setup builder - if names is not None: - # min-value and max_value are not allowed - if min_value is not None or max_value is not None: - raise ValueError( - "min_value and max_value parameters can not be used with names parameter" - ) - - # names to display for values - self._displaynames = [str(i) for i in names] - # names to identify values, use only the specified number of significant characters - self._names = ( - names - if significant_name_characters == 0 - else [name[0:significant_name_characters] for name in names] - ) - # convert to lowercase if case is ignored - if ignorecase: - self._names = [name.lower() for name in self._names] - # offset for values - self._offset = 
offset if offset else 0 - self._min_value = self._offset - self._max_value = len(names) - 1 + self._offset - # build list to identify values by their numeric string value - self._values = self.values = [ - str(i + self._offset) for i in range(0, len(self._names)) - ] - - else: - # setup builder with min and max values instead if names - - # both must be present - if min_value is None or max_value is None: - raise ValueError( - "min_value or max_value may not be None if names parameter is None" - ) - - # min must be less or equal than max - if min_value > max_value: - raise ValueError( - "min_value parameter should be less or equal to max_value parameter" - ) - - # build names to identify values - self._names = [str(i) for i in range(min_value, max_value + 1)] - self._min_value = min_value - self._max_value = max_value - self._values = self._names - # names used for display - self._displaynames = self._values - # offset may not conflict with min value - if offset is not None and offset != min_value: - raise ValueError( - "offset parameter should not be used or have the same value as min_value" - ) - self._offset = min_value - - self._logging = logging.getLogger("SetBuilder") - - self._wrap = wrap - self._ignorecase = ignorecase - self._all_items_wildcard_characters = all_items_wildcards - self._first_item_wildcard = first_item_wildcard - self._last_item_wildcard_character = last_item_wildcard - self._significant_name_characters = ( - significant_name_characters - if names is not None and significant_name_characters is not None - else 0 - ) - - # custom parsers to be executed before standard parsers - self._pre_custom_parsers: list[Parser] = [] - # custom parsers to be executes after standard parsers - self._post_custom_parsers: list[Parser] = [] - # setup list of standard parsers - self._standard_parsers: list[Parser] = [ - self._parse_name, # name - self._parse_value, # value, first and last wildcard - self._parse_name_range, # name-name - self._parse_value_range, 
# value-value - self._parse_all, # all items wildcard - self._parse_name_incr, # name/incr - self._parse_value_incr, # value/incr - self._parse_name_range_incr, # name-name/incr - self._parse_value_range_incr, - ] # value-value/incr - - def build(self, set_spec: str | list[str] | set[str]) -> set[int]: - """ - Builds set of values from string or list of strings - :param set_spec: Sets as comma separated string or list of strings - :return: - """ - if isinstance(set_spec, str) or isinstance(set_spec, type("")): - set_string_list = set_spec.split(",") - return self._get_set_items(set_string_list) - elif isinstance(set_spec, list) or isinstance(set_spec, set): - return self.build(",".join(set_spec)) - else: - raise ValueError("set_str argument must be of type string, set or array") - - @property - def first(self) -> int: - """ - Return lowest possible value in set - :return: Lowest possible value in set - """ - return self._offset - - @property - def last(self) -> int: - """ - Return highest possible value in set - :return: Highest possible value in set - """ - return len(self._names) - 1 + self._offset - - @property - def all(self) -> set[int]: - """ - Returns all items in set - :return: All items in set - """ - return set(self._all) - - @property - def _all(self) -> list[int]: - # internal function to return all items in set - return [val + self._offset for val in range(0, len(self._values))] - - def _parse_name(self, name_str: str) -> Optional[list[int]]: - # gets a set item by its name - return self._get_single_item(name_str, self._get_value_by_name) - - def _parse_value(self, value_str: str) -> Optional[list[int]]: - # value - return self._get_single_item(value_str, self._get_value_by_str) - - def _parse_name_range(self, name_range_str: str) -> Optional[list[int]]: - # name-name - return self._get_range_from_str(name_range_str, self._get_value_by_name) - - def _parse_value_range(self, value_range_str: str) -> Optional[list[int]]: - # value-value - return 
self._get_range_from_str(value_range_str, fn=self._get_value_by_str) - - def _parse_name_incr(self, name_incr_str: str) -> Optional[list[int]]: - # name/incr - return self._get_increment(name_incr_str, self._get_name_incr) - - def _parse_value_incr(self, value_incr_str: str) -> Optional[list[int]]: - # value/incr - return self._get_increment(value_incr_str, self._get_value_incr) - - def _parse_name_range_incr(self, name_range_incr_str: str) -> Optional[list[int]]: - # name-name/incr - return self._get_increment(name_range_incr_str, fn=self._get_name_range_incr) - - def _parse_value_range_incr(self, value_range_incr_str: str) -> Optional[list[int]]: - # value-value/incr - return self._get_increment(value_range_incr_str, fn=self._get_value_range_incr) - - def _parse_all(self, all_wildcard_str: str) -> Optional[list[int]]: - # wildcards - if ( - len(all_wildcard_str) == 1 - and all_wildcard_str in self._all_items_wildcard_characters - ): - return self._all - return None - - def _parse_unknown(self, _: str) -> Optional[list[int]]: - # handle unknown items - return None - - @property - def _parsers(self) -> list[Parser]: - # flattened list of all parsers - return [ - parser - for parsers in [ - self._pre_custom_parsers, - self._standard_parsers, - self._post_custom_parsers, - ] - for parser in parsers - ] - - def _special_items(self) -> str: - # special items that do not need pre-formatting or must be excluded from formatting - return "".join( - [ - self._all_items_wildcard_characters, - self._first_item_wildcard, - self._last_item_wildcard_character, - ] - ) - - def _seperator_characters(self) -> str: - # character that separates name from instructions like increments - return SetBuilder.INCREMENT_CHARACTER - - def _get_set_items(self, set_string_list: list[str]) -> set[int]: - # gets the items from a list of strings - set_items = set() - - # for every string in the list - for set_str in set_string_list: - s = self._format_item(set_str) - - # go through list off all 
parsers for the builder class - for parser in self._parsers: - # execute parser - value = parser(s) - # did it return a value - if value is not None: - self._logging.debug( - 'Parser : {}("{}") returns {}'.format( - parser.__name__, set_str, value - ) - ) - # add result from parser to result set - if len(value) > 0: - set_items.update(set(value)) - # if the parser is "all-items" wildcard there is no need for further processing as all items are in the result - if parser == self._parse_all: - return set_items - break - else: - # if this point is reached none of the parsers returned one or more items, try _parse_unknown - value = self._parse_unknown(s) - if value is not None: - self._logging.debug( - '{}("{}") returns {}'.format( - self._parse_unknown.__name__, set_str, value - ) - ) - set_items.update(set(value)) - else: - # if it does not return a value then raise an exception because of an unknown item - raise ValueError('"{}" is an unknown value'.format(set_str)) - - return set_items - - def _format_item(self, set_str: str) -> str: - # pre-processes the item before trying to parse it - s = set_str.strip() - - # immediately return if it is a special item - if len(s) == 1 and s in self._special_items(): - return s - - str_after_separator = None - - # check if the string has a separator, in that case remove and store string after and including the character - for c in self._seperator_characters(): - if c in s: - i = s.index(c) - str_after_separator = s[i:] - s = s[0:i] - break - - # truncate to significant characters - if self._significant_name_characters > 0: - s = SetBuilder.RANGE_CHARACTER.join( - [ - t[0 : self._significant_name_characters] - for t in s.split(self.RANGE_CHARACTER) - ] - ) - - # case sensitivity, to lowercase if case is ignored - if self._ignorecase: - s = s.lower() - - # append separator and remaining part if it was truncated - if str_after_separator is not None: - s += str_after_separator - - return s - - @staticmethod - def _get_single_item( - 
item_str: str, fn: Callable[[str], Optional[int]] - ) -> Optional[list[int]]: - # function to return single set items in a uniform way as a set - value = fn(item_str) - if value is not None: - return [value] - return None - - def _get_value_by_name(self, name_str: str) -> Optional[int]: - # gets the value of a set item by its name, also handled first and last item wildcards - # internal iterator for testing for names - def from_name(name: str) -> Optional[int]: - if name in self._names: - return self._names.index(name) + self._offset - return None - - # loop to test for name and wildcards - for fn in [from_name, self._get_first_value, self._get_last_value]: - value = fn(name_str) - if value is not None: - return value - return None - - def _get_value_by_str(self, value_str: str) -> Optional[int]: - # gets the value of a set item by its numeric string - s = value_str - while len(s) > 1 and s[0] == "0": - s = s[1:] - if s in self._values: - return self._values.index(s) + self._offset - return None - - def _get_range_from_str( - self, range_str: str, fn: Callable[[str], Optional[int]], incr: int = 1 - ) -> Optional[list[int]]: - # gets a range from a string, items are retrieved using the function specified by fn - # check if there is a range separator in the string - set_range = range_str.split(self.RANGE_CHARACTER) - # check for valid name of value for start - if len(set_range) == 2: - start = fn(set_range[0]) - if start is not None: - # check for valid name or value for end - end = fn(set_range[1]) - if end is not None: - return self._get_range(start, end, incr) - return None - - def _get_last_value(self, last_wildcard_str: str) -> Optional[int]: - # returns the last possible value if the str is the last wildcard character - if ( - len(last_wildcard_str) == 1 - and last_wildcard_str == self._last_item_wildcard_character - ): - return self.last - return None - - def _get_first_value(self, first_wildcard_str: str) -> Optional[int]: - # returns the first possible value 
if the str is the first item wildcard character - if ( - len(first_wildcard_str) == 1 - and first_wildcard_str == self._first_item_wildcard - ): - return self.first - return None - - def _get_range(self, start: int, end: int, step: int = 1) -> list[int]: - # gets a range of items for the specified start, end and step value - - # check if wrapping is needed and allowed - if not self._wrap and start > end: - raise ValueError( - "start ({}) must be less or equal to end ({}) if wrap is false".format( - start, end - ) - ) - - # this is the start - result = [start] - current = start - skip_to_next_value = step - # until we reach the end value - while current != end: - # get next - current += 1 - skip_to_next_value -= 1 - # handle wrapping - current %= len(self._values) + self._offset - current = max(current, self._offset) - - # handle step - if skip_to_next_value == 0: - result.append(current) - skip_to_next_value = step - return result - - @staticmethod - def _get_increment( - incr_str: str, fn: Callable[[str, int], Optional[list[int]]] - ) -> Optional[list[int]]: - # returns a set of values using a start value and a increment - temp = incr_str.split(SetBuilder.INCREMENT_CHARACTER) - # check if there is an increment character and if the increment value is present and valid - if len(temp) == 2: - try: - incr = int(temp[1]) - except ValueError: - raise ValueError( - "Increment value must be an integer value ({})".format(temp[1]) - ) - - if incr <= 0: - raise ValueError("Increment value must be > 0 ({})".format(incr)) - - return fn(temp[0], incr) - return None - - def _get_increment_by_string( - self, incr_string: str, fn: Callable[[str], Optional[int]], incr: int - ) -> Optional[list[int]]: - # get increment items for start value retrieved by function fn - - start = fn(incr_string) - if start is not None: - return self._get_range(start=start, end=self.last, step=incr) - return None - - def _get_name_incr(self, name_incr_str: str, incr: int) -> Optional[list[int]]: - # get 
increment items for start value retrieved by its name - return self._get_increment_by_string( - name_incr_str, self._get_value_by_name, incr - ) - - def _get_value_incr(self, value_incr_str: str, incr: int) -> Optional[list[int]]: - # get increment items for start value retrieved by its value string - return self._get_increment_by_string( - value_incr_str, self._get_value_by_str, incr - ) - - def _get_range_increment( - self, incr_str: str, fn: Callable[[str], Optional[int]], incr: int - ) -> Optional[list[int]]: - # gets increment values from a range specified by the name of the start and end value retrieved by function fn - set_range = self._get_range_from_str(incr_str, fn, incr) - if set_range is not None: - return set_range - return None - - def _get_name_range_incr( - self, name_range_incr_str: str, incr: int - ) -> Optional[list[int]]: - # gets increment values from a range specified by the name of the start and end value retrieved by their names - return self._get_range_increment( - name_range_incr_str, self._get_value_by_name, incr - ) - - def _get_value_range_incr( - self, value_range_incr_str: str, incr: int - ) -> Optional[list[int]]: - # gets increment values from a range specified by the name of the start and end value retrieved by their value strings - return self._get_range_increment( - value_range_incr_str, self._get_value_by_str, incr - ) diff --git a/source/app/instance_scheduler/configuration/setbuilders/weekday_setbuilder.py b/source/app/instance_scheduler/configuration/setbuilders/weekday_setbuilder.py deleted file mode 100644 index acf0adcc..00000000 --- a/source/app/instance_scheduler/configuration/setbuilders/weekday_setbuilder.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -import calendar -from collections.abc import Callable -from typing import Optional - -from instance_scheduler.configuration.setbuilders.setbuilder import SetBuilder - - -def _requires_date_attributes( - fn: Callable[["WeekdaySetBuilder", str], Optional[list[int]]] -) -> Callable[["WeekdaySetBuilder", str], Optional[list[int]]]: - # this modifier is used to mark to use methods that need year, day and month which are optional for the builder - def check(self: "WeekdaySetBuilder", value: str) -> Optional[list[int]]: - if self._year is None or self._month is None or self._day is None: - raise ValueError( - "year, month and day parameters must be specified when creating the {} for using method {}".format( - self.__class__.__name__, fn.__name__ - ) - ) - - # and if we're checking for the first time then get the first weekday and numbers for the month - if self._first_weekday_in_month is None: - self._first_weekday_in_month, self._days_in_month = calendar.monthrange( - self._year, self._month - ) - - return fn(self, value) - - check.__name__ = fn.__name__ - return check - - -class WeekdaySetBuilder(SetBuilder): - """ - Class to build sets for weekdays, 0-6 or Mon-Sun, day#n for nth occurrence of day in month, or L for last - occurrence of the day for the month - """ - - WEEKDAY_NUMBER_CHAR = "#" - LAST_DAY_WILDCARD = "L" - - def __init__( - self, - wrap: bool = True, - year: Optional[int] = None, - month: Optional[int] = None, - day: Optional[int] = None, - ignorecase: bool = True, - ) -> None: - """ - - :param wrap: Set to True to allow wrapping at last day of the week - :param year: Year of week to build sets for, only required for date aware '#' and 'L' features in expressions - :param month: Month of week to build sets for, only required for date aware '#' and 'L' features in expressions - :param day: Day in week to build sets for, only required for date aware '#' and 'L' features in expressions - :param ignorecase: Set to True 
to ignore case when mapping day names to set values - """ - SetBuilder.__init__( - self, - names=calendar.day_abbr, - wrap=wrap, - ignorecase=ignorecase, - significant_name_characters=3, - last_item_wildcard=WeekdaySetBuilder.LAST_DAY_WILDCARD, - ) - self._year = year - self._month = month - self._day = day - self._first_weekday_in_month: Optional[int] = None - self._days_in_month: Optional[int] = None - - self._post_custom_parsers = [ - self._parse_name_number, # name#num - self._parse_value_number, # value#num - self._parse_name_last_weekday, # nameL - self._parse_value_last_weekday, - ] # valueL - - def _seperator_characters(self) -> str: - # Add last day wildcard as it needs for formatting before parsing - return ( - SetBuilder._seperator_characters(self) - + WeekdaySetBuilder.WEEKDAY_NUMBER_CHAR - + self.LAST_DAY_WILDCARD - ) - - @_requires_date_attributes - def _parse_name_number(self, name_number_str: str) -> Optional[list[int]]: - # weekday_name#occurence - return self._get_occurrence_item( - number_str=name_number_str, fn=self._get_value_by_name - ) - - @_requires_date_attributes - def _parse_value_number(self, value_number_str: str) -> Optional[list[int]]: - # weekday value# occurrence - return self._get_occurrence_item( - number_str=value_number_str, fn=self._get_value_by_str - ) - - def _get_occurrence_item( - self, number_str: str, fn: Callable[[str], Optional[int]] - ) -> Optional[list[int]]: - # gets the nth occurrence of a weekday retrieved by function fn - - # check for separator - temp = number_str.split(self.WEEKDAY_NUMBER_CHAR) - # check for occurrence number and if it is valid - if len(temp) == 2: - try: - number = int(temp[1]) - except ValueError: - raise ValueError( - "Number value must be an integer value ({})".format(temp[1]) - ) - - if number < 1 or number > 5: - raise ValueError( - "Number value must be in range 1-5 ({})".format(temp[1]) - ) - - # get the weekday - weekday = fn(temp[0]) - if weekday is None: - return None - - # gets the 
first occurrence of that weekday in the month - day_for_number_weekday = self._get_day_for_first_occurrence_month(weekday) - - monthday = day_for_number_weekday + ((number - 1) * 7) - return [weekday] if self._day == monthday else [] - - return None - - def _get_day_for_first_occurrence_month(self, weekday: int) -> int: - # calculated the first occurrence of a weekday in a month - if self._first_weekday_in_month is None: - raise ValueError("Expected first weekday in month to be set") - - day = 1 - if weekday != self._first_weekday_in_month: - day += (weekday - self._first_weekday_in_month) % 7 - return day - - @_requires_date_attributes - def _parse_name_last_weekday(self, name_last_weekday: str) -> Optional[list[int]]: - # nameL, returns last occurrence of weekday, specified by its name, in a month - return self._get_last_day_for_weekday_in_month( - name_last_weekday, self._get_value_by_name - ) - - @_requires_date_attributes - def _parse_value_last_weekday(self, value_last_weekday: str) -> Optional[list[int]]: - # valueL, returns last occurrence of weekday, specified by its value, string in a month - return self._get_last_day_for_weekday_in_month( - value_last_weekday, self._get_value_by_str - ) - - def _get_last_day_for_weekday_in_month( - self, last_weekday_str: str, fn: Callable[[str], Optional[int]] - ) -> Optional[list[int]]: - # weekdayL, returns last occurrence of weekday, retrieved by function fn, string in a month - if self._days_in_month is None: - raise ValueError("Expected days in month to be set") - - if last_weekday_str.endswith(WeekdaySetBuilder.LAST_DAY_WILDCARD): - weekday = fn(last_weekday_str[:-1]) - if weekday is not None: - day_for_number_weekday = self._get_day_for_first_occurrence_month( - weekday - ) - while day_for_number_weekday + 7 <= self._days_in_month: - day_for_number_weekday += 7 - return [weekday] if day_for_number_weekday == self._day else [] - return None diff --git a/source/app/instance_scheduler/configuration/ssm.py 
b/source/app/instance_scheduler/configuration/ssm.py new file mode 100644 index 00000000..8698412b --- /dev/null +++ b/source/app/instance_scheduler/configuration/ssm.py @@ -0,0 +1,48 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import re +from typing import TYPE_CHECKING, Final, Iterator, Sequence + +from instance_scheduler.boto_retry import get_client_with_standard_retry + +if TYPE_CHECKING: + from mypy_boto3_ssm import SSMClient +else: + SSMClient = object + + +def replace_ssm_references_with_account_ids( + raw_account_ids_list: Sequence[str], +) -> Iterator[str]: + """ + for any account ids provided in the format {param:[param-name]}, fetch the corresponding + SSM parameter list and append it to the list of account_ids + + :param raw_account_ids_list: a raw list of account_ids that may or may not contain ssm_param references + :return: a new list of account_ids after ssm_param references have been fetched + """ + REGEX_SSM_PARAM: Final = "{param:(.+?)}" + + for account_id in raw_account_ids_list: + if re.match(REGEX_SSM_PARAM, account_id): + param_names = re.findall(REGEX_SSM_PARAM, account_id) + for ssm_account_id in fetch_account_ids_from_ssm_params(param_names): + yield ssm_account_id + else: + yield account_id + + +def fetch_account_ids_from_ssm_params(param_names: list[str]) -> list[str]: + if len(param_names) == 0: + return [] + + ssm_client: SSMClient = get_client_with_standard_retry("ssm") + resp = ssm_client.get_parameters(Names=list(set(param_names))) # remove duplicates + + account_ids = [] + for p in resp.get("Parameters", []): + if p["Type"] == "StringList": + account_ids += p["Value"].split(",") + else: + account_ids.append(p["Value"]) + return account_ids diff --git a/source/app/instance_scheduler/configuration/time_utils.py b/source/app/instance_scheduler/configuration/time_utils.py new file mode 100644 index 00000000..0c8e46e9 --- /dev/null +++ 
b/source/app/instance_scheduler/configuration/time_utils.py @@ -0,0 +1,30 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import datetime +import re +import time + +TIME_FORMAT = "HH:MM" +"""human-readable time format that can be displayed to users if an input fails is_valid_time_str""" + + +def is_valid_time_str(timestr: str) -> bool: + """ + verify that a string matches the time format expected by parse_time_str + + a human-readable representation of a valid time_format can be accessed as TIME_FORMAT + """ + return re.match(r"^([0|1]?\d|2[0-3]):[0-5]\d$", timestr) is not None + + +def parse_time_str(timestr: str) -> datetime.time: + """ + Standardised method to build time object instance from time string + :param timestr: string in format as defined in configuration.TIME_FORMAT_STRING + :return: time object from time string, None if the time is invalid + """ + try: + tm = time.strptime(timestr, "%H:%M") + except ValueError: + raise ValueError(f"Invalid time string {timestr}, must match {TIME_FORMAT}") + return datetime.time(tm.tm_hour, tm.tm_min, 0) diff --git a/source/app/instance_scheduler/configuration/setbuilders/__init__.py b/source/app/instance_scheduler/cron/__init__.py similarity index 100% rename from source/app/instance_scheduler/configuration/setbuilders/__init__.py rename to source/app/instance_scheduler/cron/__init__.py diff --git a/source/app/instance_scheduler/cron/asg.py b/source/app/instance_scheduler/cron/asg.py new file mode 100644 index 00000000..fd30c35a --- /dev/null +++ b/source/app/instance_scheduler/cron/asg.py @@ -0,0 +1,123 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +"""Convert cron expressions into recurrence strings for AutoScaling Group Scheduled +Scaling Actions""" +from typing import Final + +from instance_scheduler.cron.expression import ( + CronAll, + CronExpression, + CronLastWeekday, + CronNearestWeekday, + CronNthWeekday, + CronRange, + CronSingleValueLast, + CronSingleValueNumeric, + CronUnion, +) +from instance_scheduler.cron.parser import month_abbrs, weekday_abbrs + +# month names are one-indexed +_month_value_to_abbr: Final = {i + 1: name for i, name in enumerate(month_abbrs)} + + +def to_asg_expr_months(expr: CronExpression) -> str: + match expr: + case CronAll(): + return "*" + case CronSingleValueNumeric(value=_): + return _month_value_to_abbr[expr.value] + case CronRange(start=_, end=None, interval=_): + return f"{to_asg_expr_months(expr.start)}/{expr.interval}" + case CronRange(start=_, end=_, interval=1): + if not expr.end: + raise ValueError() + return f"{to_asg_expr_months(expr.start)}-{to_asg_expr_months(expr.end)}" + case CronRange(start=_, end=_, interval=_): + if not expr.end: + raise ValueError() + return f"{to_asg_expr_months(expr.start)}-{to_asg_expr_months(expr.end)}/{expr.interval}" + case CronUnion(): + return ",".join(to_asg_expr_months(sub_expr) for sub_expr in expr.exprs) + case CronSingleValueLast(): + return to_asg_expr_months(CronSingleValueNumeric(value=12)) + case CronNearestWeekday(): + raise ValueError("Nearest weekday in month-of-year expression is malformed") + case CronNthWeekday(): + raise ValueError("Nth weekday in month-of-year expression is malformed") + case CronLastWeekday(): + raise ValueError("Last weekday in month-of-year expression is malformed") + + +def to_asg_expr_monthdays(expr: CronExpression) -> str: + match expr: + case CronAll(): + return "*" + case CronSingleValueNumeric(value=_): + return str(expr.value) + case CronSingleValueLast(): + return "L" + case CronRange(start=_, end=_, interval=1): + if expr.end is None: + raise 
ValueError( + "Range with only start and interval of one is malformed" + ) + return ( + f"{to_asg_expr_monthdays(expr.start)}-{to_asg_expr_monthdays(expr.end)}" + ) + case CronRange(start=_, end=None, interval=_): + return f"{to_asg_expr_monthdays(expr.start)}/{expr.interval}" + case CronRange(start=_, end=_, interval=_): + if expr.end is None: + # mypy is unable to narrow this type appropriately + raise RuntimeError("Unexpected pattern matching behavior") + return f"{to_asg_expr_monthdays(expr.start)}-{to_asg_expr_monthdays(expr.end)}/{expr.interval}" + case CronUnion(): + return ",".join(to_asg_expr_monthdays(sub_expr) for sub_expr in expr.exprs) + case CronNearestWeekday(): + raise NotImplementedError( + "Nearest weekday not supported by underlying service" + ) + case CronNthWeekday(): + raise ValueError("Nth weekday in day-of-month expression is malformed") + case CronLastWeekday(): + raise ValueError("Last weekday in day-of-month expression is malformed") + + +# Instance Scheduler uses zero to mean Monday, unlike most cron implementations +_weekday_value_to_abbr: Final = {i: name for i, name in enumerate(weekday_abbrs)} + + +def to_asg_expr_weekdays(expr: CronExpression) -> str: + match expr: + case CronAll(): + return "*" + case CronSingleValueNumeric(value=_): + return _weekday_value_to_abbr[expr.value] + case CronSingleValueLast(): + return "L" + case CronRange(start=_, end=_, interval=1): + if expr.end is None: + raise ValueError( + "Range with only start and interval of one is malformed" + ) + return ( + f"{to_asg_expr_weekdays(expr.start)}-{to_asg_expr_weekdays(expr.end)}" + ) + case CronRange(start=_, end=None, interval=_): + return f"{to_asg_expr_weekdays(expr.start)}/{expr.interval}" + case CronRange(start=_, end=_, interval=_): + if expr.end is None: + # mypy is unable to narrow this type appropriately + raise RuntimeError("Unexpected pattern matching behavior") + return 
f"{to_asg_expr_weekdays(expr.start)}-{to_asg_expr_weekdays(expr.end)}/{expr.interval}" + case CronUnion(): + return ",".join(to_asg_expr_weekdays(sub_expr) for sub_expr in expr.exprs) + case CronNearestWeekday(): + raise ValueError("Nearest weekday in day-of-week expression is malformed") + case CronNthWeekday(): + raise NotImplementedError("Nth weekday not supported by underlying service") + case CronLastWeekday(): + raise NotImplementedError( + "Last weekday not supported by underlying service" + ) diff --git a/source/app/instance_scheduler/cron/cron_recurrence_expression.py b/source/app/instance_scheduler/cron/cron_recurrence_expression.py new file mode 100644 index 00000000..1d824ee0 --- /dev/null +++ b/source/app/instance_scheduler/cron/cron_recurrence_expression.py @@ -0,0 +1,61 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from datetime import datetime +from typing import Any + +from instance_scheduler.cron.cron_to_running_period import ( + monthday_cron_expr_contains, + months_cron_expr_contains, + weekday_cron_expr_contains, +) +from instance_scheduler.cron.expression import CronAll, CronExpression +from instance_scheduler.cron.parser import ( + parse_monthdays_expr, + parse_months_expr, + parse_weekdays_expr, +) + + +@dataclass(frozen=True) +class CronRecurrenceExpression: + """A cron recurrence expression for days and months, but not time of day""" + + monthdays: CronExpression = CronAll() + months: CronExpression = CronAll() + weekdays: CronExpression = CronAll() + + def to_asg_scheduled_action(self) -> Any: + raise NotImplementedError + + @classmethod + def parse( + cls, + *, + monthdays: set[str] = {"*"}, + months: set[str] = {"*"}, + weekdays: set[str] = {"*"}, + ) -> "CronRecurrenceExpression": + return CronRecurrenceExpression( + monthdays=parse_monthdays_expr(monthdays), + months=parse_months_expr(months), + weekdays=parse_weekdays_expr(weekdays), 
+ ) + + def contains(self, dt: datetime) -> bool: + """Does `dt` satisfy the recurrence defined in `expr`""" + # When both days-of-month and days-of-week are specified, the normal behavior for + # cron is to trigger on a day that satisfies either expression. However, Instance + # Scheduler has historically checked that a date satisfies all fields. If a field is + # missing from the period definition, it is considered satisfied. This means that if + # a running period is not missing (`None`) and not a wildcard for all values, only + # days that satisfy the intersection of days-of-month and days-of-week satisfy the + # expression. This is a departure from standard cron behavior that may surprise + # customers. + return all( + ( + monthday_cron_expr_contains(self.monthdays, dt), + months_cron_expr_contains(self.months, dt), + weekday_cron_expr_contains(self.weekdays, dt), + ) + ) diff --git a/source/app/instance_scheduler/cron/cron_to_running_period.py b/source/app/instance_scheduler/cron/cron_to_running_period.py new file mode 100644 index 00000000..17932221 --- /dev/null +++ b/source/app/instance_scheduler/cron/cron_to_running_period.py @@ -0,0 +1,262 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +"""Determine if a `datetime` satisfies a recurrence expression""" +from calendar import monthrange +from dataclasses import dataclass +from datetime import date, datetime, timedelta + +from instance_scheduler.cron.expression import ( + CronAll, + CronExpression, + CronLastWeekday, + CronNearestWeekday, + CronNthWeekday, + CronRange, + CronSingleValueLast, + CronSingleValueNumeric, + CronUnion, + FullCronExpression, +) + +""" +range/wrapping behavior notes (last updated 3/1/24) + + Correct handling of wrapping behavior presents some somewhat unintuitive problems when the domain changes over time +like it does for monthdays. For example. suppose the expr "30-L" is used as a monthday expression. 
in various months/years +this can resolve to 30-30, 30-31, (both simple cases) and 30-28 (a potential wrap case). It is assumed in this last case +that 30-L in a month with 28 days should take no action at all + + However another possible scenario is an expression of 21-9/2 which might reasonably be interpreted as +"starting on the 21st, run every 2nd day until the 9th of the next month, but don't run on the 10th-20th". +In specific terms: In a month with 30 days, run on {21, 23, 25, 27, 29} in the current month and {1, 3, 5, 7, 9} +in the next month. However, this is NOT the normal behavior of cron because cron is typically evaluated within the context of +the current month only (meaning that wrapping accounts for the number of days in the current month, not the prev month). +For example, in the transition between March and April, this cron would resolve to {2, 4, 6, 8, 21, 23, 25, 27, 31} +in March (with 31 days) followed by {1, 3, 5, 7, 9, 21, 23, 25, 27, 29} in April (with 30 days) leading to two +concurrent days being scheduled (Mar 31st and Apr 1st). + + We do not currently attempt to address this peculiarity and instead simply make the assertion that while days/months +(with consistent domains of 0-6 and 1-12 respectively) do support wrapping, monthdays do NOT wrap. In addition +If an expr specifies a start that is after the end of a given domain, the expression will resolve to an empty set of +running values and WILL NOT WRAP. Thus a weekday expression of "7-3" would never be true because weekdays only go from +0-6 and we will not auto-wrap this malformed expression; likewise a monthday expression of 30-L will run on the 30th/31st +of every month when possible and will NOT run at all in months with < 30 days. + + Similarly ranges that start from L and plan to wrap around (such as L-5) are not considered a legal format +(this being enforced by the typing of CronRange). 
This leaves open the option to potentially support "L-5" as +"the last 5 days/months of the week/month/year" in the future. +""" # noqa: W291 + + +def in_period(expr: FullCronExpression, dt: datetime) -> bool: + """Does `dt` satisfy the recurrence defined in `expr`""" + # When both days-of-month and days-of-week are specified, the normal behavior for + # cron is to trigger on a day that satisfies either expression. However, Instance + # Scheduler has historically checked that a date satisfies all fields. If a field is + # missing from the period definition, it is considered satisfied. This means that if + # a running period is not missing (`None`) and not a wildcard for all values, only + # days that satisfy the intersection of days-of-month and days-of-week satisfy the + # expression. This is a departure from standard cron behavior that may surprise + # customers. + return all( + ( + monthday_cron_expr_contains(expr.days_of_month, dt), + months_cron_expr_contains(expr.months_of_year, dt), + weekday_cron_expr_contains(expr.days_of_week, dt), + ) + ) + + +def months_cron_expr_contains(expr: CronExpression, dt: datetime) -> bool: + """ + Does `dt` satisfy `expr` when interpreted as a months-of-year expression + note: dt is assumed to be timezone-aware and already translated into the correct timezone + """ + months_domain = IntDomain(1, 12) + + match expr: + case CronAll(): + return True + case CronSingleValueNumeric(): + return dt.month == expr.value + case CronRange(): + running_months = _range_to_discrete_values(expr, months_domain) + return dt.month in running_months + case CronUnion(): + return any( + months_cron_expr_contains(sub_expr, dt) for sub_expr in expr.exprs + ) + case CronSingleValueLast(): + return dt.month == months_domain.end + case CronNearestWeekday(): + raise ValueError("Nearest Weekday not supported by month expression") + case CronNthWeekday(): + raise ValueError("Nth Weekday not supported by month expression") + case CronLastWeekday(): + raise 
ValueError("Last Weekday not supported by month expression") + + +def monthday_cron_expr_contains(expr: CronExpression, dt: datetime) -> bool: + """Does `dt` satisfy `expr` when interpreted as a days-of-month expression""" + _, days_in_month = monthrange(dt.year, dt.month) + monthdays_domain = IntDomain(1, days_in_month) + + match expr: + case CronAll(): + return True + case CronSingleValueNumeric(): + return dt.day == expr.value + case CronRange(): + return dt.day in _range_to_discrete_values(expr, monthdays_domain) + case CronUnion(): + return any( + monthday_cron_expr_contains(sub_expr, dt) for sub_expr in expr.exprs + ) + case CronSingleValueLast(): + return dt.day == monthdays_domain.end + case CronNearestWeekday(): + nearest_weekday = resolve_nearest_weekday_as_monthday(expr.value.value, dt) + return dt.day == nearest_weekday + case CronNthWeekday(): + raise ValueError("Nth Weekday not supported by monthday expression") + case CronLastWeekday(): + raise ValueError("Last Weekday not supported by monthday expression") + + +def weekday_cron_expr_contains(expr: CronExpression, dt: datetime) -> bool: + """Does `dt` satisfy `expr` when interpreted as a days-of-week expression""" + weekdays_domain = IntDomain(0, 6) + + match expr: + case CronAll(): + return True + case CronSingleValueNumeric(): + return dt.weekday() == expr.value + case CronRange(): + return dt.weekday() in _range_to_discrete_values(expr, weekdays_domain) + case CronUnion(): + return any( + weekday_cron_expr_contains(sub_expr, dt) for sub_expr in expr.exprs + ) + case CronSingleValueLast(): + return dt.weekday() == weekdays_domain.end + case CronNearestWeekday(): + raise NotImplementedError + case CronNthWeekday(): + return ( + resolve_nth_weekday_as_monthday( + weekday=expr.day.value, n=expr.n, reference_date=dt + ) + == dt.day + ) + case CronLastWeekday(): + return resolve_last_weekday_as_monthday(expr.day.value, dt) == dt.day + + +def resolve_nearest_weekday_as_monthday(monthday: int, 
reference_date: date) -> int: + """resolve the nearest weekday to the monthday in the month of the reference date""" + target_date = date( + year=reference_date.year, month=reference_date.month, day=monthday + ) + match target_date.weekday(): + case 5: # saturday + if monthday == 1: + # going backward would be the prev month, so go forward instead + return monthday + 2 + else: + return monthday - 1 + case 6: # sunday + _, last_day_of_month = monthrange(target_date.year, target_date.month) + if monthday == last_day_of_month: + # going forward would be the next month, so go backward instead + return monthday - 2 + else: + return monthday + 1 + case _: + return monthday + + +def resolve_last_weekday_as_monthday(weekday: int, reference_date: date) -> int: + """resolve the last weekday within the month of the reference date""" + _, last_day_of_month = monthrange(reference_date.year, reference_date.month) + + # iterate backwards from the last day until we find the desired weekday + for monthday in reversed(range(1, last_day_of_month + 1)): + pointer_date = date( + year=reference_date.year, month=reference_date.month, day=monthday + ) + if pointer_date.weekday() == weekday: + return monthday + + # catch all that should not be possible assuming weekday is between 0-6 + raise ValueError(f"weekday {weekday} not found within month of {reference_date}") + + +def resolve_nth_weekday_as_monthday(weekday: int, n: int, reference_date: date) -> int: + """ + resolves the monthday of the nth occurrence of the specified weekday or -1 if there is no such nth occurrence + """ + first_occurrence = _resolve_first_occurrence_of_weekday_in_month( + weekday, reference_date + ) + nth_occurrence = first_occurrence + timedelta(days=7 * (n - 1)) + if nth_occurrence.month == reference_date.month: + return nth_occurrence.day + else: + return -1 + + +def _resolve_first_occurrence_of_weekday_in_month( + weekday: int, reference_date: date +) -> date: + reference_date = reference_date.replace(day=1) + 
offset_to_first_day = weekday - reference_date.weekday() % 7 + return reference_date + timedelta(days=offset_to_first_day) + + +@dataclass +class IntDomain: + start: int + end: int + + def __post_init__(self) -> None: + assert self.start <= self.end, "start must be less than end" + + def width(self) -> int: + return self.end - self.start + + def contains(self, val: int) -> bool: + return self.start <= val <= self.end + + +def _range_to_discrete_values(expr: CronRange, domain: IntDomain) -> set[int]: + + start = _cron_single_val_to_int(expr.start, domain) + end = _cron_single_val_to_int(expr.end, domain) + + if start > domain.end: + return set() + + will_wrap = start > end + pointer = start + values = set() + while will_wrap or pointer <= end: + if domain.contains(pointer): + values.add(pointer) + + pointer += expr.interval + if will_wrap and pointer > domain.end: + pointer -= domain.width() + 1 + will_wrap = False + + return values + + +def _cron_single_val_to_int( + expr: CronSingleValueNumeric | CronSingleValueLast | None, domain: IntDomain +) -> int: + match expr: + case CronSingleValueNumeric(): + return expr.value + case CronSingleValueLast() | None: + return domain.end diff --git a/source/app/instance_scheduler/cron/expression.py b/source/app/instance_scheduler/cron/expression.py new file mode 100644 index 00000000..ac153593 --- /dev/null +++ b/source/app/instance_scheduler/cron/expression.py @@ -0,0 +1,147 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +""" +The class hierarchy defined in this module is a high-level representation of the fields +of a cron expression. A cron expression is a string that defines the recurrence for a +job executed by the cron daemon, a job scheduler on *nix distributions. 
+ +A cron expression most commonly takes the form of: + + + +cron expressions are not defined by a standard, so the specific supported features as +well as the exact behavior of features will differ depending on the implementation. The +form of the expression may differ as well. + +Default features supported by most implementations include: + +- names instead of integer values for months and weekdays +- ranges (e.g. 4-6, Mon-Fri) +- steps (e.g. */2, or every other day/month) + +Extensions supported by many implementations include: + +- wildcard for last day (L) +- wildcard for nth weekday (e.g. Mon#3, or the third Monday of the month) +- wildcard for nearest weekday (e.g. 13W, or the nearest weekday to the 13th) + +Deviations from the above definition include: + +- additional seconds field at the beginning of the expression +- additional years field at the end of the expression +- combinations of different expressions separated by commas + +The specific features supported and the behavior of the features for each field differ +depending on the specific implementation. This class hierarchy is intended to be an +abstract representation of an expression as defined by the user. An expression can exist +independent of it being used as a day-of-month or day-of-week field, but certain +features only make sense for certain fields (e.g. nearest weekday). + +As an example, consider the expression "1-5/2". This expression should be parsed as "a +step starting on the first value, repeating every two iterations, stopping at the fifth +value". This expression could be resolved equally well as a day-of-month field meaning +the first of the month, the third of the month, and the fifth of the month, or as a +day-of-week field meaning Monday, Wednesday, and Friday[1]. + +The intended use of this class hierarchy is for it to be produced by a parser that may +or may not have knowledge of the specific field being parsed or the specific +implementation where the expression was defined. 
After parsing, the expression retains +no semantic meaning specific to the field or implementation. Optionally, a validator +that does have knowledge of the specific field and/or implementation can decide if the +expression conforms to the expected feature set and behavior for the field or +implementation. Finally, the business logic is implemented with knowledge of the feature +set supported by the target implementation. + +As a simple example, consider the expression "?" as a day-of-week field value. Parsing +the expression should result in an "all values" representation. Validation confirms that +this meaning is correct for a day-of-week field in an Instance Scheduler period +definition. The business logic is to translate this expression to a recurrence string +for AutoScaling Groups. The day-of-week field for ASGs supports the "all values" +feature, but it only uses the asterisk wildcard. The output of the conversion to ASG +day-of-week recurrence string will be "*". + +[1] Though most cron implementations use the value one to mean Monday in a day-of-week +field, Instance Scheduler was implemented with the value zero meaning Monday, which +corresponds to the values in the Python `calendar` package. +""" +from dataclasses import dataclass + + +@dataclass(frozen=True) +class CronUnion: + """The union of multiple other expressions, typically represented by a + comma-separated list""" + + exprs: tuple["CronExpression", ...] 
+
+
+@dataclass(frozen=True)
+class CronAll:
+    """All values"""
+
+
+@dataclass(frozen=True)
+class CronSingleValueNumeric:
+    """A single numeric value"""
+
+    value: int
+
+
+@dataclass(frozen=True)
+class CronSingleValueLast:
+    """The last possible value"""
+
+
+@dataclass(frozen=True)
+class CronNearestWeekday:
+    """The weekday nearest the specified value"""
+
+    value: CronSingleValueNumeric
+
+
+@dataclass(frozen=True)
+class CronNthWeekday:
+    """The nth occurrence of the specified weekday in a month"""
+
+    day: CronSingleValueNumeric
+    n: int
+
+
+@dataclass(frozen=True)
+class CronLastWeekday:
+    """The last occurrence of the specified weekday in a month"""
+
+    day: CronSingleValueNumeric
+
+
+@dataclass(frozen=True)
+class CronRange:
+    """A range of values beginning at `start`, repeating every `interval` values, up to
+    and including `end`. `start` may be after `end`, in which case the range wraps if
+    allowed."""
+
+    start: CronSingleValueNumeric
+    end: CronSingleValueNumeric | CronSingleValueLast | None = None
+    interval: int = 1
+
+
+CronExpression = (
+    CronUnion
+    | CronAll
+    | CronSingleValueNumeric
+    | CronSingleValueLast
+    | CronNearestWeekday
+    | CronNthWeekday
+    | CronLastWeekday
+    | CronRange
+)
+"""A union type for the possible values of an arbitrary cron expression"""
+
+
+@dataclass(frozen=True)
+class FullCronExpression:
+    """A cron recurrence expression for days and months, but not time of day"""
+
+    days_of_month: CronExpression
+    months_of_year: CronExpression
+    days_of_week: CronExpression
diff --git a/source/app/instance_scheduler/cron/parser.py b/source/app/instance_scheduler/cron/parser.py
new file mode 100644
index 00000000..add4cfc3
--- /dev/null
+++ b/source/app/instance_scheduler/cron/parser.py
@@ -0,0 +1,297 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0 +""" +Parse a string used in a period definition for a day-of-month, month-of-year, or +day-of-week field to an abstract representation. + +Use knowledge of the features supported by Instance Scheduler periods. Though each field +supports different features, it is more convenient to allow any reasonable expression to +be parsed, then later use a validator for the specific field to raise errors. This way +we only have to define one set of parsers. +""" +import re +from collections.abc import Callable, Mapping +from dataclasses import replace +from functools import partial +from itertools import chain +from typing import Final + +from instance_scheduler.cron.expression import ( + CronAll, + CronExpression, + CronLastWeekday, + CronNearestWeekday, + CronNthWeekday, + CronRange, + CronSingleValueLast, + CronSingleValueNumeric, + CronUnion, + FullCronExpression, +) +from instance_scheduler.cron.validator import ( + validate_monthdays_expression, + validate_months_expression, + validate_weekdays_expression, +) + +PeriodDefnStr = set[str] | None + + +def parse_period_def( + *, + days_of_month: PeriodDefnStr, + months_of_year: PeriodDefnStr, + days_of_week: PeriodDefnStr, +) -> FullCronExpression: + return FullCronExpression( + days_of_month=parse_monthdays_expr(days_of_month), + months_of_year=parse_months_expr(months_of_year), + days_of_week=parse_weekdays_expr(days_of_week), + ) + + +def parse_months_expr(months_expr: PeriodDefnStr) -> CronExpression: + result: Final = _parse_multi_general(months_expr, _month_name_to_value) + validate_months_expression(result) + return result + + +def parse_monthdays_expr(monthdays_expr: PeriodDefnStr) -> CronExpression: + result: Final = _parse_multi_general(monthdays_expr, {}) + validate_monthdays_expression(result) + return result + + +def parse_weekdays_expr(weekdays_expr: PeriodDefnStr) -> CronExpression: + result: Final = _parse_multi_general(weekdays_expr, _weekday_name_to_value) + 
validate_weekdays_expression(result) + return result + + +def _parse_multi_general( + exprs: PeriodDefnStr, domain: Mapping[str, int] +) -> CronExpression: + if exprs is None: + return CronAll() + if len(exprs) == 0: + raise ValueError("Zero-length string set is malformed") + if len(exprs) == 1: + return _parse_single_general(next(iter(exprs)), domain) + else: + return CronUnion( + exprs=tuple(_parse_single_general(expr, domain) for expr in exprs) + ) + + +def _parse_single_general(expr: str, domain: Mapping[str, int]) -> CronExpression: + exprs: Final = expr.split(",") + if len(exprs) > 1: + return CronUnion( + exprs=tuple(_parse_single_general(sub_expr, domain) for sub_expr in exprs) + ) + return _general_parse(exprs[0].strip().lower(), domain) + + +def _general_parse(expr: str, domain: Mapping[str, int]) -> CronExpression: + parsers: Final[list[Callable[[str], CronExpression]]] = [ + partial(_parse_single_value_general, domain=domain), + _parse_all_values, + _parse_last_value, + partial(_parse_step, domain=domain), + partial(_parse_range, domain=domain), + partial(_parse_nth_weekday, domain=domain), + partial(_parse_last_weekday, domain=domain), + _parse_nearest_weekday, + ] + for parser in parsers: + try: + return parser(expr) + except ValueError: + pass + raise ValueError(f"Could not parse as any form of cron expression: {expr}") + + +# period definitions are not localized +month_names: Final = ( + "january", + "february", + "march", + "april", + "may", + "june", + "july", + "august", + "september", + "october", + "november", + "december", +) +month_abbrs: Final = list(month_name[0:3] for month_name in month_names) +# month names can be the full month name, or the first three letters +_month_name_to_value: Final = { + name: i + 1 for i, name in chain(enumerate(month_names), enumerate(month_abbrs)) +} + + +# period definitions are not localized +weekday_names: Final = ( + "monday", + "tuesday", + "wednesday", + "thursday", + "friday", + "saturday", + "sunday", 
+) +weekday_abbrs: Final = list(weekday_name[0:3] for weekday_name in weekday_names) +# weekday names can be the full day name, or the first three letters +_weekday_name_to_value: Final = { + name: i for i, name in chain(enumerate(weekday_names), enumerate(weekday_abbrs)) +} + + +_single_value_re: Final = re.compile(r"^(\d+)$") + + +def _parse_single_numeric_value(expr: str) -> CronSingleValueNumeric: + if not (match := _single_value_re.match(expr)): + raise ValueError(f"Could not parse as single numeric value: {expr}") + + value: Final = int(match.group(1)) + return CronSingleValueNumeric(value=value) + + +def _parse_single_numeric_value_by_name( + expr: str, domain: Mapping[str, int] +) -> CronSingleValueNumeric: + if expr.lower() not in domain: + raise ValueError(f"Could not parse as single name: {expr}") + + value: Final = domain[expr] + return CronSingleValueNumeric(value=value) + + +def _parse_single_value_numeric( + expr: str, domain: Mapping[str, int] +) -> CronSingleValueNumeric: + parsers: Final[list[Callable[[str], CronSingleValueNumeric]]] = [ + _parse_single_numeric_value, + partial(_parse_single_numeric_value_by_name, domain=domain), + ] + for parser in parsers: + try: + return parser(expr) + except ValueError: + pass + raise ValueError(f"Could not parse as single value: {expr}") + + +def _parse_single_value_general( + expr: str, domain: Mapping[str, int] +) -> CronSingleValueNumeric | CronSingleValueLast: + parsers: Final[ + list[Callable[[str], CronSingleValueNumeric | CronSingleValueLast]] + ] = [ + _parse_single_numeric_value, + partial(_parse_single_numeric_value_by_name, domain=domain), + _parse_last_value, + ] + for parser in parsers: + try: + return parser(expr) + except ValueError: + pass + raise ValueError(f"Could not parse as single value: {expr}") + + +_last_re: Final = re.compile(r"^L$", flags=re.IGNORECASE) + + +def _parse_last_value(expr: str) -> CronSingleValueLast: + if not _last_re.match(expr): + raise ValueError(f"Could not parse as 
last value wildcard: {expr}")
+
+    return CronSingleValueLast()
+
+
+all_values_re: Final = re.compile(r"^[*?]$")
+
+
+def _parse_all_values(expr: str) -> CronAll:
+    if not all_values_re.match(expr):
+        raise ValueError(f"Could not parse as all values wildcard: {expr}")
+
+    return CronAll()
+
+
+_step_re: Final = re.compile(r"^(.+)/\s*(\d+)\s*$")
+
+
+def _parse_step(expr: str, domain: Mapping[str, int]) -> CronRange:
+    if not (match := _step_re.match(expr)):
+        raise ValueError(f"Could not parse as step expression: {expr}")
+
+    range_expr: Final = match.group(1)
+    interval: Final = int(match.group(2).strip())
+
+    try:
+        range_ = _parse_range(range_expr, domain=domain)
+        return replace(range_, interval=interval)
+    except ValueError:
+        pass
+
+    try:
+        start: Final = _parse_single_value_general(range_expr, domain)
+        if isinstance(start, CronSingleValueLast):
+            raise ValueError(f"Unable to parse step expression starting with L: {expr}")
+        return CronRange(start=start, interval=interval)
+    except ValueError:
+        raise ValueError(f"Could not parse range of step expression: {expr}")
+
+
+_range_re: Final = re.compile(r"^(.+)-(.+)$")
+
+
+def _parse_range(expr: str, domain: Mapping[str, int]) -> CronRange:
+    if not (match := _range_re.match(expr)):
+        raise ValueError(f"Could not parse as range expression: {expr}")
+
+    start: Final = _parse_single_value_general(match.group(1).strip(), domain)
+    end: Final = _parse_single_value_general(match.group(2).strip(), domain)
+
+    if isinstance(start, CronSingleValueLast):
+        raise ValueError(f"Unable to parse range expression starting with L: {expr}")
+
+    return CronRange(start=start, end=end)
+
+
+_nth_weekday_re: Final = re.compile(r"^(.*)#(\d+)$")
+
+
+def _parse_nth_weekday(expr: str, domain: Mapping[str, int]) -> CronExpression:
+    if not (match := _nth_weekday_re.match(expr)):
+        raise ValueError(f"Could not parse as Nth weekday expression: {expr}")
+
+    return CronNthWeekday(
+        day=_parse_single_value_numeric(match.group(1), domain),
n=int(match.group(2))
+    )
+
+
+_last_weekday_re: Final = re.compile(r"^(.*)L$", flags=re.IGNORECASE)
+
+
+def _parse_last_weekday(expr: str, domain: Mapping[str, int]) -> CronExpression:
+    if not (match := _last_weekday_re.match(expr)):
+        raise ValueError(f"Could not parse as last weekday expression: {expr}")
+
+    return CronLastWeekday(day=_parse_single_value_numeric(match.group(1), domain))
+
+
+_nearest_weekday_re: Final = re.compile(r"^(\d+)W$", flags=re.IGNORECASE)
+
+
+def _parse_nearest_weekday(expr: str) -> CronExpression:
+    if not (match := _nearest_weekday_re.match(expr)):
+        raise ValueError(f"Could not parse as nearest weekday expression: {expr}")
+
+    return CronNearestWeekday(value=CronSingleValueNumeric(int(match.group(1))))
diff --git a/source/app/instance_scheduler/cron/validator.py b/source/app/instance_scheduler/cron/validator.py
new file mode 100644
index 00000000..756d90a1
--- /dev/null
+++ b/source/app/instance_scheduler/cron/validator.py
@@ -0,0 +1,122 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+"""
+Validate a parsed cron expression in the context of the Instance Scheduler
+implementation and the specific field. There are potentially valid strings that we have
+rejected in the past, such as the last wildcard "L" in a range expression within a
+month-of-year expression.
+"""
+from instance_scheduler.cron.expression import (
+    CronAll,
+    CronExpression,
+    CronLastWeekday,
+    CronNearestWeekday,
+    CronNthWeekday,
+    CronRange,
+    CronSingleValueLast,
+    CronSingleValueNumeric,
+    CronUnion,
+)
+
+
+def validate_months_expression(expr: CronExpression) -> None:
+    match expr:
+        case CronAll():
+            return
+        case CronSingleValueNumeric():
+            if expr.value < 1 or expr.value > 12:
+                raise ValueError(
+                    f"Numeric value in month-of-year expression must be between 1 and 12: {expr.value}"
+                )
+        case CronSingleValueLast():
+            # backwards compatibility reasons only. 
we could likely allow this without issue now + raise ValueError("Last value wildcard not supported in months expressions") + case CronRange(): + if isinstance(expr.start, CronSingleValueLast) or isinstance( + expr.end, CronSingleValueLast + ): + raise ValueError( + "Last value wildcard in month-of-year range expression is malformed" + ) + validate_months_expression(expr.start) + if expr.end is not None: + validate_months_expression(expr.end) + case CronUnion(): + for sub_expr in expr.exprs: + validate_months_expression(sub_expr) + case CronNearestWeekday(): + raise ValueError("Nearest weekday in month-of-year expression is malformed") + case CronNthWeekday(): + raise ValueError("Nth weekday in month-of-year expression is malformed") + case CronLastWeekday(): + raise ValueError("Last weekday in month-of-year expression is malformed") + + +def validate_monthdays_expression(expr: CronExpression) -> None: + match expr: + case CronAll(): + return + case CronSingleValueNumeric(): + if expr.value < 1 or expr.value > 31: + raise ValueError( + f"Numeric value in day-of-month expression must be between 1 and 31: {expr.value}" + ) + case CronSingleValueLast(): + return + case CronRange(): + validate_monthdays_expression(expr.start) + if expr.end is not None: + validate_monthdays_expression(expr.end) + if ( + isinstance(expr.end, CronSingleValueNumeric) + and expr.start.value > expr.end.value + ): + raise ValueError( + f"Range wrapping is not supported for monthday expressions. 
received: {expr.start.value}-{expr.end.value}" + ) + + case CronUnion(): + for sub_expr in expr.exprs: + validate_monthdays_expression(sub_expr) + case CronNearestWeekday(): + validate_monthdays_expression(expr.value) + case CronNthWeekday(): + raise ValueError("Nth weekday in day-of-month expression is malformed") + case CronLastWeekday(): + raise ValueError("Last weekday in day-of-month expression is malformed") + + +def validate_weekdays_expression(expr: CronExpression) -> None: + match expr: + case CronAll(): + return + case CronSingleValueNumeric(): + if expr.value < 0 or expr.value > 6: + raise ValueError( + f"Numeric value in day-of-week expression must be between 0 and 6: {expr.value}" + ) + case CronSingleValueLast(): + return + case CronRange(): + if isinstance(expr.start, CronSingleValueLast) or isinstance( + expr.end, CronSingleValueLast + ): + raise ValueError( + "Last value wildcard in day-of-week range expression is malformed" + ) + validate_weekdays_expression(expr.start) + if expr.end is not None: + validate_weekdays_expression(expr.end) + case CronUnion(): + for sub_expr in expr.exprs: + validate_weekdays_expression(sub_expr) + case CronNearestWeekday(): + raise ValueError("Nearest weekday in day-of-week expression is malformed") + case CronNthWeekday(): + if expr.n < 1 or expr.n > 5: + raise ValueError( + f"Value for N in Nth weekday expression in day-of-week expression must be between 1 and 5: {expr.n}" + ) + validate_weekdays_expression(expr.day) + case CronLastWeekday(): + validate_weekdays_expression(expr.day) diff --git a/source/app/instance_scheduler/handler/__init__.py b/source/app/instance_scheduler/handler/__init__.py index 57302271..04f8b7b7 100644 --- a/source/app/instance_scheduler/handler/__init__.py +++ b/source/app/instance_scheduler/handler/__init__.py @@ -1,24 +1,2 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -from collections.abc import Sequence as _Sequence -from typing import Any as _Any -from typing import Final as _Final - -from instance_scheduler.handler.base import Handler -from instance_scheduler.handler.cfn_schedule import CfnScheduleHandler -from instance_scheduler.handler.cli import CliHandler -from instance_scheduler.handler.config_resource import SchedulerSetupHandler -from instance_scheduler.handler.scheduling_orchestrator import ( - SchedulingOrchestratorHandler, -) -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler -from instance_scheduler.handler.spoke_registration import SpokeRegistrationHandler - -handlers: _Final[_Sequence[type[Handler[_Any]]]] = ( - SchedulingRequestHandler, - SchedulerSetupHandler, - CfnScheduleHandler, - CliHandler, - SchedulingOrchestratorHandler, - SpokeRegistrationHandler, -) diff --git a/source/app/instance_scheduler/handler/asg.py b/source/app/instance_scheduler/handler/asg.py new file mode 100644 index 00000000..14da689b --- /dev/null +++ b/source/app/instance_scheduler/handler/asg.py @@ -0,0 +1,205 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Any, Final, Set, cast +from uuid import uuid4 + +from aws_lambda_powertools.logging import Logger + +from instance_scheduler.handler.environments.asg_env import AsgEnv +from instance_scheduler.handler.scheduling_request import ( + SchedulingRequest, + validate_scheduler_request, +) +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from instance_scheduler.model.store.period_definition_store import ( + UnknownPeriodException, +) +from instance_scheduler.model.store.schedule_definition_store import ( + UnknownScheduleException, +) +from instance_scheduler.ops_metrics.metric_type.asg_count_metric import AsgCountMetric +from instance_scheduler.ops_metrics.metrics import collect_metric, get_metrics_env +from instance_scheduler.service.asg import AsgService +from instance_scheduler.util.session_manager import assume_role +from instance_scheduler.util.sns_handler import SnsHandler + +if TYPE_CHECKING: + from aws_lambda_powertools.utilities.typing import LambdaContext +else: + LambdaContext = object + +ASG_SERVICE: Final = "asg" + + +@dataclass(frozen=True) +class AsgMetricsDefinition: + region: str + num_tagged_auto_scaling_groups: int + num_schedules: int + + +logger: Final = Logger(log_uncaught_exceptions=True, use_rfc3339=True) + + +@logger.inject_lambda_context(log_event=True) +def lambda_handler(event: dict[str, Any], context: LambdaContext) -> None: + env: Final = AsgEnv.from_env() + sns_handler: Final = SnsHandler( + topic_arn=env.issues_topic_arn, + log_group_name=context.log_group_name, + log_stream_name=context.log_stream_name, + 
raise_exceptions=env.logger_raise_exceptions, + ) + logger.addHandler(sns_handler) + # correlation ID should eventually come from event + logger.set_correlation_id(str(uuid4())) + + validate_scheduler_request(event) + request: Final = cast(SchedulingRequest, event) + + [num_tagged_auto_scaling_groups, num_schedules] = schedule_auto_scaling_groups( + schedule_tag_key=env.schedule_tag_key, + config_table_name=env.config_table_name, + account_id=request["account"], + region=request["region"], + scheduling_role_name=env.asg_scheduling_role_name, + asg_scheduled_tag_key=env.scheduled_tag_key, + rule_prefix=env.rule_prefix, + schedule_names=request.get("schedule_names"), + ) + + # Send operational metrics when the handler is called by ASG orchestrator. + # ASG orchestrator does not send schedule names when calling the handler. + if not request.get("schedule_names"): + send_operational_metrics( + AsgMetricsDefinition( + region=request["region"], + num_tagged_auto_scaling_groups=num_tagged_auto_scaling_groups, + num_schedules=num_schedules, + ) + ) + + +def schedule_auto_scaling_groups( + *, + schedule_tag_key: str, + config_table_name: str, + account_id: str, + region: str, + scheduling_role_name: str, + asg_scheduled_tag_key: str, + rule_prefix: str, + schedule_names: list[str] | None, +) -> tuple[int, int]: + """ + Schedule auto scaling groups. + When a schedule or periods are not found in the config DynamoDB table, it raises an exception. + When there are schedule names provided, as schedules are updated, + it always schedules auto scaling groups tagged with schedule names unless auto scaling groups are stopped. 
+ + :param schedule_tag_key: a schedule tag key which is configured when launching the solution + :param config_table_name: a config DynamoDB table name + :param account_id: an AWS account ID + :param region: an AWS region + :param scheduling_role_name: a scheduling role name to assume + :param asg_scheduled_tag_key: an auto scaling scheduled tag key: `scheduled` + :param rule_prefix: an auto scaling group schedule rule prefix provided when launching the solution + :param schedule_names: a list of schedule names which would be provided when updating schedules on the config DynamoDB table + :return: number of tagged auto scaling groups and number of schedules in tags + """ + + period_store: Final = DynamoPeriodDefinitionStore(config_table_name) + schedule_store: Final = DynamoScheduleDefinitionStore(config_table_name) + is_schedule_override: Final[bool] = schedule_names is not None + schedules_in_tag: Final[Set[str]] = set() + num_tagged_auto_scaling_groups = 0 + + session: Final = assume_role( + account=account_id, region=region, role_name=scheduling_role_name + ).session + asg_service: Final = AsgService( + session=session, + schedule_tag_key=schedule_tag_key, + asg_scheduled_tag_key=asg_scheduled_tag_key, + rule_prefix=rule_prefix, + ) + + for group in asg_service.get_schedulable_groups(schedule_names): + try: + schedule_tag = list( + filter(lambda tag: tag["Key"] == schedule_tag_key, group["Tags"]) + ) + + if len(schedule_tag) == 0: + continue + + num_tagged_auto_scaling_groups += 1 + + schedule_name = schedule_tag[0]["Value"] + schedule_definition = schedule_store.find_by_name(schedule_name) + + if not schedule_definition: + raise UnknownScheduleException("No schedule found") + + schedules_in_tag.add(schedule_name) + + period_definitions: list[PeriodDefinition] = [] + + for period_id in schedule_definition.periods: + period_name = period_id.name + period = period_store.find_by_name(period_name) + + if not period: + raise UnknownPeriodException("Period not 
found") + + period_definitions.append(period) + + asg_service.schedule_auto_scaling_group( + group=group, + schedule_definition=schedule_definition, + period_definitions=period_definitions, + is_schedule_override=is_schedule_override, + ) + except Exception as err: + logger.error( + f'Error configuring schedule "{schedule_name}" for group "{group["AutoScalingGroupARN"]}": {err}', + ) + + return num_tagged_auto_scaling_groups, len(schedules_in_tag) + + +def send_operational_metrics(asg_metrics_definition: AsgMetricsDefinition) -> None: + """ + Send operational metrics when the handler is called by ASG orchestrator. + It only sends the number of tagged auto scaling groups and schedules associated with the auto scaling groups + to align with other metrics. The metric is sent once a day so it does not increase the numbers for the day. + + :param asg_metrics_definition: + """ + + try: + # `get_metrics_env` can raise `AppEnvError` so the exception should be captured. + metrics_uuid = get_metrics_env().metrics_uuid + current_time = datetime.now(timezone.utc) + + # To make sure to send once a day only + if current_time.hour == metrics_uuid.int % 24: + collect_metric( + metric=AsgCountMetric( + service=ASG_SERVICE, + region=asg_metrics_definition.region, + num_instances=asg_metrics_definition.num_tagged_auto_scaling_groups, + num_schedules=asg_metrics_definition.num_schedules, + ), + logger=logger, + ) + except Exception as e: + logger.warning(f"Failed sending operational metrics: {e}") diff --git a/source/app/instance_scheduler/handler/asg_orchestrator.py b/source/app/instance_scheduler/handler/asg_orchestrator.py new file mode 100644 index 00000000..a77aacf6 --- /dev/null +++ b/source/app/instance_scheduler/handler/asg_orchestrator.py @@ -0,0 +1,81 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import json +from datetime import datetime, timezone +from itertools import product +from typing import TYPE_CHECKING, Any, Final +from uuid import uuid4 + +from aws_lambda_powertools.logging import Logger +from boto3.session import Session + +from instance_scheduler.boto_retry import get_client_with_standard_retry +from instance_scheduler.handler.environments.asg_orch_env import AsgOrchEnv +from instance_scheduler.handler.scheduling_request import SchedulingRequest +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.util.scheduling_target import get_account_ids +from instance_scheduler.util.sns_handler import SnsHandler + +if TYPE_CHECKING: + from aws_lambda_powertools.utilities.typing import LambdaContext + from mypy_boto3_lambda.client import LambdaClient +else: + LambdaContext = object + LambdaClient = object + +logger: Final = Logger(log_uncaught_exceptions=True, use_rfc3339=True) + + +@logger.inject_lambda_context(log_event=True) +def lambda_handler(_: dict[str, Any], context: LambdaContext) -> None: + env: Final = AsgOrchEnv.from_env() + sns_handler: Final = SnsHandler( + topic_arn=env.issues_topic_arn, + log_group_name=context.log_group_name, + log_stream_name=context.log_stream_name, + raise_exceptions=env.logger_raise_exceptions, + ) + logger.addHandler(sns_handler) + # correlation ID should eventually come from source event + logger.set_correlation_id(str(uuid4())) + + orchestrate_asgs(env, context) + + +def orchestrate_asgs( + env: AsgOrchEnv, context: LambdaContext, schedule_names: list[str] | None = None +) -> None: + ddb_config_item_store: Final = DdbConfigItemStore(env.config_table_name) + ddb_config_item: Final = ddb_config_item_store.get() + account_ids: Final = get_account_ids(ddb_config_item, env, logger, context) + regions: Final = ( + env.schedule_regions if env.schedule_regions else [Session().region_name] + ) + lambda_client: LambdaClient = 
get_client_with_standard_retry("lambda") + dispatch_time: Final = datetime.now(timezone.utc).isoformat() + + for account_id, region in product(account_ids, regions): + try: + request = SchedulingRequest( + action="scheduler:run", + account=account_id, + region=region, + service="asg", + current_dt=dispatch_time, + dispatch_time=dispatch_time, + ) + if schedule_names is not None: + request["schedule_names"] = schedule_names + lambda_client.invoke( + FunctionName=env.asg_scheduler_name, + InvocationType="Event", + Payload=str.encode(json.dumps(request)), + ) + except Exception as err: + logger.error( + { + "Message": "Failed to invoke ASG scheduling request handler", + "Error": err, + "Request": request, + } + ) diff --git a/source/app/instance_scheduler/handler/cfn_schedule.py b/source/app/instance_scheduler/handler/cfn_schedule.py index 9a34a19c..b3acc845 100644 --- a/source/app/instance_scheduler/handler/cfn_schedule.py +++ b/source/app/instance_scheduler/handler/cfn_schedule.py @@ -2,18 +2,24 @@ # SPDX-License-Identifier: Apache-2.0 from collections.abc import Mapping from datetime import datetime, timezone -from typing import TYPE_CHECKING, Any, Optional, TypedDict, TypeGuard +from typing import TYPE_CHECKING, Any, NotRequired, Optional, TypedDict, TypeGuard -from instance_scheduler import configuration -from instance_scheduler.configuration.config_admin import ( - ConfigAdmin, - ConfigTablePeriodItem, +from botocore.exceptions import ClientError + +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, ) -from instance_scheduler.util import safe_json from 
instance_scheduler.util.app_env import get_app_env from instance_scheduler.util.custom_resource import ( CustomResource, CustomResourceRequest, + CustomResourceResponse, ) from instance_scheduler.util.logger import Logger @@ -22,102 +28,65 @@ else: LambdaContext = object -INF_DELETE_SCHEDULE = "Deleted schedule {}" -INF_DELETED_PERIOD = "Deleted period {}" -INF_PERIOD_CREATED = "Created period {}" -INF_PERIOD_NAME = "Creating period {}" -INF_SCHEDULE_CREATED = "Created schedule {}" -INF_SCHEDULE_NAME = "Creating schedule {}" - -ERR_INVALID_SCHEDULE_PROPERTY = ( - "{} is not a valid property for a schedule, valid schedule properties are {}" -) -ERR_INVALID_PERIOD_PROPERTY = ( - "{} is not a valid property for a schedule period, valid period properties are {}" -) -PERIOD_DESCRIPTION = "Schedule {} period {}, do not delete or update manually" -PERIOD_NAME = "{}-period-{:0>4d}" - -PROP_BEGIN_TIME = "BeginTime" -PROP_DESCRIPTION = "Description" -PROP_END_TIME = "EndTime" -PROP_ENFORCED = "Enforced" -PROP_HIBERNATE = "Hibernate" -PROP_RETAIN_RUNNING = "RetainRunning" -PROP_INSTANCE_TYPE = "InstanceType" -PROP_METRICS = "Metrics" -PROP_MONTH_DAYS = "MonthDays" -PROP_MONTHS = "Months" -PROP_NAME = "Name" -PROP_OVERRIDE_STATUS = "OverrideStatus" -PROP_OVERWRITE = "Overwrite" -PROP_PERIODS = "Periods" -PROP_STACK_NAME = "SchedulerStack" -PROP_NO_STACK_PREFIX = "NoStackPrefix" -PROP_STOP_NEW = "StopNewInstances" -PROP_TIMEZONE = "Timezone" -PROP_USE_MAINTENANCE_WINDOW = "UseMaintenanceWindow" -PROP_SSM_MAINTENANCE_WINDOW = "SsmMaintenanceWindow" -PROP_WEEKDAYS = "WeekDays" - -VALID_SCHEDULE_PROPERTIES = [ - PROP_DESCRIPTION, - PROP_ENFORCED, - PROP_RETAIN_RUNNING, - PROP_METRICS, - PROP_NAME, - PROP_OVERWRITE, - PROP_PERIODS, - PROP_STOP_NEW, - PROP_TIMEZONE, - PROP_USE_MAINTENANCE_WINDOW, - PROP_SSM_MAINTENANCE_WINDOW, - PROP_NO_STACK_PREFIX, - PROP_HIBERNATE, - "ServiceToken", - # these values used to be part of the sample template in the IG, but have been removed 7/23, - # 
customers may still have old templates that include them property, so we need to not break compatibility - "Timeout", - PROP_OVERRIDE_STATUS, -] - -VALID_PERIOD_PROPERTIES = [ - PROP_BEGIN_TIME, - PROP_DESCRIPTION, - PROP_END_TIME, - PROP_INSTANCE_TYPE, - PROP_MONTH_DAYS, - PROP_MONTHS, - PROP_WEEKDAYS, -] - -LOG_STREAM = "{}-{:0>4d}{:0>2d}{:0>2d}" +""" + SampleSchedule: + Type: 'Custom::ServiceInstanceSchedule' + Properties: + ServiceToken: !Ref ServiceInstanceScheduleServiceTokenARN #do not edit this line + NoStackPrefix: 'False' + Name: my-renamed-sample-schedule + Description: a full sample template for creating cfn schedules showing all possible values + Timezone: America/New_York + Enforced: 'True' + Hibernate: 'True' + RetainRunning: 'True' + StopNewInstances: 'True' + SsmMaintenanceWindow: 'my_window_name' + OverrideStatus: 'running' + Periods: + - Description: run from 9-5 on the first 3 days of March + BeginTime: '9:00' + EndTime: '17:00' + InstanceType: 't2.micro' + MonthDays: '1-3' + Months: '3' + - Description: run from 2pm-5pm on the weekends + BeginTime: '14:00' + EndTime: '17:00' + InstanceType: 't2.micro' + WeekDays: 'Sat-Sun' +""" class CfnSchedulePeriodProperties(TypedDict, total=False): - Description: Optional[str] - BeginTime: Optional[str] - EndTime: Optional[str] - InstanceType: Optional[str] - MonthDays: Optional[str] - Months: Optional[str] - WeekDays: Optional[str] + Description: NotRequired[str] + BeginTime: NotRequired[str] + EndTime: NotRequired[str] + InstanceType: NotRequired[str] + MonthDays: NotRequired[str] + Months: NotRequired[str] + WeekDays: NotRequired[str] class CfnScheduleResourceProperties(TypedDict, total=False): ServiceToken: str - NoStackPrefix: Optional[str] - Name: Optional[str] - Description: Optional[str] - Timezone: Optional[str] - Enforced: Optional[str] - Hibernate: Optional[str] - RetainRunning: Optional[str] - StopNewInstances: Optional[str] - UseMaintenanceWindow: Optional[str] - SsmMaintenanceWindow: 
Optional[str] - Periods: list[CfnSchedulePeriodProperties] + NoStackPrefix: NotRequired[str] + Name: NotRequired[str] + Description: NotRequired[str] + Timezone: NotRequired[str] + Enforced: NotRequired[str] + Hibernate: NotRequired[str] + RetainRunning: NotRequired[str] + StopNewInstances: NotRequired[str] + SsmMaintenanceWindow: NotRequired[list[str] | str] + Metrics: NotRequired[str] + OverrideStatus: NotRequired[str] + Periods: NotRequired[list[CfnSchedulePeriodProperties]] + + +class InvalidScheduleConfiguration(Exception): + pass class CfnScheduleHandler(CustomResource[CfnScheduleResourceProperties]): @@ -136,21 +105,25 @@ def __init__( :param context: Lambda context """ CustomResource.__init__(self, event, context) - self.number_of_periods = 0 + self._logger = self._init_logger() + app_env = get_app_env() + self.schedule_store = DynamoScheduleDefinitionStore(app_env.config_table_name) + self.period_store = DynamoPeriodDefinitionStore(app_env.config_table_name) + def _init_logger(self) -> Logger: app_env = get_app_env() classname = self.__class__.__name__ dt = datetime.now(timezone.utc) - log_stream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day) - self._logger = Logger( + log_stream = "{}-{:0>4d}{:0>2d}{:0>2d}".format( + classname, dt.year, dt.month, dt.day + ) + return Logger( log_group=app_env.log_group, log_stream=log_stream, topic_arn=app_env.topic_arn, debug=app_env.enable_debug_logging, ) - self._admin = ConfigAdmin(logger=self._logger, context=context) - @staticmethod def is_handling_request( event: Mapping[str, Any] @@ -165,240 +138,263 @@ def is_handling_request( and event.get("ResourceType") == "Custom::ServiceInstanceSchedule" ) - @classmethod - def _set_if_specified( - cls, - source: Any, - source_name: Any, - dest: Any, - dest_name: Any = None, - default: Any = None, - ) -> None: - val = source.get(source_name, default) - if val is not None: - dest[dest_name if dest_name is not None else source_name] = val - - @property - def 
_schedule_resource_name(self) -> Any: - name = self.resource_properties.get(PROP_NAME, None) - if name is None: - name = self.logical_resource_id - if ( - str(self.resource_properties.get(PROP_NO_STACK_PREFIX, "False")).lower() - == "true" - ): - return name - return "{}-{}".format(self.stack_name, name) - - def _create_period(self, period: Any) -> tuple[str, Any]: - self.number_of_periods += 1 - - period_name = PERIOD_NAME.format( - self._schedule_resource_name, self.number_of_periods - ) - self._logger.info(INF_PERIOD_NAME, period_name) + def _create_request(self) -> CustomResourceResponse: + """ + create a new CloudFormation-Managed schedule - for p in period: - if p not in VALID_PERIOD_PROPERTIES: - raise ValueError( - ERR_INVALID_PERIOD_PROPERTY.format( - p, ", ".join(VALID_PERIOD_PROPERTIES) - ) + This request will fail if creating a new schedule would overwrite an existing one + """ + self._logger.info(f"received create request for:\n{self.resource_properties}") + try: + schedule_def, period_defs = self._parse_schedule_template_item( + self.resource_properties + ) + # ----- Begin Transaction ------- + with self.schedule_store.new_transaction() as transaction: + transaction.add( + self.schedule_store.transact_put(schedule_def, overwrite=False) ) - - create_period_args: ConfigTablePeriodItem = {"name": period_name} - - self._set_if_specified( - period, PROP_BEGIN_TIME, create_period_args, configuration.BEGINTIME - ) - self._set_if_specified( - period, PROP_END_TIME, create_period_args, configuration.ENDTIME - ) - self._set_if_specified( - period, PROP_MONTH_DAYS, create_period_args, configuration.MONTHDAYS - ) - self._set_if_specified( - period, PROP_MONTHS, create_period_args, configuration.MONTHS - ) - self._set_if_specified( - period, PROP_WEEKDAYS, create_period_args, configuration.WEEKDAYS - ) - - create_period_args["description"] = PERIOD_DESCRIPTION.format( - self._schedule_resource_name, self.number_of_periods - ) - description_config = 
period.get(PROP_DESCRIPTION, None) - if description_config is not None: - create_period_args["description"] = "{}, {}".format( - description_config, create_period_args["description"] + for period_def in period_defs: + transaction.add( + self.period_store.transact_put(period_def, overwrite=False) + ) + # ------ End Transaction -------- + self._logger.info( + f"successfully created schedule:" + f"\n{schedule_def}" + f"\n{[str(period_def) for period_def in period_defs]}" ) + return self.OkResponse(physical_resource_id=schedule_def.name) + except ClientError as ce: + # indicates a transaction failure + return self.ErrorResponse(reason=f"unable to create schedule: {ce}") + finally: + self._logger.flush() - instance_type = period.get(PROP_INSTANCE_TYPE, None) - period = self._admin.create_period(create_period_args) - - self._logger.info(INF_PERIOD_CREATED, safe_json(period, 3)) - - return period_name, instance_type - - def _delete_periods(self) -> None: - i = 0 - while True: - i += 1 - name = PERIOD_NAME.format(self._schedule_resource_name, i) - period = self._admin.delete_period(name, exception_if_not_exists=False) - if period is None: - break - else: - self._logger.info(INF_DELETED_PERIOD, name) + def _update_request(self) -> CustomResourceResponse: + """ + CloudFormation update request against a schedule managed by a CFN stack - def _create_schedule(self) -> None: - self._logger.info(INF_SCHEDULE_NAME, self._schedule_resource_name) + There are 2 possible scenarios that we need to handle - create_schedule_args = {configuration.NAME: self._schedule_resource_name} + Schedule name not changed by update -- schedule should be updated in place - ps = self.resource_properties + To handle this, we perform a write transaction with overwrite=true to update the schedule in dynamodb + without needing any additional deletions - for pr in ps: - # fix for typo in older release, fix parameter if old version with typo is used for compatibility - if pr == "UseMaintenaceWindow": - pr = 
PROP_USE_MAINTENANCE_WINDOW - if pr not in VALID_SCHEDULE_PROPERTIES: - raise ValueError( - ERR_INVALID_SCHEDULE_PROPERTY.format( - pr, ", ".join(VALID_SCHEDULE_PROPERTIES) - ) - ) + Schedule name changes due to update -- old schedule must be deleted AND we must be careful not to conflict + with other already existing schedules - self._set_if_specified( - ps, PROP_METRICS, create_schedule_args, dest_name=configuration.METRICS - ) - self._set_if_specified( - ps, PROP_OVERWRITE, create_schedule_args, dest_name=configuration.OVERWRITE - ) - self._set_if_specified( - ps, - PROP_OVERRIDE_STATUS, - create_schedule_args, - dest_name=configuration.OVERRIDE_STATUS, - ) - self._set_if_specified( - ps, - PROP_USE_MAINTENANCE_WINDOW, - create_schedule_args, - dest_name=configuration.USE_MAINTENANCE_WINDOW, - ) - self._set_if_specified( - ps, - PROP_ENFORCED, - create_schedule_args, - dest_name=configuration.ENFORCED, - default=False, - ) - self._set_if_specified( - ps, - PROP_HIBERNATE, - create_schedule_args, - dest_name=configuration.HIBERNATE, - default=False, - ) - self._set_if_specified( - ps, - PROP_RETAIN_RUNNING, - create_schedule_args, - dest_name=configuration.RETAINED_RUNNING, - default=False, - ) - self._set_if_specified( - ps, - PROP_STOP_NEW, - create_schedule_args, - dest_name=configuration.STOP_NEW_INSTANCES, - default=True, - ) - self._set_if_specified( - ps, - PROP_TIMEZONE, - create_schedule_args, - dest_name=configuration.TIMEZONE, - default="UTC", - ) - self._set_if_specified( - ps, - PROP_DESCRIPTION, - create_schedule_args, - dest_name=configuration.DESCRIPTION, - ) - self._set_if_specified( - ps, - PROP_SSM_MAINTENANCE_WINDOW, - create_schedule_args, - dest_name=configuration.SSM_MAINTENANCE_WINDOW, - ) + To handle not conflicting with existing schedules, we set overwrite=false such that if changing the + schedule name would overwrite an existing schedule we will instead error on the write transaction - 
create_schedule_args[configuration.SCHEDULE_CONFIG_STACK] = self.stack_id + To handle correctly deleting the old schedule, we return the new schedule name as the + physical_resource_id of this resource. When this returned physical_resource_id changes (which it will + because the schedule name has changed from the last change request), CloudFormation will issue a + delete_request against the original resource which will handle the deletion behavior for us. + """ - periods = [] + self._logger.info(f"received update request for:\n{self.resource_properties}") try: - self.number_of_periods = 0 - for period in ps.get(PROP_PERIODS, []): # type:ignore[attr-defined] - period_name, instance_type = self._create_period(period) - if instance_type is not None: - period_name = "{}{}{}".format( - period_name, configuration.INSTANCE_TYPE_SEP, instance_type + schedule_def, period_defs = self._parse_schedule_template_item( + self.resource_properties + ) + old_sched_def, _ = self._parse_schedule_template_item( + self.old_resource_properties + ) + # ----- Begin Transaction ------- + with self.schedule_store.new_transaction() as transaction: + if schedule_def.name == old_sched_def.name: + # we are updating the same schedule, so we need to overwrite + transaction.add( + self.schedule_store.transact_put(schedule_def, overwrite=True) + ) + else: + # the schedule name is changing, fail if a schedule already exists with the same name + transaction.add( + self.schedule_store.transact_put(schedule_def, overwrite=False) ) - periods.append(period_name) - - create_schedule_args[configuration.PERIODS] = periods - schedule = self._admin.create_schedule(create_schedule_args) # type: ignore[arg-type] - # todo remove the typing ignore here (requires refactoring this method) - self.physical_resource_id = self._schedule_resource_name - self._logger.info(INF_SCHEDULE_CREATED, safe_json(schedule, 3)) - except Exception as ex: - self._delete_periods() - raise ex + for period_def in period_defs: + 
transaction.add( + self.period_store.transact_put(period_def, overwrite=True) + ) + # ------ End Transaction -------- + self._logger.info( + f"successfully updated schedule:" + f"\n{schedule_def}" + f"\n{[str(period_def) for period_def in period_defs]}" + ) + return self.OkResponse(physical_resource_id=schedule_def.name) + except ClientError as ce: + # indicates a transaction failure + return self.ErrorResponse(reason=f"unable to update schedule: {ce}") + finally: + self._logger.flush() - def _delete_schedule(self) -> None: - schedule = self._admin.delete_schedule( - name=self._schedule_resource_name, exception_if_not_exists=False - ) - if schedule is not None: - self._delete_periods() - self._logger.info(INF_DELETE_SCHEDULE, self._schedule_resource_name) + # handles Delete request from CloudFormation - def _update_schedule(self) -> None: - self._delete_schedule() - self._create_schedule() + def _delete_request(self) -> CustomResourceResponse: + """ + delete a cloudformation managed schedule - def _create_request(self) -> bool: + This request will indicate potential problems by failing if the schedule or any of its periods do not exist + """ + self._logger.info(f"received delete request for:\n{self.resource_properties}") try: - self._create_schedule() - return True - except Exception as ex: - self._logger.error(str(ex)) - return False + schedule_def, period_defs = self._parse_schedule_template_item( + self.resource_properties + ) + # ----- Begin Transaction ------- + with self.schedule_store.new_transaction() as transaction: + transaction.add( + self.schedule_store.transact_delete( + schedule_def.name, error_if_missing=False + ) + ) + for period_def in period_defs: + transaction.add( + self.period_store.transact_delete( + period_def.name, error_if_missing=False + ) + ) + # ------ End Transaction -------- + self._logger.info( + f"successfully deleted schedule {schedule_def.name} and periods {[p_def.name for p_def in period_defs]}" + ) + return 
self.OkResponse(physical_resource_id=schedule_def.name) + except ClientError as ce: + # indicates a transaction failure + return self.ErrorResponse(reason=f"unable to delete schedule: {ce}") finally: self._logger.flush() - def _update_request(self) -> bool: - try: - self._update_schedule() - return True - except Exception as ex: - self._logger.error(str(ex)) - return False - finally: - self._logger.flush() + def _parse_schedule_template_item( + self, resource_properties: CfnScheduleResourceProperties + ) -> tuple[ScheduleDefinition, list[PeriodDefinition]]: + # ---------------- Validation ----------------# + _validate_schedule_props_structure(resource_properties) + for period_props in resource_properties.get("Periods", []): + _validate_period_props_structure(period_props) + + # ------------ PARSE SCHEDULE NAME ------------# + schedule_name: str = resource_properties.get("Name", self.logical_resource_id) + if resource_properties.get("NoStackPrefix", "False").lower() == "false": + schedule_name = f"{self.stack_name}-{schedule_name}" + + # --------------- PARSE PERIODS ---------------# + period_defs = [] + period_identifiers = [] + period_counter = 0 + for period_props in resource_properties.get("Periods", []): + period_counter += 1 + try: + period_name = "{}-period-{:0>4d}".format(schedule_name, period_counter) + period_def = PeriodDefinition( + name=period_name, + description=period_props.get( + "Description", + f"Schedule {schedule_name} period {period_counter}, " + f"do not delete or update manually", + ), + begintime=period_props.get("BeginTime", None), + endtime=period_props.get("EndTime", None), + weekdays=_ensure_set(period_props.get("WeekDays", None)), + months=_ensure_set(period_props.get("Months", None)), + monthdays=_ensure_set(period_props.get("MonthDays", None)), + configured_in_stack=self.stack_id, + ) - # handles Delete request from CloudFormation + period_defs.append(period_def) + period_identifiers.append( + PeriodIdentifier.of( + period_name, 
period_props.get("InstanceType", None) + ) + ) + except Exception as ex: + raise InvalidScheduleConfiguration( + f"Error parsing period {period_counter} for schedule {schedule_name}: {ex}" + ) - def _delete_request(self) -> bool: + # --------------- PARSE SCHEDULE ---------------# try: - self._delete_schedule() - return True + sche_def = ScheduleDefinition( + name=schedule_name, + periods=period_identifiers, + timezone=resource_properties.get("Timezone", None), + description=resource_properties.get("Description", None), + override_status=resource_properties.get("OverrideStatus", None), + stop_new_instances=_parse_bool( + resource_properties.get("StopNewInstances", None) + ), + ssm_maintenance_window=_ensure_list( + resource_properties.get("SsmMaintenanceWindow") + ), + enforced=_parse_bool(resource_properties.get("Enforced", None)), + hibernate=_parse_bool(resource_properties.get("Hibernate", None)), + retain_running=_parse_bool( + resource_properties.get("RetainRunning", None) + ), + configured_in_stack=self.stack_id, + ) except Exception as ex: - self._logger.error(str(ex)) - return False - finally: - self._logger.flush() + raise InvalidScheduleConfiguration( + f"Error parsing schedule {schedule_name}: {ex}" + ) + + return sche_def, period_defs + + +def _parse_bool(bool_str: Optional[str]) -> Optional[bool]: + if bool_str is None: + return None + if bool_str.lower() == "true": + return True + elif bool_str.lower() == "false": + return False + else: + raise ValueError(f"unknown bool value {bool_str}, must be 'true' or 'false'") + + +def _ensure_set(s: list[str] | set[str] | str | None) -> set[str] | None: + if s is None: + return None + if isinstance(s, list): + return set(s) + if isinstance(s, str): + return set(s.split(",")) + return s + + +def _ensure_list(s: list[str] | set[str] | str | None) -> list[str] | None: + if s is None: + return None + if isinstance(s, set): + return list(s) + if isinstance(s, str): + return list(s.split(",")) + return s + + +def 
_validate_period_props_structure(props: CfnSchedulePeriodProperties) -> None: + for key in props.keys(): + if key not in CfnSchedulePeriodProperties.__annotations__.keys(): + raise InvalidScheduleConfiguration( + f"Unknown period property {key}, valid properties are " + f"{CfnSchedulePeriodProperties.__annotations__.keys()}" + ) + + +def _validate_schedule_props_structure(props: CfnScheduleResourceProperties) -> None: + for key in props.keys(): + if key in {"ServiceToken", "Timeout"}: + # these properties used to be part of the sample template in the IG, but have been removed in July 2023, + # They do not do anything, but customers may still have old templates that include them, + # so we need to not break compatibility + continue + if key not in CfnScheduleResourceProperties.__annotations__.keys(): + raise InvalidScheduleConfiguration( + f"Unknown schedule property {key}, valid properties are " + f"{CfnScheduleResourceProperties.__annotations__.keys()}" + ) diff --git a/source/app/instance_scheduler/handler/cli.py b/source/app/instance_scheduler/handler/cli.py deleted file mode 100644 index 29c4455e..00000000 --- a/source/app/instance_scheduler/handler/cli.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -import json -from collections.abc import Mapping -from datetime import datetime, timezone -from typing import TYPE_CHECKING, Any, TypeGuard - -import jmespath - -from instance_scheduler import configuration -from instance_scheduler.configuration.config_admin import ConfigAdmin -from instance_scheduler.handler.base import Handler -from instance_scheduler.ops_metrics.metric_type.cli_request_metric import ( - CliRequestMetric, -) -from instance_scheduler.ops_metrics.metrics import collect_metric -from instance_scheduler.util import safe_json -from instance_scheduler.util.app_env import get_app_env -from instance_scheduler.util.logger import Logger - -if TYPE_CHECKING: - from aws_lambda_powertools.utilities.typing import LambdaContext -else: - LambdaContext = object - -CLI_SOURCE = "scheduler.cli" - -LOG_STREAM = "{}-{:0>4d}{:0>2d}{:0>2d}" - -AdminCliRequest = dict[str, Any] - - -class CliHandler(Handler[AdminCliRequest]): - """ - Class to handles requests from admin CLI - """ - - def __init__(self, event: AdminCliRequest, context: LambdaContext) -> None: - """ - Initializes handle instance - :param event: event to handle - :param context: lambda context - """ - self._event = event - self._context = context - - self.additional_parameters = { - "delete-period": {"exception_if_not_exists": True}, - "delete-schedule": {"exception_if_not_exists": True}, - } - - self.transform_parameters = {"metrics": "use-metrics"} - - self.commands = { - "create-period": "create_period", - "create-schedule": "create_schedule", - "delete-period": "delete_period", - "delete-schedule": "delete_schedule", - "describe-periods": ( - "list_periods" - if self.parameters.get(configuration.NAME) is None - else "get_period" - ), - "describe-schedule-usage": "get_schedule_usage", - "describe-schedules": ( - "list_schedules" - if self.parameters.get(configuration.NAME) is None - else "get_schedule" - ), - "update-period": "update_period", - "update-schedule": 
"update_schedule", - } - - self.transformations = { - "get_period": "{Periods:[Period]}", - "get_schedule": "{Schedules:[Schedule]}", - } - - # Setup logging - classname = self.__class__.__name__ - app_env = get_app_env() - dt = datetime.now(timezone.utc) - log_stream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day) - self._logger = Logger( - log_group=app_env.log_group, - log_stream=log_stream, - topic_arn=app_env.topic_arn, - debug=app_env.enable_debug_logging, - ) - - @property - def action(self) -> Any: - """ - Retrieves admin REST api action from the event - :return: name of the action of the event - """ - return self._event["action"] - - @property - def parameters(self) -> dict[Any, Any]: - params = self._event.get("parameters", {}) - for p in params: - if p in self.transform_parameters: - params[self.transform_parameters[p]] = params[p] - del params[p] - extra = self.additional_parameters.get(self.action, {}) - params.update(extra) - return {p.replace("-", "_"): params[p] for p in params} - - @staticmethod - def is_handling_request(event: Mapping[str, Any]) -> TypeGuard[AdminCliRequest]: - """ - Returns True if the handler can handle the event - :param event: tested event - :return: True if the handles does handle the tested event - """ - - if event.get("source", "") != CLI_SOURCE: - return False - return "action" in event - - def handle_request(self) -> Any: - """ - Handles the event - :return: result of handling the event, result send back to REST admin api - """ - - def snake_to_pascal_case(s: Any) -> Any: - converted = "" - s = s.strip("_").capitalize() - i = 0 - - while i < len(s): - if s[i] == "_": - i += 1 - converted += s[i].upper() - else: - converted += s[i] - i += 1 - - return converted - - def dict_to_pascal_case(d: Any) -> Any: - d_result = {} - - if isinstance(d, dict): - for i in d: - key = snake_to_pascal_case(i) - d_result[key] = dict_to_pascal_case(d[i]) - return d_result - - elif isinstance(d, list): - return 
[dict_to_pascal_case(item) for item in d] - - return d - - try: - self._logger.info( - "Handler {} : Received CLI request {}", - self.__class__.__name__, - json.dumps(self._event), - ) - - collect_metric( - CliRequestMetric(command_used=self.action), logger=self._logger - ) - - # get access to admin api - admin = ConfigAdmin(logger=self._logger, context=self._context) - - # get api action and map it to a function in the admin API - fn_name = self.commands.get(self.action, None) - if fn_name is None: - raise ValueError("Command {} does not exist".format(self.action)) - fn = getattr(admin, fn_name) - - # calling the mapped admin api method - self._logger.info( - 'Calling "{}" with parameters {}', fn.__name__, self.parameters - ) - - if fn.__name__ in [ - "create_period", - "update_period", - "create_schedule", - "update_schedule", - ]: - api_result = fn(self.parameters) - else: - api_result = fn(**self.parameters) - - # convert to awscli PascalCase output format - result = dict_to_pascal_case(api_result) - - # perform output transformation - if fn_name in self.transformations: - result = jmespath.search(self.transformations[fn_name], result) - - # log formatted result - json_result = safe_json(result, 3) - self._logger.info("Call result is {}", json_result) - - return result - - except Exception as ex: - self._logger.info("Call failed, error is {}", str(ex)) - return {"Error": str(ex)} - finally: - self._logger.flush() diff --git a/source/app/tests/configuration/setbuilders/__init__.py b/source/app/instance_scheduler/handler/cli/__init__.py similarity index 100% rename from source/app/tests/configuration/setbuilders/__init__.py rename to source/app/instance_scheduler/handler/cli/__init__.py diff --git a/source/app/instance_scheduler/handler/cli/cli_request_handler.py b/source/app/instance_scheduler/handler/cli/cli_request_handler.py new file mode 100644 index 00000000..f0a347a7 --- /dev/null +++ b/source/app/instance_scheduler/handler/cli/cli_request_handler.py @@ 
-0,0 +1,471 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import dataclasses +import json +from collections.abc import Mapping +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Any, Callable, TypeGuard, cast +from zoneinfo import ZoneInfo + +from packaging.version import Version + +from instance_scheduler import __version__ +from instance_scheduler.handler.base import Handler +from instance_scheduler.handler.cli.schedule_usage import get_schedule_usage +from instance_scheduler.model.ddb_item_utils import optionally +from instance_scheduler.model.period_definition import ( + PeriodDefinition, + validate_as_period_params, +) +from instance_scheduler.model.schedule_definition import ( + ScheduleDefinition, + validate_as_schedule_params, +) +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from instance_scheduler.model.store.period_definition_store import ( + PeriodAlreadyExistsException, + PeriodDefinitionStore, + UnknownPeriodException, +) +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleAlreadyExistsException, + UnknownScheduleException, +) +from instance_scheduler.ops_metrics.metric_type.cli_request_metric import ( + CliRequestMetric, +) +from instance_scheduler.ops_metrics.metrics import collect_metric +from instance_scheduler.util import safe_json +from instance_scheduler.util.app_env import get_app_env +from instance_scheduler.util.logger import Logger +from instance_scheduler.util.validation import ValidationException, validate_string + +if TYPE_CHECKING: + from aws_lambda_powertools.utilities.typing import LambdaContext +else: + LambdaContext = object + +CURRENT_CLI_VERSION = __version__ +MINIMUM_SUPPORTED_CLI_VERSION = "3.0.0" + +CLI_SOURCE = "scheduler.cli" + 
+LOG_STREAM = "{}-{:0>4d}{:0>2d}{:0>2d}" + +AdminCliRequest = dict[str, Any] + + +class PeriodInUseException(Exception): + pass + + +class ManagedByCfnException(Exception): + pass + + +class UnsupportedVersionException(Exception): + pass + + +class CliRequestHandler(Handler[AdminCliRequest]): + """ + Class to handles requests from admin CLI + """ + + def __init__(self, event: AdminCliRequest, context: LambdaContext) -> None: + """ + Initializes handle instance + :param event: event to handle + :param context: lambda context + """ + self._event = event + self._context = context + self._schedule_store = DynamoScheduleDefinitionStore( + get_app_env().config_table_name + ) + self._period_store = DynamoPeriodDefinitionStore( + get_app_env().config_table_name + ) + + # Setup logging + classname = self.__class__.__name__ + app_env = get_app_env() + dt = datetime.now(timezone.utc) + log_stream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day) + self._logger = Logger( + log_group=app_env.log_group, + log_stream=log_stream, + topic_arn=app_env.topic_arn, + debug=app_env.enable_debug_logging, + ) + + @property + def action(self) -> Any: + """ + Retrieves admin REST api action from the event + :return: name of the action of the event + """ + return self._event["action"] + + @property + def parameters(self) -> dict[Any, Any]: + params = self._event.get("parameters", {}) + return {p.replace("-", "_"): params[p] for p in params} + + @property + def version(self) -> Any: + return self._event["version"] + + @staticmethod + def is_handling_request(event: Mapping[str, Any]) -> TypeGuard[AdminCliRequest]: + """ + Returns True if the handler can handle the event + :param event: tested event + :return: True if the handles does handle the tested event + """ + + if event.get("source", "") != CLI_SOURCE: + return False + return "action" in event + + def handle_request(self) -> Any: + """ + Handles the event + :return: result of handling the event, result send back to REST admin api 
+ """ + + def snake_to_pascal_case(s: Any) -> Any: + converted = "" + s = s.strip("_").capitalize() + i = 0 + + while i < len(s): + if s[i] == "_": + i += 1 + converted += s[i].upper() + else: + converted += s[i] + i += 1 + + return converted + + def dict_to_pascal_case(d: Any) -> Any: + d_result = {} + + if isinstance(d, dict): + for i in d: + key = snake_to_pascal_case(i) + d_result[key] = dict_to_pascal_case(d[i]) + return d_result + + elif isinstance(d, list): + return [dict_to_pascal_case(item) for item in d] + + return d + + try: + self._logger.info( + "Handler {} : Received CLI request {}", + self.__class__.__name__, + json.dumps(self._event), + ) + + # Supports cli versions from some minimum version to current solution version + if ( + not Version(MINIMUM_SUPPORTED_CLI_VERSION) + <= Version(self.version) + <= Version(CURRENT_CLI_VERSION) + ): + + raise UnsupportedVersionException( + f"CLI version {self.version} is not supported for this version of the solution. Please update to a supported version ({get_supported_cli_versions()})." 
+ ) + + collect_metric( + CliRequestMetric(command_used=self.action), logger=self._logger + ) + + api_result = self.handle_command(self.action, self.parameters) + + # convert to awscli PascalCase output format + result = dict_to_pascal_case(api_result) + + # log formatted result + json_result = safe_json(result, 3) + self._logger.info("Call result is {}", json_result) + + return json.loads( + json_result + ) # returned as dict to allow lambda to control final format + + except Exception as ex: + self._logger.info("Call failed, error is {}", str(ex)) + return {"Error": str(ex)} + finally: + self._logger.flush() + + def handle_command(self, command: str, parameters: dict[str, Any]) -> Any: + commands: dict[str, Callable[[dict[str, Any]], Any]] = { + "create-period": self.create_period_cmd, + "create-schedule": self.create_schedule_cmd, + "delete-period": self.delete_period_cmd, + "delete-schedule": self.delete_schedule_cmd, + "describe-periods": self.describe_periods_command, + "describe-schedules": self.describe_schedules_command, + "update-period": self.update_period_cmd, + "update-schedule": self.update_schedule_cmd, + "describe-schedule-usage": self.describe_schedule_usage_command, + } + + command_func = commands.get(command) + if command_func: + return command_func(parameters) + else: + raise ValueError(f"Command {command} does not exist") + + def create_period_cmd(self, parameters: dict[str, Any]) -> Any: + if validate_as_period_params(parameters): + period_def = PeriodDefinition.from_period_params(parameters) + try: + self._period_store.put(period_def, overwrite=False) + return { + "period": { + "type": "period", + **_strip_none_values(dataclasses.asdict(period_def)), + } + } + except PeriodAlreadyExistsException as e: + raise PeriodAlreadyExistsException(f"error: {e}") + + def create_schedule_cmd(self, parameters: dict[str, Any]) -> Any: + if validate_as_schedule_params(parameters): + schedule_def = ScheduleDefinition.from_schedule_params(parameters) + 
validate_periods_exist(schedule_def, self._period_store) + try: + self._schedule_store.put(schedule_def, overwrite=False) + return { + "schedule": { + "type": "schedule", + **_strip_none_values(dataclasses.asdict(schedule_def)), + } + } + except ScheduleAlreadyExistsException as e: + raise ScheduleAlreadyExistsException(f"error: {e}") + + def delete_period_cmd(self, parameters: dict[str, Any]) -> Any: + if validate_string(parameters, "name", required=True): + period_name: str = parameters["name"] + existing_period = self._period_store.find_by_name(period_name) + if not existing_period: + raise UnknownPeriodException( + f"not found: period {period_name} does not exist" + ) + + if is_managed_by_cfn(existing_period): + raise ManagedByCfnException( + f"Period {existing_period.name} is owned by {existing_period.configured_in_stack} and cannot be" + f" deleted by the cli. Please delete the owning stack to delete this period" + ) + + schedules_using_period = self._schedule_store.find_by_period(period_name) + if schedules_using_period: + raise PeriodInUseException( + f"error: period {period_name} can not be deleted " + f"because it is still used in schedule(s) " + f"{[sched.name for sched in schedules_using_period.values()]}" + ) + + self._period_store.delete(period_name) + return {"period": period_name} + + def delete_schedule_cmd(self, parameters: dict[str, Any]) -> Any: + if validate_string(parameters, "name", required=True): + schedule_name: str = parameters["name"] + existing_schedule = self._schedule_store.find_by_name(schedule_name) + if not existing_schedule: + raise UnknownScheduleException( + f"not found: schedule {schedule_name} does not exist" + ) + + if is_managed_by_cfn(existing_schedule): + raise ManagedByCfnException( + f"Schedule {existing_schedule.name} is owned by {existing_schedule.configured_in_stack} and cannot be" + f" deleted by the cli. 
Please delete the owning stack to delete this schedule" + ) + + self._schedule_store.delete(schedule_name) + return {"schedule": schedule_name} + + def update_period_cmd(self, parameters: dict[str, Any]) -> Any: + if validate_as_period_params(parameters): + period_def = PeriodDefinition.from_period_params(parameters) + old_period_def = self._period_store.find_by_name(period_def.name) + if not old_period_def: + raise UnknownPeriodException( + f"not found: period {period_def.name} does not exist" + ) + if is_managed_by_cfn(old_period_def): + raise ManagedByCfnException( + f"Period {old_period_def.name} is owned by {old_period_def.configured_in_stack} and cannot be" + f" edited by the cli. Please update the owning stack to edit this period" + ) + self._period_store.put(period_def, overwrite=True) + return { + "period": { + "type": "period", + **_strip_none_values(dataclasses.asdict(period_def)), + } + } + + def update_schedule_cmd(self, parameters: dict[str, Any]) -> Any: + if validate_as_schedule_params(parameters): + schedule_def = ScheduleDefinition.from_schedule_params(parameters) + validate_periods_exist(schedule_def, self._period_store) + + old_schedule_def = self._schedule_store.find_by_name(schedule_def.name) + if not old_schedule_def: + raise UnknownScheduleException( + f"not found: schedule {schedule_def.name} does not exist" + ) + if is_managed_by_cfn(old_schedule_def): + raise ManagedByCfnException( + f"Schedule {old_schedule_def.name} is owned by {old_schedule_def.configured_in_stack} and cannot be" + f" edited by the cli. 
Please update the owning stack to edit this schedule" + ) + self._schedule_store.put(schedule_def, overwrite=True) + return { + "schedule": { + "type": "schedule", + **_strip_none_values(dataclasses.asdict(schedule_def)), + } + } + + def describe_periods_command(self, parameters: dict[str, Any]) -> Any: + validate_string(parameters, "name", required=False) + name = parameters.get("name", None) + + if name: + period_def = self._period_store.find_by_name(name) + if not period_def: + raise UnknownPeriodException(f"not found: period {name} does not exist") + return { + "periods": [ + { + "type": "period", + **_strip_none_values(dataclasses.asdict(period_def)), + } + ] + } + else: + period_defs = self._period_store.find_all() + return { + "periods": [ + { + "type": "period", + **_strip_none_values(dataclasses.asdict(period_def)), + } + for period_def in period_defs.values() + ] + } + + def describe_schedules_command(self, parameters: dict[str, Any]) -> Any: + validate_string(parameters, "name", required=False) + name = parameters.get("name", None) + + if name: + schedule_def = self._schedule_store.find_by_name(name) + if not schedule_def: + raise UnknownScheduleException( + f"not found: schedule {name} does not exist" + ) + return { + "schedules": [ + { + "type": "schedule", + **_strip_none_values(dataclasses.asdict(schedule_def)), + } + ] + } + else: + schedule_defs = self._schedule_store.find_all() + return { + "schedules": [ + { + "type": "schedule", + **_strip_none_values(dataclasses.asdict(schedule_def)), + } + for schedule_def in schedule_defs.values() + ] + } + + def describe_schedule_usage_command(self, parameters: dict[str, Any]) -> Any: + validate_string(parameters, "name", required=True) + validate_string(parameters, "startdate", required=False) + validate_string(parameters, "enddate", required=False) + + name: str = cast(str, parameters.get("name")) + + schedule = self._schedule_store.find_by_name(name) + if schedule is None: + raise ValueError(f"not found: 
schedule {name} does not exist") + try: + start_date = optionally(_parse_date, parameters.get("startdate"), None) + except ValueError as e: + raise ValueError( + f"error: invalid startdate {parameters.get('startdate')}, {e}" + ) + try: + end_date = optionally(_parse_date, parameters.get("enddate"), None) + except ValueError as e: + raise ValueError(f"error: invalid enddate {parameters.get('enddate')}, {e}") + + # name, start_date, and end_date parsed + tz = ZoneInfo(schedule.timezone) if schedule.timezone else timezone.utc + start_date = start_date.replace(tzinfo=tz) if start_date else None + end_date = end_date.replace(tzinfo=tz) if end_date else None + return get_schedule_usage( + schedule.to_instance_schedule(self._period_store), + start_date, + end_date, + ) + + +def _strip_none_values(dict_to_strip: dict[str, Any]) -> dict[str, Any]: + return {k: v for k, v in dict_to_strip.items() if v is not None} + + +def _parse_date(date_str: str) -> datetime: + try: + return datetime.strptime(date_str, "%Y%m%d") + except ValueError as ex: + raise ValueError(f"must be a valid date in format yyyymmdd {str(ex)}") + + +def validate_periods_exist( + schedule_def: ScheduleDefinition, period_store: PeriodDefinitionStore +) -> bool: + # validate all periods exist -- carrying forward validation behavior that existed in 1.5.1 + for period_id in schedule_def.periods: + period = period_store.find_by_name(period_id.name) + if not period: + raise ValidationException( + f"error: not found: period {period_id.name} does not exist" + ) + return True + + +def is_managed_by_cfn(resource: PeriodDefinition | ScheduleDefinition) -> bool: + return resource.configured_in_stack is not None + + +def get_supported_cli_versions() -> str: + return ( + CURRENT_CLI_VERSION + if CURRENT_CLI_VERSION == MINIMUM_SUPPORTED_CLI_VERSION + else f"{MINIMUM_SUPPORTED_CLI_VERSION}-{CURRENT_CLI_VERSION}" + ) diff --git a/source/app/instance_scheduler/handler/cli/schedule_usage.py 
b/source/app/instance_scheduler/handler/cli/schedule_usage.py new file mode 100644 index 00000000..8ba64742 --- /dev/null +++ b/source/app/instance_scheduler/handler/cli/schedule_usage.py @@ -0,0 +1,149 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import json +from datetime import datetime, timedelta +from typing import Any, Optional + +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.schedulers.states import InstanceState, ScheduleState +from instance_scheduler.util.logger import Logger + + +def get_schedule_usage( + schedule: InstanceSchedule, + start: Optional[datetime] = None, + end: Optional[datetime] = None, +) -> Any: + if not start: + start = datetime.now(schedule.timezone) + if not end: + end = start + + if start > end: + raise ValueError("stop_date must be equal or later than start_date") + + start = start.replace(tzinfo=schedule.timezone) + end = end.replace(tzinfo=schedule.timezone) + + return _for_output( + calculate_schedule_usage_for_period(schedule, start_dt=start, stop_dt=end) + ) + + +def calculate_schedule_usage_for_period( # NOSONAR -- (cog-complexity) Deferred - requires an algorithmic rewrite, + # not just cleanup. 
preferably using simulation of actual schedule code rather than estimation + schedule: InstanceSchedule, + start_dt: datetime, + stop_dt: Optional[datetime] = None, + logger: Optional[Logger] = None, +) -> dict[str, Any]: + result = {} + + stop = stop_dt or start_dt + if start_dt > stop: + raise ValueError("stop_date must be equal or later than start_date") + + dt = ( + start_dt + if isinstance(start_dt, datetime) + else datetime(start_dt.year, start_dt.month, start_dt.day) + ) + dt = dt.replace(tzinfo=schedule.timezone) + + while dt <= stop: + timeline = {dt.replace(hour=0, minute=0)} + for p in schedule.periods: + begintime = p["period"].begintime + endtime = p["period"].endtime + if begintime is None and endtime is None: + timeline.add(dt.replace(hour=0, minute=0)) + timeline.add(dt.replace(hour=23, minute=59)) + else: + if begintime: + timeline.add( + dt.replace(hour=begintime.hour, minute=begintime.minute) + ) + if endtime: + timeline.add(dt.replace(hour=endtime.hour, minute=endtime.minute)) + + running_periods = {} + started = None + starting_period = None + current_state: InstanceState = InstanceState.UNKNOWN + for tm in sorted(list(timeline)): + desired_state, _, period = schedule.get_desired_state(tm, logger, False) + + if current_state != desired_state: # type: ignore[comparison-overlap] + if desired_state == ScheduleState.RUNNING: + started = tm + current_state = InstanceState.RUNNING + starting_period = period + elif desired_state == ScheduleState.STOPPED: + stopped = tm + ( + desired_state_with_adj_check, + __, + ___, + ) = schedule.get_desired_state(tm, logger, True) + if desired_state_with_adj_check == ScheduleState.RUNNING: + stopped += timedelta(minutes=1) + if current_state == InstanceState.RUNNING: + current_state = InstanceState.STOPPED + running_periods[starting_period] = _make_period( + started, stopped + ) + + if current_state == InstanceState.RUNNING: + stopped = dt.replace(hour=23, minute=59) + timedelta(minutes=1) + 
running_periods[starting_period] = _make_period(started, stopped) + + result[str(dt.date())] = { + "running_periods": running_periods, + "billing_seconds": sum( + [running_periods[ps]["billing_seconds"] for ps in running_periods] + ), + "billing_hours": sum( + [running_periods[ph]["billing_hours"] for ph in running_periods] + ), + } + + dt += timedelta(days=1) + + return {"schedule": schedule.name, "usage": result} + + +def _running_seconds(startdt: datetime, stopdt: datetime) -> int: + return max(int((stopdt - startdt).total_seconds()), 60) + + +def _running_hours(startdt: datetime, stopdt: datetime) -> int: + return int(((stopdt - startdt).total_seconds() - 1) / 3600) + 1 + + +def _make_period(started_dt: Any, stopped_dt: Any) -> dict[str, Any]: + running_period = { + "begin": started_dt, + "end": stopped_dt, + "billing_hours": _running_hours(started_dt, stopped_dt), + "billing_seconds": _running_seconds(started_dt, stopped_dt), + } + return running_period + + +class CliCustomEncoder(json.JSONEncoder): + """ + Custom encoding to handle unsupported data types + """ + + def default(self, o: Any) -> Any: + if isinstance(o, set): + return list(o) + if isinstance(o, datetime): + return o.replace(second=0).strftime("%x %X") + + return json.JSONEncoder.default(self, o) + + +def _for_output(item: Any) -> Any: + # to anf from json using custom encoder to convert datetime and set type data into string and lists + return json.loads(json.dumps(item, cls=CliCustomEncoder)) diff --git a/source/app/instance_scheduler/handler/config_resource.py b/source/app/instance_scheduler/handler/config_resource.py index b7f3426b..d1fc6a5f 100644 --- a/source/app/instance_scheduler/handler/config_resource.py +++ b/source/app/instance_scheduler/handler/config_resource.py @@ -4,22 +4,23 @@ import re from collections.abc import Mapping from datetime import datetime, timezone -from re import Match -from typing import TYPE_CHECKING, Any, Literal, Optional, TypedDict, TypeGuard +from typing import 
TYPE_CHECKING, Any, TypedDict, TypeGuard -import boto3 - -from instance_scheduler import configuration from instance_scheduler.boto_retry import get_client_with_standard_retry -from instance_scheduler.configuration.config_admin import ( - ConfigAdmin, - ConfigTableConfigItem, -) from instance_scheduler.handler import setup_demo_data as demo_data +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) from instance_scheduler.util.app_env import get_app_env from instance_scheduler.util.custom_resource import ( CustomResource, CustomResourceRequest, + CustomResourceResponse, ) from instance_scheduler.util.logger import Logger @@ -30,32 +31,11 @@ LambdaContext = object LogsClient = object -BoolStr = Literal["True", "False"] - class ServiceSetupResourceProperties(TypedDict): - ServiceToken: str # Lambda Function ARN timeout: int - config_table: str # DynamoDB Table name - tagname: str # Schedule tag key - default_timezone: str - use_metrics: BoolStr - scheduled_services: list[str] - schedule_clusters: BoolStr - create_rds_snapshot: BoolStr - regions: list[str] remote_account_ids: list[str] - namespace: str - aws_partition: str - scheduler_role_name: str - schedule_lambda_account: BoolStr - trace: BoolStr - enable_ssm_maintenance_windows: BoolStr log_retention_days: float - started_tags: str - stopped_tags: str - stack_version: str - use_aws_organizations: BoolStr ServiceSetupRequest = CustomResourceRequest[ServiceSetupResourceProperties] @@ -74,8 +54,6 @@ class ServiceSetupResourceProperties(TypedDict): "Setting log retention policy for Lambda CloudWatch loggroup {} to {} days" ) -LOG_STREAM = "{}-{:0>4d}{:0>2d}{:0>2d}" - class 
SchedulerSetupHandler(CustomResource[ServiceSetupResourceProperties]): """ @@ -83,12 +61,17 @@ class SchedulerSetupHandler(CustomResource[ServiceSetupResourceProperties]): """ def __init__(self, event: Any, context: LambdaContext) -> None: + app_env = get_app_env() + CustomResource.__init__(self, event, context) + self.config_item_store = DdbConfigItemStore(app_env.config_table_name) + # Setup logging classname = self.__class__.__name__ - app_env = get_app_env() dt = datetime.now(timezone.utc) - log_stream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day) + log_stream = "{}-{:0>4d}{:0>2d}{:0>2d}".format( + classname, dt.year, dt.month, dt.day + ) self._logger = Logger( log_group=app_env.log_group, log_stream=log_stream, @@ -96,8 +79,6 @@ def __init__(self, event: Any, context: LambdaContext) -> None: debug=app_env.enable_debug_logging, ) - self._stack_version = self.resource_properties["stack_version"] - @staticmethod def is_handling_request( event: Mapping[str, Any] @@ -107,175 +88,6 @@ def is_handling_request( and event.get("ResourceType") == "Custom::ServiceSetup" ) - @property - def tagname(self) -> Any: - """ - Name of the tag to mark scheduled instances - :return: tag name - """ - return self.resource_properties.get( - configuration.TAGNAME, configuration.DEFAULT_TAGNAME - ) - - @property - def default_timezone(self) -> Any: - """ - Returns default time zone - :return: default timezone - """ - return self.resource_properties.get( - configuration.DEFAULT_TIMEZONE, configuration.DEFAULT_TZ - ) - - @property - def use_metrics(self) -> Any: - """ - Returns global metrics switch - :return: metrics switch - """ - return self.resource_properties.get(configuration.METRICS, "False") - - @property - def trace(self) -> Any: - """ - Returns global trace flag - :return: trace flag - """ - return self.resource_properties.get(configuration.TRACE, "True") - - @property - def namespace(self) -> Any: - """ - Returns global namespace - :return: namespace string - """ - 
return self.resource_properties.get(configuration.NAMESPACE, None) - - @property - def aws_partition(self) -> Any: - """ - Returns aws partition - :return: aws partition string - """ - return self.resource_properties.get(configuration.AWS_PARTITION, None) - - @property - def scheduler_role_name(self) -> Any: - """ - Returns execution_role_name - :return: execution_role_name string - """ - return self.resource_properties.get(configuration.SCHEDULER_ROLE_NAME, None) - - @property - def enable_ssm_maintenance_windows(self) -> Any: - """ - Returns global enable SSM Maintenance Windows flag - :return: ssm_enable_ssm_maintenance_windows flag - """ - return self.resource_properties.get( - configuration.ENABLE_SSM_MAINTENANCE_WINDOWS, "False" - ) - - @property - def regions(self) -> set[str] | list[str]: - """ - Returns all regions from the configuration - :return: regions - """ - result: set[str] | list[str] = set(self.resource_properties["regions"]) - if result == set() or len([i for i in result if i.strip() != ""]) == 0: - result = [boto3.Session().region_name] - return result - - @property - def started_tags(self) -> Any: - """ - Returns started tags as a string - :return: started tags - """ - return self.resource_properties.get(configuration.STARTED_TAGS, None) - - @property - def stopped_tags(self) -> Any: - """ - Returns stopped tags as a string - :return: stopped tags - """ - return self.resource_properties.get(configuration.STOPPED_TAGS, None) - - @property - def remote_account_ids(self) -> Any: - """ - Returns remote account ids - :return: remote account ids - """ - result = set(self.resource_properties["remote_account_ids"]) - if result == set() or len([i for i in result if i.strip() != ""]) == 0: - return None - - return result - - @property - def old_remote_account_ids(self) -> Any: - """ - Returns remote account ids from the previous event of create/update - :return: remote account ids - """ - result = set(self.old_resource_properties["remote_account_ids"]) - 
if result == set() or len([i for i in result if i.strip() != ""]) == 0: - return None - - return result - - @property - def scheduled_services(self) -> Optional[set[str]]: - """ - Returns scheduled services - :return: services to schedule - """ - result = set(self.resource_properties["scheduled_services"]) - if result == set() or len([i for i in result if i.strip() != ""]) == 0: - return None - - return result - - @property - def schedule_clusters(self) -> Any: - """ - Returns global schedule clusters flag - :return: schedule_clusters flag - """ - return self.resource_properties.get(configuration.SCHEDULE_CLUSTERS, "False") - - @property - def create_rds_snapshot(self) -> Any: - """ - Returns global create RDS Snapshots flag - :return: create_rds_snapshot flag - """ - return self.resource_properties.get(configuration.CREATE_RDS_SNAPSHOT, "True") - - @property - def schedule_lambda_account(self) -> Any: - """ - Returns flag for processing lambda account switch - :return: lambda account process switch - """ - return self.resource_properties.get( - configuration.SCHEDULE_LAMBDA_ACCOUNT, "True" - ) - - @property - def use_aws_organizations(self) -> Any: - """ - Returns use_aws_organizations flag - :return: use_aws_organizations flag - """ - return self.resource_properties.get( - configuration.USE_AWS_ORGANIZATIONS, "False" - ) - def handle_request(self) -> None: """ Handles the custom resource request to write scheduler global settings to config database @@ -292,75 +104,13 @@ def handle_request(self) -> None: finally: self._logger.flush() - def get_valid_org_id(self, org_id: str) -> Optional[Match[str]]: - """ - Verifies if the ou_id param is a valid ou_id format. 
https://docs.aws.amazon.com/organizations/latest/APIReference/API_Organization.html - :return: the org id or else None - """ - return re.fullmatch("^o-[a-z0-9]{10,32}$", org_id) - - def _update_settings(self, prev_org_remote_account_ids: Any = None) -> bool: - if prev_org_remote_account_ids is None: - prev_org_remote_account_ids = {} - - try: - admin = ConfigAdmin(logger=self._logger, context=self.context) - try: - org_id = list(self.remote_account_ids)[0] - except Exception as error: - self._logger.info(f"org id is not valid or empty {error}") - org_id = "" - - if self.get_valid_org_id(org_id) and self.use_aws_organizations == "True": - self.organization_id = org_id - remote_account_ids = prev_org_remote_account_ids - elif ( - self.get_valid_org_id(org_id) and self.use_aws_organizations == "False" - ): - self.organization_id = org_id - remote_account_ids = {} - else: - self.organization_id = "" - remote_account_ids = self.remote_account_ids - - settings = admin.update_config( - ConfigTableConfigItem( - default_timezone=self.default_timezone, - scheduled_services=self.scheduled_services or set(), - schedule_clusters=self.schedule_clusters, - create_rds_snapshot=self.create_rds_snapshot, - tagname=self.tagname, - regions=self.regions, - remote_account_ids=remote_account_ids, - organization_id=self.organization_id, - schedule_lambda_account=self.schedule_lambda_account.lower() - == "true", - use_metrics=self.use_metrics.lower() == "true", - trace=self.trace.lower() == "true", - enable_ssm_maintenance_windows=self.enable_ssm_maintenance_windows.lower() - == "true", - scheduler_role_name=self.scheduler_role_name, - aws_partition=self.aws_partition, - namespace=self.namespace, - started_tags=self.started_tags, - stopped_tags=self.stopped_tags, - ) - ) - - self._logger.info(INF_CONFIG_SET, str(settings)) - - except Exception as ex: - self._logger.info(ERR_SETTING_CONFIG, ex) - return False - - return True - def set_lambda_logs_retention_period(self) -> None: """ Sets the 
retention period of the log group associated with the Lambda context to - resource_properties["log_retention_days"] if present - default value of 30 otherwise """ + # todo: this relies on the monolith lambda and will need to be rewritten when we break apart the lambdas if not self.context: return @@ -381,18 +131,16 @@ def set_lambda_logs_retention_period(self) -> None: def _create_sample_schemas(self) -> None: try: - admin: ConfigAdmin = ConfigAdmin(logger=self._logger, context=self.context) + period_store = DynamoPeriodDefinitionStore(get_app_env().config_table_name) + schedule_store = DynamoScheduleDefinitionStore( + get_app_env().config_table_name + ) - admin.create_period(demo_data.PERIOD_WORKING_DAYS) - admin.create_period(demo_data.PERIOD_WEEKENDS) - admin.create_period(demo_data.PERIOD_OFFICE_HOURS) - admin.create_period(demo_data.PERIOD_FIRST_MONDAY_IN_QUARTER) + for demo_period in demo_data.DEMO_PERIODS: + period_store.put(demo_period) - admin.create_schedule(demo_data.SCHEDULE_SEATTLE_OFFICE_HOURS) - admin.create_schedule(demo_data.SCHEDULE_UK_OFFICE_HOURS) - admin.create_schedule(demo_data.SCHEDULE_STOPPED) - admin.create_schedule(demo_data.SCHEDULE_RUNNING) - admin.create_schedule(demo_data.SCHEDULE_SCALING) + for demo_schedule in demo_data.DEMO_SCHEDULES: + schedule_store.put(demo_schedule) except Exception as ex: self._logger.error( @@ -400,38 +148,90 @@ def _create_sample_schemas(self) -> None: ) # handles Create request from CloudFormation - def _create_request(self) -> bool: + def _create_request(self) -> CustomResourceResponse: self._create_sample_schemas() - result = self._update_settings() self.set_lambda_logs_retention_period() - return result + if get_app_env().enable_aws_organizations: + org_id = parse_as_org_id(self.resource_properties) + self.config_item_store.put( + DdbConfigItem(organization_id=org_id, remote_account_ids=[]) + ) + else: + spoke_accounts = parse_as_account_ids(self.resource_properties) + self.config_item_store.put( + 
DdbConfigItem(organization_id="", remote_account_ids=spoke_accounts) + ) + return self.OkResponse() - def _update_request(self) -> bool: - try: - org_id = list(self.remote_account_ids)[0] - except Exception as error: - self._logger.info(f"org id is not valid or empty {error}") - org_id = "" - try: - prev_org_id = list(self.old_remote_account_ids)[0] - except Exception as error: - self._logger.info( - f"org id from old custom resource request parameters is not valid or empty {error}" + def _update_request(self) -> CustomResourceResponse: + """ + Scenarios to Handle: + when orgs not enabled: + -always overwrite accounts + when orgs enabled: + -when org_id does not change -- keep accounts + -when org_id does change -- purge accounts + """ + if get_app_env().enable_aws_organizations: + # using organizations + try: + prev_org_id = parse_as_org_id(self.old_resource_properties) + except ValueError: + prev_org_id = "" + org_id = parse_as_org_id(self.resource_properties) + + if org_id == prev_org_id: + self._logger.info( + "org_id has not changed, preserving registered spoke accounts..." 
+ ) + spoke_accounts = self.config_item_store.get().remote_account_ids + self._logger.info( + f"preserved {len(spoke_accounts)} registered spoke accounts" + ) + else: + self._logger.info( + f"org_id has not changed from {prev_org_id} to {org_id}, " + f"registered spoke accounts will not be preserved" + ) + spoke_accounts = [] + + self.config_item_store.put( + DdbConfigItem(organization_id=org_id, remote_account_ids=spoke_accounts) ) - prev_org_id = "" - if ( - self.get_valid_org_id(org_id) - and self.get_valid_org_id(prev_org_id) - and org_id == prev_org_id - ): - config = configuration.get_global_configuration(self._logger) - prev_remote_account_id = config.remote_account_ids else: - prev_remote_account_id = [] - result = self._update_settings(prev_remote_account_id) - self.set_lambda_logs_retention_period() - return result + # not using organizations + spoke_accounts = parse_as_account_ids(self.resource_properties) + self.config_item_store.put( + DdbConfigItem(organization_id="", remote_account_ids=spoke_accounts) + ) + + return self.OkResponse() # handles Delete request from CloudFormation - def _delete_request(self) -> bool: - return True + def _delete_request(self) -> CustomResourceResponse: + # no action to taken + return self.OkResponse() + + +def parse_as_org_id(props: ServiceSetupResourceProperties) -> str: + ids_list = props["remote_account_ids"] + if len(ids_list) != 1: + raise ValueError( + f"org_id must be provided as a single value! received: {ids_list}" + ) + org_id = props["remote_account_ids"][0] + if not is_org_id(org_id): + raise ValueError(f"invalid org id {org_id}") + return org_id + + +def parse_as_account_ids(props: ServiceSetupResourceProperties) -> list[str]: + return props["remote_account_ids"] + + +def is_org_id(org_id: str) -> bool: + """ + Verifies if the ou_id param is a valid ou_id format. 
https://docs.aws.amazon.com/organizations/latest/APIReference/API_Organization.html + :return: the org id or else None + """ + return bool(re.fullmatch("^o-[a-z0-9]{10,32}$", org_id)) diff --git a/source/app/instance_scheduler/handler/environments/asg_env.py b/source/app/instance_scheduler/handler/environments/asg_env.py new file mode 100644 index 00000000..43ec2dae --- /dev/null +++ b/source/app/instance_scheduler/handler/environments/asg_env.py @@ -0,0 +1,45 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from os import environ +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError + +from instance_scheduler.util.app_env import AppEnvError, env_to_bool + + +@dataclass(frozen=True) +class AsgEnv: + user_agent_extra: str + + issues_topic_arn: str + logger_raise_exceptions: bool + + config_table_name: str + asg_scheduling_role_name: str + default_timezone: ZoneInfo + schedule_tag_key: str + scheduled_tag_key: str + rule_prefix: str + + @staticmethod + def from_env() -> "AsgEnv": + try: + return AsgEnv( + user_agent_extra=environ["USER_AGENT_EXTRA"], + issues_topic_arn=environ["ISSUES_TOPIC_ARN"], + logger_raise_exceptions=env_to_bool( + environ.get("LOGGER_RAISE_EXCEPTIONS", "False") + ), + config_table_name=environ["CONFIG_TABLE"], + asg_scheduling_role_name=environ["ASG_SCHEDULING_ROLE_NAME"], + default_timezone=ZoneInfo(environ["DEFAULT_TIMEZONE"]), + schedule_tag_key=environ["SCHEDULE_TAG_KEY"], + scheduled_tag_key=environ["SCHEDULED_TAG_KEY"], + rule_prefix=environ["RULE_PREFIX"], + ) + except ZoneInfoNotFoundError as err: + raise AppEnvError(f"Invalid timezone: {err.args[0]}") from err + except KeyError as err: + raise AppEnvError( + f"Missing required application environment variable: {err.args[0]}" + ) from err diff --git a/source/app/instance_scheduler/handler/environments/asg_orch_env.py b/source/app/instance_scheduler/handler/environments/asg_orch_env.py new 
file mode 100644 index 00000000..988e7a9c --- /dev/null +++ b/source/app/instance_scheduler/handler/environments/asg_orch_env.py @@ -0,0 +1,40 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from os import environ + +from instance_scheduler.util.app_env import AppEnvError, env_to_bool, env_to_list + + +@dataclass(frozen=True) +class AsgOrchEnv: + user_agent_extra: str + + issues_topic_arn: str + logger_raise_exceptions: bool + + config_table_name: str + enable_schedule_hub_account: bool + schedule_regions: list[str] + asg_scheduler_name: str + + @staticmethod + def from_env() -> "AsgOrchEnv": + try: + return AsgOrchEnv( + user_agent_extra=environ["USER_AGENT_EXTRA"], + issues_topic_arn=environ["ISSUES_TOPIC_ARN"], + logger_raise_exceptions=env_to_bool( + environ.get("LOGGER_RAISE_EXCEPTIONS", "False") + ), + config_table_name=environ["CONFIG_TABLE"], + enable_schedule_hub_account=env_to_bool( + environ["ENABLE_SCHEDULE_HUB_ACCOUNT"] + ), + schedule_regions=env_to_list(environ["SCHEDULE_REGIONS"]), + asg_scheduler_name=environ["ASG_SCHEDULER_NAME"], + ) + except KeyError as err: + raise AppEnvError( + f"Missing required application environment variable: {err.args[0]}" + ) from err diff --git a/source/app/instance_scheduler/handler/environments/metrics_uuid_environment.py b/source/app/instance_scheduler/handler/environments/metrics_uuid_environment.py new file mode 100644 index 00000000..adf4cec8 --- /dev/null +++ b/source/app/instance_scheduler/handler/environments/metrics_uuid_environment.py @@ -0,0 +1,26 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from os import environ + +from instance_scheduler.util.app_env import AppEnvError + + +@dataclass +class MetricsUuidEnvironment: + user_agent_extra: str + stack_id: str + uuid_key: str + + @staticmethod + def from_env() -> "MetricsUuidEnvironment": + try: + return MetricsUuidEnvironment( + user_agent_extra=environ["USER_AGENT_EXTRA"], + stack_id=environ["STACK_ID"], + uuid_key=environ["UUID_KEY"], + ) + except KeyError as err: + raise AppEnvError( + f"Missing required application environment variable: {err.args[0]}" + ) from err diff --git a/source/app/instance_scheduler/handler/environments/orchestrator_environment.py b/source/app/instance_scheduler/handler/environments/orchestrator_environment.py new file mode 100644 index 00000000..2e55515f --- /dev/null +++ b/source/app/instance_scheduler/handler/environments/orchestrator_environment.py @@ -0,0 +1,95 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from os import environ +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError + +from instance_scheduler.util.app_env import AppEnvError, env_to_bool, env_to_list + + +@dataclass(frozen=True) +class OrchestratorEnvironment: + # logging + user_agent_extra: str + log_group: str + topic_arn: str + enable_debug_logging: bool + # references + scheduling_request_handler_name: str + config_table_name: str + # scheduling + enable_schedule_hub_account: bool + enable_ec2_service: bool + enable_rds_service: bool + enable_rds_clusters: bool + enable_neptune_service: bool + enable_docdb_service: bool + enable_asg_service: bool + schedule_regions: list[str] + + # used for metrics only + default_timezone: ZoneInfo + enable_rds_snapshots: bool + scheduler_frequency_minutes: int + enable_aws_organizations: bool + enable_ec2_ssm_maintenance_windows: bool + ops_dashboard_enabled: bool + start_tags: list[str] + stop_tags: list[str] + + @staticmethod + def from_env() -> "OrchestratorEnvironment": + try: + return OrchestratorEnvironment( + user_agent_extra=environ["USER_AGENT_EXTRA"], + log_group=environ["LOG_GROUP"], + topic_arn=environ["ISSUES_TOPIC_ARN"], + enable_debug_logging=env_to_bool(environ["ENABLE_DEBUG_LOGS"]), + config_table_name=environ["CONFIG_TABLE"], + scheduling_request_handler_name=environ[ + "SCHEDULING_REQUEST_HANDLER_NAME" + ], + # scheduling + enable_schedule_hub_account=env_to_bool( + environ["ENABLE_SCHEDULE_HUB_ACCOUNT"] + ), + enable_ec2_service=env_to_bool(environ["ENABLE_EC2_SERVICE"]), + enable_rds_service=env_to_bool(environ["ENABLE_RDS_SERVICE"]), + enable_rds_clusters=env_to_bool(environ["ENABLE_RDS_CLUSTERS"]), + enable_neptune_service=env_to_bool(environ["ENABLE_NEPTUNE_SERVICE"]), + enable_docdb_service=env_to_bool(environ["ENABLE_DOCDB_SERVICE"]), + enable_asg_service=env_to_bool(environ["ENABLE_ASG_SERVICE"]), + schedule_regions=env_to_list(environ["SCHEDULE_REGIONS"]), + # 
metrics data + default_timezone=ZoneInfo(environ["DEFAULT_TIMEZONE"]), + enable_rds_snapshots=env_to_bool(environ["ENABLE_RDS_SNAPSHOTS"]), + scheduler_frequency_minutes=int(environ["SCHEDULING_INTERVAL_MINUTES"]), + enable_aws_organizations=env_to_bool( + environ["ENABLE_AWS_ORGANIZATIONS"] + ), + enable_ec2_ssm_maintenance_windows=env_to_bool( + environ["ENABLE_EC2_SSM_MAINTENANCE_WINDOWS"] + ), + ops_dashboard_enabled=env_to_bool(environ["OPS_DASHBOARD_ENABLED"]), + start_tags=env_to_list(environ["START_TAGS"]), + stop_tags=env_to_list(environ["STOP_TAGS"]), + ) + except ZoneInfoNotFoundError as err: + raise AppEnvError(f"Invalid timezone: {err.args[0]}") from err + except KeyError as err: + raise AppEnvError( + f"Missing required application environment variable: {err.args[0]}" + ) from err + + def scheduled_services(self) -> list[str]: + result = [] + if self.enable_ec2_service: + result.append("ec2") + if ( + self.enable_rds_service + or self.enable_rds_clusters + or self.enable_neptune_service + or self.enable_docdb_service + ): + result.append("rds") + return result diff --git a/source/app/instance_scheduler/handler/environments/remote_registration_environment.py b/source/app/instance_scheduler/handler/environments/remote_registration_environment.py new file mode 100644 index 00000000..8eb44a71 --- /dev/null +++ b/source/app/instance_scheduler/handler/environments/remote_registration_environment.py @@ -0,0 +1,24 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from os import environ + +from instance_scheduler.util.app_env import AppEnvError + + +@dataclass +class RemoteRegistrationEnvironment: + user_agent_extra: str + hub_registration_lambda_arn: str + + @staticmethod + def from_env() -> "RemoteRegistrationEnvironment": + try: + return RemoteRegistrationEnvironment( + user_agent_extra=environ["USER_AGENT_EXTRA"], + hub_registration_lambda_arn=environ["HUB_REGISTRATION_LAMBDA_ARN"], + ) + except KeyError as err: + raise AppEnvError( + f"Missing required application environment variable: {err.args[0]}" + ) from err diff --git a/source/app/instance_scheduler/handler/environments/scheduling_request_environment.py b/source/app/instance_scheduler/handler/environments/scheduling_request_environment.py new file mode 100644 index 00000000..b7418f3b --- /dev/null +++ b/source/app/instance_scheduler/handler/environments/scheduling_request_environment.py @@ -0,0 +1,72 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from os import environ +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError + +from instance_scheduler.util.app_env import AppEnvError, env_to_bool, env_to_list + + +@dataclass(frozen=True) +class SchedulingRequestEnvironment: + user_agent_extra: str + log_group: str + topic_arn: str + enable_debug_logging: bool + stack_name: str + state_table_name: str + config_table_name: str + maintenance_window_table_name: str + scheduler_role_name: str + + default_timezone: ZoneInfo + start_tags: list[str] + stop_tags: list[str] + schedule_tag_key: str + enable_ops_monitoring: bool + + # for ec2 + scheduler_frequency_minutes: int + enable_ec2_ssm_maintenance_windows: bool + + # for rds + enable_rds_service: bool + enable_rds_clusters: bool + enable_docdb_service: bool + enable_neptune_service: bool + enable_rds_snapshots: bool + + @staticmethod + def from_env() -> "SchedulingRequestEnvironment": + try: + return SchedulingRequestEnvironment( + user_agent_extra=environ["USER_AGENT_EXTRA"], + log_group=environ["LOG_GROUP"], + topic_arn=environ["ISSUES_TOPIC_ARN"], + enable_debug_logging=env_to_bool(environ["ENABLE_DEBUG_LOGS"]), + stack_name=environ["STACK_NAME"], + config_table_name=environ["CONFIG_TABLE"], + state_table_name=environ["STATE_TABLE"], + maintenance_window_table_name=environ["MAINT_WINDOW_TABLE"], + scheduler_role_name=environ["SCHEDULER_ROLE_NAME"], + default_timezone=ZoneInfo(environ["DEFAULT_TIMEZONE"]), + start_tags=env_to_list(environ["START_TAGS"]), + stop_tags=env_to_list(environ["STOP_TAGS"]), + schedule_tag_key=environ["SCHEDULE_TAG_KEY"], + enable_ec2_ssm_maintenance_windows=env_to_bool( + environ["ENABLE_EC2_SSM_MAINTENANCE_WINDOWS"] + ), + enable_rds_service=env_to_bool(environ["ENABLE_RDS_SERVICE"]), + enable_rds_clusters=env_to_bool(environ["ENABLE_RDS_CLUSTERS"]), + enable_neptune_service=env_to_bool(environ["ENABLE_NEPTUNE_SERVICE"]), + 
enable_docdb_service=env_to_bool(environ["ENABLE_DOCDB_SERVICE"]), + enable_rds_snapshots=env_to_bool(environ["ENABLE_RDS_SNAPSHOTS"]), + scheduler_frequency_minutes=int(environ["SCHEDULING_INTERVAL_MINUTES"]), + enable_ops_monitoring=env_to_bool(environ["ENABLE_OPS_MONITORING"]), + ) + except ZoneInfoNotFoundError as err: + raise AppEnvError(f"Invalid timezone: {err.args[0]}") from err + except KeyError as err: + raise AppEnvError( + f"Missing required application environment variable: {err.args[0]}" + ) from err diff --git a/source/app/instance_scheduler/handler/metrics_uuid_custom_resource.py b/source/app/instance_scheduler/handler/metrics_uuid_custom_resource.py new file mode 100644 index 00000000..7ed82b9c --- /dev/null +++ b/source/app/instance_scheduler/handler/metrics_uuid_custom_resource.py @@ -0,0 +1,75 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import uuid +from typing import TYPE_CHECKING, Any, Mapping, Optional, TypedDict + +import boto3 +from botocore.exceptions import ClientError + +from instance_scheduler.handler.environments.metrics_uuid_environment import ( + MetricsUuidEnvironment, +) +from instance_scheduler.util import get_boto_config +from instance_scheduler.util.custom_resource import ( + CustomResource, + CustomResourceResponse, +) + +if TYPE_CHECKING: + from aws_lambda_powertools.utilities.typing import LambdaContext + from mypy_boto3_ssm import SSMClient +else: + LambdaContext = object + SSMClient = object + + +class CreateUuidRequest(TypedDict): + pass # empty dict, no values are provided + + +def handle_metrics_uuid_request( + event: Mapping[str, Any], context: LambdaContext +) -> Any: + handler = MetricsUuidCustomResource( + event, context, MetricsUuidEnvironment.from_env() + ) + return handler.handle_request() + + +class MetricsUuidCustomResource(CustomResource[CreateUuidRequest]): + """ + custom resource for generating a unique metrics id for the solution + + for 
backwards-compatibility with previous versions (<=1.5.3), this custom resource checks for a metrics-uuid value + already being present in SSM and will return that uuid value instead of generating a new one + """ + + def __init__(self, event: Any, context: LambdaContext, env: MetricsUuidEnvironment): + self._env = env + CustomResource.__init__(self, event, context) + + def _create_request(self) -> CustomResourceResponse: + metrics_uuid = self._get_metrics_uuid_from_ssm_if_exists() + if not metrics_uuid: + metrics_uuid = uuid.uuid4() + + return self.OkResponse(data={"Uuid": str(metrics_uuid)}) + + def _update_request(self) -> CustomResourceResponse: + return self.ErrorResponse( + reason="Updates for this resource type are not supported" + ) + + def _delete_request(self) -> CustomResourceResponse: + return self.OkResponse() # nothing to do + + def _get_metrics_uuid_from_ssm_if_exists(self) -> Optional[uuid.UUID]: + stack_id = self._env.stack_id[-36:] + uuid_key = self._env.uuid_key + str(stack_id) + ssm: SSMClient = boto3.client("ssm", config=get_boto_config()) + try: + ssm_response = ssm.get_parameter(Name=uuid_key) + uuid_parameter = ssm_response.get("Parameter", {}).get("Value") + return uuid.UUID(uuid_parameter) + except ClientError: + return None diff --git a/source/app/instance_scheduler/handler/remote_registration_custom_resource.py b/source/app/instance_scheduler/handler/remote_registration_custom_resource.py new file mode 100644 index 00000000..4e7ca936 --- /dev/null +++ b/source/app/instance_scheduler/handler/remote_registration_custom_resource.py @@ -0,0 +1,134 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import json +from functools import cached_property +from typing import TYPE_CHECKING, Any, Final, Mapping, TypedDict + +import boto3 +from aws_lambda_powertools import Logger +from botocore.exceptions import ClientError +from urllib3 import PoolManager + +from instance_scheduler.handler.environments.remote_registration_environment import ( + RemoteRegistrationEnvironment, +) +from instance_scheduler.util import get_boto_config +from instance_scheduler.util.custom_resource import ( + CustomResource, + CustomResourceResponse, +) + +if TYPE_CHECKING: + from aws_lambda_powertools.utilities.typing import LambdaContext + from mypy_boto3_lambda import LambdaClient +else: + LambdaContext = object + LambdaClient = object + +http = PoolManager() + + +def handle_remote_registration_request( + event: Mapping[str, Any], context: LambdaContext +) -> None: + # this lambda exists in the spoke stack and does not require the SnsHandler + logger = Logger(log_uncaught_exceptions=True, use_rfc3339=True) + try: + env = RemoteRegistrationEnvironment.from_env() + handler = RemoteRegistrationCustomResourceHandler(event, context, logger, env) + handler.handle_request() + except Exception: + logger.error("Error occurred while running custom resource lambda") + response_body = { + "Status": "FAILED", + "Reason": "Response sent to cloudformation to prevent hung resource", + "PhysicalResourceId": event.get("LogicalResourceId"), + "StackId": event.get("StackId"), + "RequestId": event.get("RequestId"), + "LogicalResourceId": event.get("LogicalResourceId"), + } + + headers = {"Content-Type": "application/json"} + http.request( # type: ignore[no-untyped-call] + "PUT", + event.get("ResponseURL"), + headers=headers, + body=json.dumps(response_body), + ) + + +class RemoteRegistrationProperties(TypedDict): + pass + + +class RemoteRegistrationCustomResourceHandler( + CustomResource[RemoteRegistrationProperties] +): + """ + Implements handler for remote 
registration custom resource + """ + + def __init__( + self, + event: Any, + context: LambdaContext, + logger: Logger, + env: RemoteRegistrationEnvironment, + ) -> None: + super().__init__(event, context) + self._env: Final = env + self._logger: Final = logger + self._spoke_account_id: Final = context.invoked_function_arn.split(":")[4] + + @cached_property + def _lambda_client(self) -> LambdaClient: + client: LambdaClient = boto3.client("lambda", config=get_boto_config()) + return client + + # handles Create request from CloudFormation + def _create_request(self) -> CustomResourceResponse: + try: + payload = str.encode( + json.dumps({"account": self._spoke_account_id, "operation": "Register"}) + ) + self._lambda_client.invoke( + FunctionName=self._env.hub_registration_lambda_arn, + InvocationType="RequestResponse", + LogType="None", + Payload=payload, + ) + self._logger.info( + f"Registered spoke account {self._spoke_account_id} with hub account registration lambda {self._env.hub_registration_lambda_arn}" + ) + return self.OkResponse() + except ClientError: + message = f"Unable to register with hub account via registration lambda: {self._env.hub_registration_lambda_arn}" + self._logger.error(message) + return self.ErrorResponse(reason=message) + + # handles Update request from CloudFormation + def _update_request(self) -> CustomResourceResponse: + return self.OkResponse(reason="No handler for Update request") + + # handles Delete request from CloudFormation + def _delete_request(self) -> CustomResourceResponse: + try: + payload = str.encode( + json.dumps( + {"account": self._spoke_account_id, "operation": "Deregister"} + ) + ) + self._lambda_client.invoke( + FunctionName=self._env.hub_registration_lambda_arn, + InvocationType="RequestResponse", + LogType="None", + Payload=payload, + ) + self._logger.info( + f"Deregistered spoke account {self._spoke_account_id} with hub account registration lambda {self._env.hub_registration_lambda_arn}" + ) + return 
self.OkResponse() + except ClientError: + message = f"Unable to deregister with hub account via registration lambda: {self._env.hub_registration_lambda_arn}" + self._logger.error(message) + return self.ErrorResponse(reason=message) diff --git a/source/app/instance_scheduler/handler/schedule_update.py b/source/app/instance_scheduler/handler/schedule_update.py new file mode 100644 index 00000000..190aaaa4 --- /dev/null +++ b/source/app/instance_scheduler/handler/schedule_update.py @@ -0,0 +1,91 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from collections.abc import Iterable, Iterator +from typing import TYPE_CHECKING, Any, Final +from uuid import uuid4 + +from aws_lambda_powertools.logging import Logger +from aws_lambda_powertools.utilities.data_classes import DynamoDBStreamEvent +from aws_lambda_powertools.utilities.data_classes.dynamo_db_stream_event import ( + DynamoDBRecord, + DynamoDBRecordEventName, +) + +from instance_scheduler.handler.asg_orchestrator import orchestrate_asgs +from instance_scheduler.handler.environments.asg_orch_env import AsgOrchEnv +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleDefinitionStore, +) +from instance_scheduler.util.sns_handler import SnsHandler + +if TYPE_CHECKING: + from aws_lambda_powertools.utilities.typing import LambdaContext +else: + LambdaContext = object + +logger: Final = Logger(log_uncaught_exceptions=True, use_rfc3339=True) + + +@logger.inject_lambda_context(log_event=True) +def lambda_handler(event: dict[str, Any], context: LambdaContext) -> None: + env: Final = AsgOrchEnv.from_env() + sns_handler: Final = SnsHandler( + topic_arn=env.issues_topic_arn, + log_group_name=context.log_group_name, + log_stream_name=context.log_stream_name, + raise_exceptions=env.logger_raise_exceptions, + ) + 
logger.addHandler(sns_handler) + correlation_id: Final = uuid4() + logger.set_correlation_id(str(correlation_id)) + + stream_event: Final = DynamoDBStreamEvent(event) + store: Final = DynamoScheduleDefinitionStore(env.config_table_name) + schedule_names: Final = list( + set(schedule_names_from_records(stream_event.records, store)) + ) + + orchestrate_asgs(env, context, schedule_names) + + +def schedule_names_from_records( # NOSONAR + records: Iterable[DynamoDBRecord], store: ScheduleDefinitionStore +) -> Iterator[str]: + for record in records: + if record.event_name not in { + DynamoDBRecordEventName.INSERT, + DynamoDBRecordEventName.MODIFY, + }: + # these should be filtered already, but just in case + continue + + ddb = record.dynamodb + if ddb is None: + continue + + keys = ddb.keys + if keys is None: + continue + + record_type: str | None = keys.get("type") + if record_type is None: + continue + + record_name: str | None = keys.get("name") + if record_name is None: + continue + + if record_type == "schedule": + yield record_name + elif record_type == "period": + try: + # this is not very efficient + # we should make one pass, finding all schedules given a list of period names + yield from store.find_by_period(record_name).keys() + except Exception: + logger.exception( + f"Unable to determine schedules to update based on period {record_name}" + ) diff --git a/source/app/instance_scheduler/handler/scheduling_orchestrator.py b/source/app/instance_scheduler/handler/scheduling_orchestrator.py index 12758482..0a472f38 100644 --- a/source/app/instance_scheduler/handler/scheduling_orchestrator.py +++ b/source/app/instance_scheduler/handler/scheduling_orchestrator.py @@ -1,69 +1,133 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 import json -from collections.abc import Iterator, Mapping +import traceback +from collections.abc import Mapping from datetime import datetime, timezone -from typing import TYPE_CHECKING, Any, Literal, Optional, TypedDict, TypeGuard +from typing import TYPE_CHECKING, Any, Literal, TypedDict, TypeGuard, cast -import boto3 - -from instance_scheduler import configuration from instance_scheduler.boto_retry import get_client_with_standard_retry -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.scheduler_config import GlobalConfig -from instance_scheduler.configuration.scheduling_context import SchedulingContext -from instance_scheduler.handler.base import Handler +from instance_scheduler.handler.environments.orchestrator_environment import ( + OrchestratorEnvironment, +) +from instance_scheduler.handler.scheduling_request import SchedulingRequest +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.period_definition import ( + InvalidPeriodDefinition, + PeriodDefinition, +) +from instance_scheduler.model.schedule_definition import ( + InvalidScheduleDefinition, + ScheduleDefinition, +) +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from instance_scheduler.model.store.in_memory_period_definition_store import ( + InMemoryPeriodDefinitionStore, +) +from instance_scheduler.model.store.in_memory_schedule_definition_store import ( + InMemoryScheduleDefinitionStore, +) +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleDefinitionStore, +) from 
instance_scheduler.ops_metrics.metric_type.deployment_description_metric import ( DeploymentDescriptionMetric, ScheduleFlagCounts, ) from instance_scheduler.ops_metrics.metrics import collect_metric, should_collect_metric -from instance_scheduler.util.app_env import AppEnv, get_app_env +from instance_scheduler.util import safe_json from instance_scheduler.util.logger import Logger +from instance_scheduler.util.scheduling_target import get_account_ids, list_all_targets +from instance_scheduler.util.validation import ValidationException, validate_string if TYPE_CHECKING: from aws_lambda_powertools.utilities.typing import LambdaContext else: LambdaContext = object - STSClient = object LOG_STREAM = "{}-{:0>4d}{:0>2d}{:0>2d}" -LOG_STREAM_PREFIX = "Scheduler" class OrchestrationRequest(TypedDict): scheduled_action: Literal["run_orchestrator"] +def validate_orchestration_request( + untyped_dict: Mapping[str, Any] +) -> TypeGuard[OrchestrationRequest]: + validate_string(untyped_dict, "scheduled_action", required=True) + + if untyped_dict["scheduled_action"] != "run_orchestrator": + raise ValidationException( + f"unknown scheduled_action. 
received '{untyped_dict['scheduled_action']}', expected 'run_orchestrator'" + ) + + return True + + LAMBDA_PAYLOAD_CAPACITY_BYTES = ( 200_000 # is actually 256_000 but this provides some overhead ) -class SchedulingOrchestratorHandler(Handler[OrchestrationRequest]): +def handle_orchestration_request( + event: Mapping[str, Any], context: LambdaContext +) -> Any: + env = OrchestratorEnvironment.from_env() + dt = datetime.now(timezone.utc) + logstream = "SchedulingOrchestratorHandler-{:0>4d}{:0>2d}{:0>2d}".format( + dt.year, dt.month, dt.day + ) + logger = Logger( + log_group=env.log_group, + log_stream=logstream, + topic_arn=env.topic_arn, + debug=env.enable_debug_logging, + ) + + with logger: + try: + validate_orchestration_request(event) + event = cast(OrchestrationRequest, event) + handler = SchedulingOrchestratorHandler(event, context, env, logger) + return handler.handle_request() + except Exception as e: + # log error to SNS, then let the lambda execution fail + logger.error( + "Error handling orchestration registration request {}: ({})\n{}", + safe_json(event), + e, + traceback.format_exc(), + ) + raise e + + +class SchedulingOrchestratorHandler: """ Handles event from cloudwatch rule timer """ - def __init__(self, event: OrchestrationRequest, context: LambdaContext) -> None: + def __init__( + self, + event: OrchestrationRequest, + context: LambdaContext, + env: OrchestratorEnvironment, + logger: Logger, + ) -> None: + self._env = env self._context = context self._event = event - self._configuration: Optional[GlobalConfig] = None + self._logger = logger self._lambda_client = None self._hub_account_id: str = context.invoked_function_arn.split(":")[4] - # Setup logging - classname = self.__class__.__name__ - app_env = get_app_env() - dt = datetime.now(timezone.utc) - logstream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day) - self._logger = Logger( - log_group=app_env.log_group, - log_stream=logstream, - topic_arn=app_env.topic_arn, - 
debug=app_env.enable_debug_logging, - ) - @property def lambda_client(self) -> Any: """ @@ -74,59 +138,6 @@ def lambda_client(self) -> Any: self._lambda_client = get_client_with_standard_retry("lambda") return self._lambda_client - @property - def configuration(self) -> GlobalConfig: - """ - Returns the scheduler configuration - :return: scheduler configuration - """ - if self._configuration is None: - self._configuration = configuration.get_global_configuration(self._logger) - return self._configuration - - def accounts_and_roles(self, config: GlobalConfig) -> Iterator[str]: - """ - Iterates account and cross-account-roles of the accounts to operate on - :return: - """ - processed_accounts = [] - - if config.schedule_lambda_account: - processed_accounts.append(self._hub_account_id) - yield self._hub_account_id - - for remote_account in config.remote_account_ids: - if remote_account is None: - continue - # warn and skip if account was already processed - if remote_account in processed_accounts: - self._logger.warning( - "Remote account {} is already processed", remote_account - ) - continue - yield remote_account - - def target_account_id(self, context: SchedulingContext) -> str: - """ - Iterates list of accounts to process - :param context: - :return: - """ - if context.schedule_lambda_account: - return self._hub_account_id - else: - return context.account_id - - @staticmethod - def is_handling_request( - event: Mapping[str, Any] - ) -> TypeGuard[OrchestrationRequest]: - """ - Handler for cloudwatch event to run the scheduler - :return: True - """ - return str(event.get("scheduled_action", "")) == "run_orchestrator" - def handle_request(self) -> list[Any]: """ Handles the CloudWatch Rule timer events @@ -140,14 +151,39 @@ def handle_request(self) -> list[Any]: datetime.now(), ) + ddb_config_item_store = DdbConfigItemStore(self._env.config_table_name) + + schedules, periods = prefetch_schedules_and_periods(self._env, self._logger) + ddb_config_item = 
ddb_config_item_store.get() + + serialized_schedules = schedules.serialize() + serialized_periods = periods.serialize() + result = [] - for scheduling_context in self.list_scheduling_contexts(self.configuration): - result.append(self._run_scheduling_lambda(scheduling_context)) + for target in list_all_targets( + ddb_config_item, self._env, self._logger, self._context + ): + current_dt_str = datetime.now(timezone.utc).isoformat() + scheduler_request = SchedulingRequest( + action="scheduler:run", + account=target.account, + region=target.region, + service=target.service, + current_dt=current_dt_str, + dispatch_time=datetime.now(timezone.utc).isoformat(), + ) + scheduler_request["schedules"] = serialized_schedules + scheduler_request["periods"] = serialized_periods + result.append(self._run_scheduling_lambda(scheduler_request)) - if should_collect_metric(DeploymentDescriptionMetric, logger=self._logger): + if should_collect_metric(DeploymentDescriptionMetric): collect_metric( self.build_deployment_description_metric( - self.configuration, get_app_env(), self._context + ddb_config_item, + schedules, + periods, + self._env, + self._context, ), logger=self._logger, ) @@ -156,88 +192,30 @@ def handle_request(self) -> list[Any]: finally: self._logger.flush() - def list_scheduling_contexts( - self, config: GlobalConfig - ) -> Iterator[SchedulingContext]: - services = config.scheduled_services - regions = config.regions - current_dt = datetime.now(timezone.utc) - if not regions: - regions = [boto3.Session().region_name] - # todo: better way to use local region? - # todo: could pull from event the same as how lambda_account is fetched - - for service in services: - for region in regions: - for account in self.accounts_and_roles( - config - ): # todo: pull from config.remote_accounts directly? 
- if account is self._hub_account_id: # local account - schedule_lambda_account = True - account_id = "" - else: # remote account - schedule_lambda_account = False - account_id = account - - yield SchedulingContext( - account_id=account_id, # mutated above - service=service, - region=region, - current_dt=current_dt, - schedules=config.schedules, - default_timezone=config.default_timezone, - schedule_clusters=config.schedule_clusters, - tag_name=config.tag_name, - trace=config.trace, - enable_ssm_maintenance_windows=config.enable_ssm_maintenance_windows, - use_metrics=config.use_metrics, - namespace=config.namespace, - aws_partition=config.aws_partition, - scheduler_role_name=config.scheduler_role_name, - organization_id=config.organization_id, - schedule_lambda_account=schedule_lambda_account, # mutated above - create_rds_snapshot=config.create_rds_snapshot, - started_tags=config.started_tags, # - stopped_tags=config.stopped_tags, - ) - - def _run_scheduling_lambda(self, context: SchedulingContext) -> dict[str, Any]: + def _run_scheduling_lambda( + self, scheduler_request: SchedulingRequest + ) -> dict[str, Any]: # runs a service/account/region subset of the configuration as a new lambda function self._logger.info( "Starting lambda function for scheduling {} instances for account {} in region {}", - context.service, - self.target_account_id(context), - context.region, - ) - - # need to convert configuration to dictionary to allow it to be passed in event - event_payload = context.to_dict() - - payload = str.encode( - json.dumps( - { - "action": "scheduler:run", - "configuration": event_payload, - "dispatch_time": str(datetime.now()), - } - ) + scheduler_request["service"], + scheduler_request["account"], + scheduler_request["region"], ) + payload = str.encode(json.dumps(scheduler_request)) if len(payload) > LAMBDA_PAYLOAD_CAPACITY_BYTES: - strip_schedules_and_periods(event_payload) - payload = str.encode( - json.dumps( - { - "action": "scheduler:run", - 
"configuration": event_payload, - "dispatch_time": str(datetime.now()), - } - ) - ) + # strip periods and let the request handler reload them + del scheduler_request["periods"] + payload = str.encode(json.dumps(scheduler_request)) + if len(payload) > LAMBDA_PAYLOAD_CAPACITY_BYTES: + # if payload is still too large, strip schedules as well + del scheduler_request["schedules"] + payload = str.encode(json.dumps(scheduler_request)) # start the lambda function resp = self.lambda_client.invoke( - FunctionName=self._context.function_name, + FunctionName=self._env.scheduling_request_handler_name, InvocationType="Event", LogType="None", Payload=payload, @@ -247,13 +225,13 @@ def _run_scheduling_lambda(self, context: SchedulingContext) -> dict[str, Any]: "Error executing {}, version {} with configuration {}", self._context.function_name, self._context.function_version, - event_payload, + payload, ) result = { - "service": context.service, - "account": self.target_account_id(context), - "region": context.region, + "service": scheduler_request["service"], + "account": scheduler_request["account"], + "region": scheduler_request["region"], "lambda_invoke_result": resp["StatusCode"], "lambda_request_id": resp["ResponseMetadata"]["RequestId"], } @@ -261,41 +239,53 @@ def _run_scheduling_lambda(self, context: SchedulingContext) -> dict[str, Any]: def build_deployment_description_metric( self, - global_config: GlobalConfig, - app_env: AppEnv, + ddb_config_item: DdbConfigItem, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, + env: OrchestratorEnvironment, lambda_context: LambdaContext, ) -> DeploymentDescriptionMetric: flag_counts = ScheduleFlagCounts() - for schedule in global_config.schedules.values(): - flag_counts.stop_new_instances += schedule.stop_new_instances is True + schedules = schedule_store.find_all() + periods = period_store.find_all() + for schedule in schedules.values(): + flag_counts.stop_new_instances += schedule.stop_new_instances 
in [ + True, + None, + ] # default is also true flag_counts.enforced += schedule.enforced is True flag_counts.retain_running += schedule.retain_running is True flag_counts.hibernate += schedule.hibernate is True flag_counts.override += schedule.override_status is not None - flag_counts.use_ssm_maintenance_window += ( - schedule.use_maintenance_window is True + flag_counts.use_ssm_maintenance_window += bool( + schedule.ssm_maintenance_window ) - flag_counts.use_metrics += schedule.use_metrics is True flag_counts.non_default_timezone += schedule.timezone != str( - global_config.default_timezone + env.default_timezone ) metric = DeploymentDescriptionMetric( - services=self.configuration.scheduled_services, - regions=self.configuration.regions, - num_accounts=sum(1 for _ in self.accounts_and_roles(self.configuration)), - num_schedules=len(global_config.schedules), - num_cfn_schedules=_count_cfn_schedules(global_config.schedules), + services=get_enabled_services(env), + regions=env.schedule_regions, + num_accounts=sum( + 1 + for _ in get_account_ids( + ddb_config_item, self._env, self._logger, self._context + ) + ), + num_schedules=len(schedules), + num_cfn_schedules=_count_cfn_schedules(schedules), + num_one_sided_schedules=_count_one_sided_schedules(schedules, periods), schedule_flag_counts=flag_counts, - default_timezone=str(global_config.default_timezone), - schedule_aurora_clusters=global_config.schedule_clusters, - create_rds_snapshots=global_config.create_rds_snapshot, - schedule_interval_minutes=app_env.scheduler_frequency_minutes, + default_timezone=str(env.default_timezone), + create_rds_snapshots=env.enable_rds_snapshots, + schedule_interval_minutes=env.scheduler_frequency_minutes, memory_size_mb=lambda_context.memory_limit_in_mb, - using_organizations=app_env.enable_aws_organizations, - enable_ec2_ssm_maintenance_windows=app_env.enable_ec2_ssm_maintenance_windows, - num_started_tags=len(app_env.start_tags), - num_stopped_tags=len(app_env.stop_tags), + 
using_organizations=env.enable_aws_organizations, + enable_ec2_ssm_maintenance_windows=env.enable_ec2_ssm_maintenance_windows, + ops_dashboard_enabled=env.ops_dashboard_enabled, + num_started_tags=len(env.start_tags), + num_stopped_tags=len(env.stop_tags), ) return metric @@ -306,8 +296,96 @@ def strip_schedules_and_periods(event_dict: dict[str, Any]) -> None: event_dict["periods"] = {} -def _count_cfn_schedules(schedules: dict[str, InstanceSchedule]) -> int: +def _count_cfn_schedules(schedules: Mapping[str, ScheduleDefinition]) -> int: count = 0 for schedule in schedules.values(): count += bool(schedule.configured_in_stack) return count + + +def _count_one_sided_schedules( + schedules: Mapping[str, ScheduleDefinition], periods: Mapping[str, PeriodDefinition] +) -> int: + def is_one_sided_period(period: PeriodDefinition | None) -> bool: + if period is None: + return False + return bool( + (period.begintime and not period.endtime) + or (not period.begintime and period.endtime) + ) # logical xor + + count = 0 + for schedule in schedules.values(): + for schedule_period in schedule.periods: + if is_one_sided_period(periods[schedule_period.name]): + count += 1 + break + return count + + +def get_enabled_services(env: OrchestratorEnvironment) -> list[str]: + enabled_services = [] + if env.enable_ec2_service: + enabled_services.append("ec2") + if env.enable_rds_service: + enabled_services.append("rds") + if env.enable_rds_clusters: + enabled_services.append("rds-clusters") + if env.enable_neptune_service: + enabled_services.append("neptune") + if env.enable_docdb_service: + enabled_services.append("docdb") + if env.enable_asg_service: + enabled_services.append("asg") + return enabled_services + + +def prefetch_schedules_and_periods( + env: OrchestratorEnvironment, logger: Logger +) -> tuple[InMemoryScheduleDefinitionStore, InMemoryPeriodDefinitionStore]: + schedules, schedule_errors = prefetch_schedules(env) + periods, period_errors = prefetch_periods(env) + + 
cached_schedule_store = InMemoryScheduleDefinitionStore(schedules) + cached_period_store = InMemoryPeriodDefinitionStore(periods) + + exceptions: list[InvalidScheduleDefinition | InvalidPeriodDefinition] = list() + exceptions.extend(schedule_errors) + exceptions.extend(period_errors) + + for schedule in list(cached_schedule_store.find_all().values()): + # filter and warn about schedules referencing periods that do not exist + try: + schedule.to_instance_schedule(cached_period_store) + except InvalidScheduleDefinition as e: + cached_schedule_store.delete(schedule.name) + exceptions.append( + InvalidScheduleDefinition( + f"Invalid Schedule Definition:\n{json.dumps(schedule.to_item(), indent=2)}\n{e}" + ) + ) + + logger.info("prefetched {} schedules and {} periods", len(schedules), len(periods)) + if exceptions: + logger.error( + "There are incorrectly configured schedules/periods!\n{}", + "\n\n".join(map(str, exceptions)), + ) + + return cached_schedule_store, cached_period_store + + +def prefetch_schedules( + env: OrchestratorEnvironment, +) -> tuple[Mapping[str, ScheduleDefinition], list[InvalidScheduleDefinition]]: + dynamo_store = DynamoScheduleDefinitionStore(env.config_table_name) + schedules, exceptions = dynamo_store.find_all_with_errors() + return schedules, exceptions + + +def prefetch_periods( + env: OrchestratorEnvironment, +) -> tuple[Mapping[str, PeriodDefinition], list[InvalidPeriodDefinition]]: + dynamo_store = DynamoPeriodDefinitionStore(env.config_table_name) + periods, exceptions = dynamo_store.find_all_with_errors() + return periods, exceptions diff --git a/source/app/instance_scheduler/handler/scheduling_request.py b/source/app/instance_scheduler/handler/scheduling_request.py index 57029e3e..6a7ae2cf 100644 --- a/source/app/instance_scheduler/handler/scheduling_request.py +++ b/source/app/instance_scheduler/handler/scheduling_request.py @@ -1,286 +1,320 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import dataclasses -import json +import copy +import inspect +import traceback from collections.abc import Mapping -from datetime import datetime -from typing import TYPE_CHECKING, Any, Final, Literal, Optional, TypedDict, TypeGuard +from datetime import datetime, timezone +from functools import cached_property +from typing import ( + TYPE_CHECKING, + Any, + Final, + Literal, + NotRequired, + Optional, + TypedDict, + TypeGuard, + cast, +) import boto3 -from boto3 import Session -from botocore.exceptions import ClientError from instance_scheduler import configuration -from instance_scheduler.configuration import scheduling_context +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.configuration.scheduling_context import ( + SchedulingContext, + TagTemplate, +) from instance_scheduler.handler.base import Handler +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, +) +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from instance_scheduler.model.store.in_memory_period_definition_store import ( + InMemoryPeriodDefinitionStore, + SerializedInMemoryPeriodDefinitionStore, +) +from instance_scheduler.model.store.in_memory_schedule_definition_store import ( + InMemoryScheduleDefinitionStore, + SerializedInMemoryScheduleDefinitionStore, +) +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleDefinitionStore, +) from instance_scheduler.schedulers.instance_scheduler import InstanceScheduler from instance_scheduler.schedulers.instance_states import InstanceStates -from instance_scheduler.service import ServiceArgs, services -from 
instance_scheduler.util import get_boto_config -from instance_scheduler.util.app_env import get_app_env +from instance_scheduler.service import Ec2Service, RdsService, Service +from instance_scheduler.util import get_boto_config, safe_json from instance_scheduler.util.logger import Logger +from instance_scheduler.util.session_manager import assume_role, get_role_arn +from instance_scheduler.util.validation import ( + ValidationException, + validate_string, + validate_string_list, +) if TYPE_CHECKING: from aws_lambda_powertools.utilities.typing import LambdaContext from mypy_boto3_lambda.client import LambdaClient - from mypy_boto3_sts.client import STSClient else: LambdaContext = object LambdaClient = object - STSClient = object -class SchedulerRequest(TypedDict): +class SchedulingRequest(TypedDict): action: Literal["scheduler:run"] - configuration: dict[str, Any] dispatch_time: str + account: str + service: str + region: str + current_dt: str + schedules: NotRequired[SerializedInMemoryScheduleDefinitionStore] + periods: NotRequired[SerializedInMemoryPeriodDefinitionStore] + schedule_names: NotRequired[list[str]] -class SchedulingRequestHandler(Handler[SchedulerRequest]): - def __init__(self, event: SchedulerRequest, context: LambdaContext) -> None: - self._function_name: Final = context.function_name - self._hub_account_id: Final = context.invoked_function_arn.split(":")[4] - self._scheduling_context = scheduling_context.from_dict(event["configuration"]) - self._target_account_id: Final = ( - self._hub_account_id - if self._scheduling_context.schedule_lambda_account - else self._scheduling_context.account_id - ) +def validate_scheduler_request( + untyped_dict: Mapping[str, Any] +) -> TypeGuard[SchedulingRequest]: + valid_keys = inspect.get_annotations(SchedulingRequest).keys() + for key in untyped_dict.keys(): + if key not in valid_keys: + raise ValidationException( + f"{key} is not a valid parameter, valid parameters are {valid_keys}" + ) - self._sts_client: 
Optional[STSClient] = None - self._lambda_client: Optional[LambdaClient] = None + validate_string(untyped_dict, "dispatch_time", required=True) + validate_string(untyped_dict, "account", required=True) + validate_string(untyped_dict, "service", required=True) + validate_string(untyped_dict, "region", required=True) + validate_string( + untyped_dict, "current_dt", required=True + ) # todo: validate as ISO string - app_env: Final = get_app_env() + if "schedules" in untyped_dict: + InMemoryScheduleDefinitionStore.validate_serial_data(untyped_dict["schedules"]) - self._state_table_name: Final = app_env.state_table_name - self._stack_name: Final = app_env.stack_name + if "periods" in untyped_dict: + InMemoryPeriodDefinitionStore.validate_serial_data(untyped_dict["periods"]) - log_stream_name: Final = "-".join( - [ - "Scheduler", - self._scheduling_context.service, - self._target_account_id, - self._scheduling_context.region, - ] - ) - dt: Final = datetime.now(self._scheduling_context.default_timezone) - log_stream: Final = "{}-{:0>4d}{:0>2d}{:0>2d}".format( - log_stream_name, dt.year, dt.month, dt.day - ) - self._logger: Final = Logger( - log_group=app_env.log_group, - log_stream=log_stream, - topic_arn=app_env.topic_arn, - debug=app_env.enable_debug_logging, - ) + if "schedule_names" in untyped_dict: + validate_string_list(untyped_dict, "schedule_names", required=False) + + return True - if self._scheduling_context.schedules == {}: - # for large configurations the schedules are not passed in the event, so we - # need to reload them from dynamo - global_config = configuration.get_global_configuration(self._logger) - self._scheduling_context = dataclasses.replace( - self._scheduling_context, schedules=global_config.schedules + +def handle_scheduling_request(event: Mapping[str, Any], context: LambdaContext) -> Any: + # todo: how to surface validation error? 
+ env = SchedulingRequestEnvironment.from_env() + validate_scheduler_request(event) + event = cast(SchedulingRequest, event) + + logger = init_logger( + service=event["service"], + account=event["account"], + region=event["region"], + env=env, + ) + with logger: + try: + handler = SchedulingRequestHandler(event, context, env, logger) + return handler.handle_request() + except Exception as e: + # log error to SNS, then let the lambda execution fail + logger.error( + "Error handling scheduling request {}: ({})\n{}", + safe_json(event), + e, + traceback.format_exc(), ) + raise e + + +def init_logger( + service: str, account: str, region: str, env: SchedulingRequestEnvironment +) -> Logger: + log_stream_name: Final = f"Scheduler-{service}-{account}-{region}" + dt: Final = datetime.now(timezone.utc) + log_stream: Final = "{}-{:0>4d}{:0>2d}{:0>2d}".format( + log_stream_name, dt.year, dt.month, dt.day + ) + + return Logger( + log_group=env.log_group, + log_stream=log_stream, + topic_arn=env.topic_arn, + debug=env.enable_debug_logging, + ) + + +class SchedulingRequestHandler(Handler[SchedulingRequest]): + def __init__( + self, + event: SchedulingRequest, + context: LambdaContext, + env: SchedulingRequestEnvironment, + logger: Logger, + ) -> None: + self._env: Final = env + self._logger = logger + self._function_name: Final = context.function_name + self._hub_account_id: Final = context.invoked_function_arn.split(":")[4] + self._event = event @staticmethod - def is_handling_request(event: Mapping[str, Any]) -> TypeGuard[SchedulerRequest]: + def is_handling_request(event: Mapping[str, Any]) -> TypeGuard[SchedulingRequest]: return str(event.get("action", "")) == "scheduler:run" def handle_request(self) -> Any: with self._logger: - message: Final = ( - "Handler {} scheduling request for service {}, account {}, region {} " - "at {}, time stamp is based on the default timezone selected for the " - "solution." 
- ) - self._logger.info( - message.format( - self.__class__.__name__, - self._scheduling_context.service, - self._target_account_id, - self._scheduling_context.region, - datetime.now(self._scheduling_context.default_timezone), - ) - ) + scheduling_context = build_scheduling_context(self._event, self._env) - service_args: Final = ServiceArgs( - account_id=self._target_account_id, - logger=self._logger, - stack_name=self._stack_name, - session=self.get_session_for_target_account(), - scheduling_context=self._scheduling_context, + spoke_scheduler_role = assume_role( + account=scheduling_context.account_id, + region=scheduling_context.region, + role_name=self._env.scheduler_role_name, ) - service_name: Final = self._scheduling_context.service - - service_strategy: Final = services[service_name](service_args) + service_api: Service # type:ignore[type-arg] + match scheduling_context.service: + case "ec2": + service_api = Ec2Service( + assumed_scheduling_role=spoke_scheduler_role, + logger=self._logger, + scheduling_context=scheduling_context, + env=self._env, + ) + case "rds": + service_api = RdsService( + assumed_scheduling_role=spoke_scheduler_role, + logger=self._logger, + scheduling_context=scheduling_context, + env=self._env, + ) + case _: + raise ValueError(f"Unknown service: {scheduling_context.service}") instance_states: Final = InstanceStates( - self._state_table_name, service_name, self._logger + self._env.state_table_name, scheduling_context.service, self._logger ) scheduler: Final = InstanceScheduler( - service_strategy, - self._scheduling_context, + service_api, + scheduling_context, instance_states, - self._target_account_id, - self.get_role_arn(), self._logger, + self._env, + ) + + self._logger.info( + f"Handler {self.__class__.__name__}" + f" Running {scheduling_context.service.upper()} scheduler" + f" for account {scheduling_context.account_id}" + f" in region(s) {scheduling_context.region}" + f" at {scheduling_context.current_dt}" + f" using role 
{get_role_arn(account_id=scheduling_context.account_id, role_name=self._env.scheduler_role_name)}" ) - result: Final = {self._target_account_id: scheduler.run()} + result: Final = {scheduling_context.account_id: scheduler.run()} self._logger.info("Scheduler result {}", result) return result - @property - def sts(self) -> STSClient: - if self._sts_client is None: - session: Final = Session() - sts_regional_endpoint: Final = str.format( - "https://sts.{}.amazonaws.com", session.region_name - ) - # STS client __must__ use a regional endpoint so that tokens are version 2. - # version 1 tokens are not valid in opt-in regions unless enabled on an - # account level - self._sts_client = session.client( - "sts", - region_name=session.region_name, - endpoint_url=sts_regional_endpoint, - config=get_boto_config(), - ) - return self._sts_client - - def targeting_hub_account(self) -> bool: - return self._target_account_id == self._hub_account_id - - def get_session_for_target_account(self) -> Session: - if self.targeting_hub_account(): - return Session(region_name=self._scheduling_context.region) - else: - return self.get_session_for_spoke_account() - - def get_role_arn(self) -> Optional[str]: - if self.targeting_hub_account(): - # no role in the hub account - return None - else: - spoke_account_role_name: Final = "-".join( - [ - self._scheduling_context.namespace, - self._scheduling_context.scheduler_role_name, - ] - ) - return ":".join( - [ - "arn", - self._scheduling_context.aws_partition, - "iam", - "", - self._target_account_id, - f"role/{spoke_account_role_name}", - ] - ) + @cached_property + def lambda_client(self) -> LambdaClient: + client: LambdaClient = boto3.client("lambda", config=get_boto_config()) + return client - def get_session_for_spoke_account(self) -> Session: - spoke_account_role_arn: Final = self.get_role_arn() - if not spoke_account_role_arn: - raise ValueError("No role to assume") - # get a token for the cross account role and use it to create a session - 
try: - session_name: Final = "{}-scheduler-{}".format( - self._scheduling_context.service, self._target_account_id - ) - # assume a role - token: Final = self.sts.assume_role( - RoleArn=spoke_account_role_arn, RoleSessionName=session_name - ) - credentials: Final = token["Credentials"] - # create a session using the assumed role credentials - return Session( - aws_access_key_id=credentials["AccessKeyId"], - aws_secret_access_key=credentials["SecretAccessKey"], - aws_session_token=credentials["SessionToken"], - region_name=self._scheduling_context.region, - ) - except ClientError as ex: - self._logger.error( - "Error Code {}".format(ex.response.get("Error", {}).get("Code")) - ) - if ex.response.get("Error", {}).get("Code") == "AccessDenied": - self.remove_account_from_config( - aws_account=self._target_account_id, - cross_account_role=spoke_account_role_arn, - ) - else: - self._logger.error( - "Can not assume role {} for account {}, ({}))".format( - spoke_account_role_arn, self._target_account_id, str(ex) - ) - ) - raise RuntimeError( - "Unable to assume role {} for account {}".format( - spoke_account_role_arn, self._target_account_id - ) - ) +def build_scheduling_context( + event: SchedulingRequest, env: SchedulingRequestEnvironment +) -> SchedulingContext: + current_dt = datetime.fromisoformat(event["current_dt"]) - @property - def lambda_client(self) -> LambdaClient: - if self._lambda_client is None: - self._lambda_client = boto3.client("lambda", config=get_boto_config()) - return self._lambda_client + return SchedulingContext( + account_id=event["account"], + service=event["service"], + region=event["region"], + current_dt=current_dt, + default_timezone=env.default_timezone, + schedules=load_schedules(event, env), + scheduling_interval_minutes=env.scheduler_frequency_minutes, + started_tags=build_tags_from_template(",".join(env.start_tags), env), + stopped_tags=build_tags_from_template(",".join(env.stop_tags), env), + ) - def remove_account_from_config( - self, 
aws_account: str, cross_account_role: str - ) -> None: - """ - This method will invoke the lambda to remove the aws_account from the - configuration, it calls the lambda handler eventbus_request_handler, and sends - payload which will update the config by removing the account from further - scheduling. + +def load_schedules( + event: SchedulingRequest, + env: SchedulingRequestEnvironment, +) -> Mapping[str, InstanceSchedule]: + schedule_store: ScheduleDefinitionStore + period_store: PeriodDefinitionStore + + if "schedules" in event: + schedule_store = InMemoryScheduleDefinitionStore.deserialize(event["schedules"]) + else: + dynamo_schedule_store = DynamoScheduleDefinitionStore(env.config_table_name) + schedule_store = InMemoryScheduleDefinitionStore( + dynamo_schedule_store.find_all() + ) + + if "periods" in event: + period_store = InMemoryPeriodDefinitionStore.deserialize(event["periods"]) + else: + dynamo_period_store = DynamoPeriodDefinitionStore(env.config_table_name) + period_store = InMemoryPeriodDefinitionStore(dynamo_period_store.find_all()) + + loaded_schedules: dict[str, InstanceSchedule] = {} + for schedule_def in schedule_store.find_all().values(): + schedule = schedule_def.to_instance_schedule(period_store) + loaded_schedules[schedule.name] = schedule + + return loaded_schedules + + +def build_tags_from_template( + tags_str: Any, + env: SchedulingRequestEnvironment, + tag_variables: Optional[Any] = None, +) -> list[TagTemplate]: + lastkey = None + tags = {} + for tag in tags_str.split(","): + if "=" in tag: + t = tag.partition("=") + tags[t[0]] = t[2] + lastkey = t[0] + elif lastkey is not None: + tags[lastkey] = ",".join([tags[lastkey], tag]) + + tag_vars = {} if tag_variables is None else copy.copy(tag_variables) + + dt = datetime.now(timezone.utc) + tag_vars.update( { - "account": 111122223333, - "detail-type": "Parameter Store Change", - "detail": { - "operation": "Delete" - } + configuration.TAG_VAL_SCHEDULER: env.stack_name, + 
configuration.TAG_VAL_YEAR: "{:0>4d}".format(dt.year), + configuration.TAG_VAL_MONTH: "{:0>2d}".format(dt.month), + configuration.TAG_VAL_DAY: "{:0>2d}".format(dt.day), + configuration.TAG_VAL_HOUR: "{:0>2d}".format(dt.hour), + configuration.TAG_VAL_MINUTE: "{:0>2d}".format(dt.minute), + configuration.TAG_VAL_TIMEZONE: "UTC", } - :param aws_account: account where the assume role permission is not available - for the lambda role to assume. - :param cross_account_role: role name for logging message to SNS. - """ - try: - message: Final = ( - "Removing the account {} from scheduling configuration as assume role " - "permission is missing for the iam role {}" - ) - self._logger.error(message.format(aws_account, cross_account_role)) - payload: Final = str.encode( - json.dumps( - { - "account": aws_account, - "detail-type": "Parameter Store Change", - "detail": {"operation": "Delete"}, - } - ) - ) - response: Final = self.lambda_client.invoke( - FunctionName=self._function_name, - InvocationType="Event", - LogType="None", - Payload=payload, - ) - self._logger.info( - "Removing account {} from configuration".format(aws_account) - ) - self._logger.debug( - "Lambda response {} for removing account from configuration".format( - response - ) - ) - except Exception as ex: - self._logger.error( - "Error invoking lambda {} error {}".format(self._function_name, ex) - ) + ) + + for tag in tags: + value = tags[tag] + if value not in ["", None]: + for v in tag_vars: + tags[tag] = tags[tag].replace("{{{}}}".format(v), tag_vars[v]) + + return [{"Key": t, "Value": tags[t]} for t in tags] diff --git a/source/app/instance_scheduler/handler/setup_demo_data.py b/source/app/instance_scheduler/handler/setup_demo_data.py index c947235c..d9cbb959 100644 --- a/source/app/instance_scheduler/handler/setup_demo_data.py +++ b/source/app/instance_scheduler/handler/setup_demo_data.py @@ -1,61 +1,65 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -from instance_scheduler.configuration.config_admin import ( - ConfigTablePeriodItem, - ConfigTableScheduleItem, -) +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition -PERIOD_WORKING_DAYS = ConfigTablePeriodItem( - name="working-days", - description="Working days", - weekdays={"mon-fri"}, -) -PERIOD_WEEKENDS = ConfigTablePeriodItem( - name="weekends", - description="Days in weekend", - weekdays={"sat-sun"}, -) -PERIOD_OFFICE_HOURS = ConfigTablePeriodItem( - name="office-hours", - description="Office hours", - weekdays={"mon-fri"}, - begintime="09:00", - endtime="17:00", -) -PERIOD_FIRST_MONDAY_IN_QUARTER = ConfigTablePeriodItem( - name="first-monday-in-quarter", - description="Every first monday of each quarter", - weekdays={"mon#1"}, - months={"jan/3"}, -) +DEMO_PERIODS = [ + PeriodDefinition( + name="working-days", + description="Working days", + weekdays={"mon-fri"}, + ), + PeriodDefinition( + name="weekends", + description="Days in weekend", + weekdays={"sat-sun"}, + ), + PeriodDefinition( + name="office-hours", + description="Office hours", + weekdays={"mon-fri"}, + begintime="09:00", + endtime="17:00", + ), + PeriodDefinition( + name="first-monday-in-quarter", + description="Every first monday of each quarter", + weekdays={"mon#1"}, + months={"jan/3"}, + ), +] -SCHEDULE_SEATTLE_OFFICE_HOURS = ConfigTableScheduleItem( - name="seattle-office-hours", - description="Office hours in Seattle (Pacific)", - periods={"office-hours"}, - timezone="US/Pacific", -) -SCHEDULE_UK_OFFICE_HOURS = ConfigTableScheduleItem( - name="uk-office-hours", - description="Office hours in UK", - periods={"office-hours"}, - timezone="Europe/London", -) -SCHEDULE_STOPPED = ConfigTableScheduleItem( - name="stopped", - description="Instances stopped", - override_status="stopped", - 
use_metrics=False, -) -SCHEDULE_RUNNING = ConfigTableScheduleItem( - name="running", - description="Instances running", - override_status="running", - use_metrics=False, -) -SCHEDULE_SCALING = ConfigTableScheduleItem( - name="scale-up-down", - description="Vertical scaling on weekdays, based on UTC time", - periods={"working-days@t2.micro", "weekends@t2.nano"}, - timezone="UTC", -) +DEMO_SCHEDULES = [ + ScheduleDefinition( + name="seattle-office-hours", + description="Office hours in Seattle (Pacific)", + periods=[PeriodIdentifier("office-hours")], + timezone="US/Pacific", + ), + ScheduleDefinition( + name="uk-office-hours", + description="Office hours in UK", + periods=[PeriodIdentifier("office-hours")], + timezone="Europe/London", + ), + ScheduleDefinition( + name="stopped", + description="Instances stopped", + override_status="stopped", + ), + ScheduleDefinition( + name="running", + description="Instances running", + override_status="running", + ), + ScheduleDefinition( + name="scale-up-down", + description="Vertical scaling on weekdays, based on UTC time", + periods=[ + PeriodIdentifier.of("weekends", "t2.nano"), + PeriodIdentifier.of("working-days", "t2.micro"), + ], + timezone="UTC", + ), +] diff --git a/source/app/instance_scheduler/handler/spoke_registration.py b/source/app/instance_scheduler/handler/spoke_registration.py index c92250a9..abd571f6 100644 --- a/source/app/instance_scheduler/handler/spoke_registration.py +++ b/source/app/instance_scheduler/handler/spoke_registration.py @@ -1,14 +1,17 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import json +import traceback from collections.abc import Mapping +from dataclasses import dataclass from datetime import datetime, timezone -from typing import TYPE_CHECKING, Any, TypeGuard +from os import environ +from typing import TYPE_CHECKING, Any, Literal, TypedDict, TypeGuard, cast, get_args -from instance_scheduler.handler.base import Handler -from instance_scheduler.util.app_env import get_app_env -from instance_scheduler.util.dynamodb_utils import DynamoDBUtils +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.util import safe_json +from instance_scheduler.util.app_env import AppEnvError, env_to_bool from instance_scheduler.util.logger import Logger +from instance_scheduler.util.validation import ValidationException, validate_string if TYPE_CHECKING: from aws_lambda_powertools.utilities.typing import LambdaContext @@ -16,101 +19,144 @@ LambdaContext = object -INF_HANDLER = "Request Handler {} : Received request {} at {}" -EVENT_BUS_NAMESPACE_PREFIX = "/scheduler/do-not-delete-manually/{}" -EVENT_CREATE = "Create" -EVENT_DELETE = "Delete" +SpokeRegistrationOperation = Literal["Register", "Deregister"] -SpokeRegistrationRequest = dict[str, Any] +SpokeRequest = TypedDict( + "SpokeRequest", + { + "account": str, + "operation": SpokeRegistrationOperation, + }, + total=False, +) -class SpokeRegistrationHandler(Handler[SpokeRegistrationRequest]): +@dataclass(frozen=True) +class SpokeRegistrationEnvironment: + user_agent_extra: str + log_group: str + topic_arn: str + enable_debug_logging: bool + config_table_name: str + + @staticmethod + def from_env() -> "SpokeRegistrationEnvironment": + try: + return SpokeRegistrationEnvironment( + user_agent_extra=environ["USER_AGENT_EXTRA"], + log_group=environ["LOG_GROUP"], + topic_arn=environ["ISSUES_TOPIC_ARN"], + enable_debug_logging=env_to_bool(environ["ENABLE_DEBUG_LOGS"]), + config_table_name=environ["CONFIG_TABLE"], + ) 
+ except KeyError as err: + raise AppEnvError( + f"Missing required application environment variable: {err.args[0]}" + ) from err + + +def validate_spoke_request(untyped_dict: Mapping[str, Any]) -> TypeGuard[SpokeRequest]: + validate_string(untyped_dict, "account", required=True) + validate_string(untyped_dict, "operation", required=True) + + operation = untyped_dict["operation"] + valid_operations = get_args(SpokeRegistrationOperation) + if operation not in valid_operations: + raise ValidationException( + f"{operation} is not a valid operation, valid operations are {valid_operations}" + ) + + return True + + +class SpokeRegistrationException(Exception): + pass + + +class InvalidRequestException(Exception): + pass + + +def handle_spoke_registration_event( + event: Mapping[str, Any], + _: LambdaContext, +) -> str: + env = SpokeRegistrationEnvironment.from_env() + + dt = datetime.now(timezone.utc) + log_stream = "SpokeRegistration-{:0>4d}{:0>2d}{:0>2d}".format( + dt.year, dt.month, dt.day + ) + with Logger( + log_group=env.log_group, + log_stream=log_stream, + topic_arn=env.topic_arn, + debug=env.enable_debug_logging, + ) as logger: + logger.debug( + "spoke registration handler received event: {}", safe_json(event, indent=3) + ) + + try: + validate_spoke_request(event) + event = cast(SpokeRequest, event) + handler = SpokeRegistrationHandler(event, env=env, logger=logger) + + return handler.handle_request() + except Exception as e: + # log error to SNS, then let the lambda execution fail + logger.error( + "Error handling spoke registration request {}: ({})\n{}", + safe_json(event), + e, + traceback.format_exc(), + ) + raise e + + +class SpokeRegistrationHandler: """ Handles event from cloudwatch rule time """ - def __init__(self, event: SpokeRegistrationRequest, context: LambdaContext) -> None: - self._context = context + def __init__( + self, + event: SpokeRequest, + env: SpokeRegistrationEnvironment, + logger: Logger, + ) -> None: + self._logger = logger self._event 
= event - self._configuration = None - self._lambda_client = None - - logging_stream_name = "-".join(["eventbus_request_handler"]) - dt = datetime.now(timezone.utc) - app_env = get_app_env() - self._config_table_name = app_env.config_table_name - log_stream = "{}-{:0>4d}{:0>2d}{:0>2d}".format( - logging_stream_name, dt.year, dt.month, dt.day - ) - self._logger = Logger( - log_group=app_env.log_group, - log_stream=log_stream, - topic_arn=app_env.topic_arn, - debug=app_env.enable_debug_logging, - ) - - @staticmethod - def is_handling_request( - event: Mapping[str, Any] - ) -> TypeGuard[SpokeRegistrationRequest]: - """ - Handler for EventBus request to update accounts. - :return: True - """ - return str(event.get("detail-type", "")) == "Parameter Store Change" + self._ddb_config_item_store = DdbConfigItemStore(env.config_table_name) def handle_request(self) -> str: """ Handles the CloudWatch Rule timer events :return: """ - try: - self._logger.info( - INF_HANDLER, - self.__class__.__name__, - json.dumps(self._event), - datetime.now(), - ) - detail = self._event.get("detail", None) - if detail is not None: - self._logger.debug(f"Details of the event {detail}") - dynamodb_table = DynamoDBUtils.get_dynamodb_table_resource_ref( - self._config_table_name - ) - config_key = {"name": "scheduler", "type": "config"} - if detail.get("operation") == EVENT_CREATE: - self._logger.info("Add account id from the config") - account = self._event.get("account") - update_account_ids_response = dynamodb_table.update_item( - TableName=self._config_table_name, - Key=config_key, - UpdateExpression="add remote_account_ids :a", - ExpressionAttributeValues={":a": set({account})}, - ReturnValues="UPDATED_NEW", - ) - self._logger.debug( - f"Response from account update: {update_account_ids_response}" - ) - elif detail.get("operation") == EVENT_DELETE: - account = self._event.get("account") - self._logger.info(f"remove account {account} from the config") - update_account_ids_response = 
dynamodb_table.update_item( - TableName=self._config_table_name, - Key=config_key, - UpdateExpression="delete remote_account_ids :a", - ExpressionAttributeValues={":a": set({account})}, - ReturnValues="UPDATED_NEW", - ) - self._logger.debug( - f"Response from account update: {update_account_ids_response}" + event = self._event + account_id = event["account"] + + match event["operation"]: + case "Register": + try: + self._ddb_config_item_store.register_spoke_accounts({account_id}) + self._logger.info("Registered spoke account {}", account_id) + return f"Registered spoke account {account_id}" + except Exception as e: + raise SpokeRegistrationException( + f"Error registering spoke account {account_id}: {e}" ) - else: - self._logger.info( - f"event details.operations doesn't match the scenarios configured. {detail}" + case "Deregister": + try: + self._ddb_config_item_store.deregister_spoke_accounts({account_id}) + self._logger.info("Deregistered spoke account {}", account_id) + return f"Deregistered spoke account {account_id}" + except Exception as e: + raise SpokeRegistrationException( + f"Error deregistering spoke account {account_id}: {e}" ) - return "Exiting event bus request handler" - except Exception as error: - self._logger.error(str(error)) - return "Error in event bus request handler." 
- finally: - self._logger.flush() + case _: + raise InvalidRequestException( + f"Spoke Registration handler received an unknown request: {self._event}" + ) diff --git a/source/app/instance_scheduler/main.py b/source/app/instance_scheduler/main.py index 57dae6e4..a25edd5c 100644 --- a/source/app/instance_scheduler/main.py +++ b/source/app/instance_scheduler/main.py @@ -2,12 +2,16 @@ # SPDX-License-Identifier: Apache-2.0 import json import traceback +from collections.abc import Mapping from datetime import datetime, timezone from time import time -from typing import TYPE_CHECKING, Any, Mapping +from typing import TYPE_CHECKING, Any, Final, Sequence -from instance_scheduler import configuration, util -from instance_scheduler.handler import handlers +from instance_scheduler import util +from instance_scheduler.handler.base import Handler +from instance_scheduler.handler.cfn_schedule import CfnScheduleHandler +from instance_scheduler.handler.cli.cli_request_handler import CliRequestHandler +from instance_scheduler.handler.config_resource import SchedulerSetupHandler from instance_scheduler.util.app_env import get_app_env from instance_scheduler.util.logger import Logger @@ -18,46 +22,47 @@ LOG_STREAM = "InstanceScheduler-{:0>4d}{:0>2d}{:0>2d}" +handlers: Final[Sequence[type[Handler[Any]]]] = ( + SchedulerSetupHandler, + CfnScheduleHandler, + CliRequestHandler, +) + def lambda_handler(event: Mapping[str, Any], context: LambdaContext) -> Any: - try: - dt = datetime.now(timezone.utc) - app_env = get_app_env() - log_stream = LOG_STREAM.format(dt.year, dt.month, dt.day) - result = {} - with Logger( - log_group=app_env.log_group, - log_stream=log_stream, - topic_arn=app_env.topic_arn, - debug=app_env.enable_debug_logging, - ) as logger: - logger.info( - "InstanceScheduler, version {}".format(app_env.solution_version) - ) - - logger.debug("Event is {}", util.safe_json(event, indent=3)) - - for handler_type in handlers: - if handler_type.is_handling_request(event): - start = 
time() - handler = handler_type(event, context) - logger.info("Handler is {}".format(handler_type.__name__)) - try: - result = handler.handle_request() - except Exception as e: - logger.error( - "Error handling request {} by handler {}: ({})\n{}", - json.dumps(event), - handler_type.__name__, - e, - traceback.format_exc(), - ) - execution_time = round(float((time() - start)), 3) - logger.info("Handling took {} seconds", execution_time) - return result - logger.debug( - "Request was not handled, no handler was able to handle this type of request {}", - json.dumps(event), - ) - finally: - configuration.unload_global_configuration() + dt = datetime.now(timezone.utc) + app_env = get_app_env() + log_stream = LOG_STREAM.format(dt.year, dt.month, dt.day) + result = {} + with Logger( + log_group=app_env.log_group, + log_stream=log_stream, + topic_arn=app_env.topic_arn, + debug=app_env.enable_debug_logging, + ) as logger: + logger.info("InstanceScheduler, version {}".format(app_env.solution_version)) + + logger.debug("Event is {}", util.safe_json(event, indent=3)) + + for handler_type in handlers: + if handler_type.is_handling_request(event): + start = time() + handler = handler_type(event, context) + logger.info("Handler is {}".format(handler_type.__name__)) + try: + result = handler.handle_request() + except Exception as e: + logger.error( + "Error handling request {} by handler {}: ({})\n{}", + json.dumps(event), + handler_type.__name__, + e, + traceback.format_exc(), + ) + execution_time = round(float((time() - start)), 3) + logger.info("Handling took {} seconds", execution_time) + return result + logger.debug( + "Request was not handled, no handler was able to handle this type of request {}", + json.dumps(event), + ) diff --git a/source/app/instance_scheduler/maint_win/__init__.py b/source/app/instance_scheduler/maint_win/__init__.py index 6c434e46..04f8b7b7 100644 --- a/source/app/instance_scheduler/maint_win/__init__.py +++ 
b/source/app/instance_scheduler/maint_win/__init__.py @@ -1,5 +1,2 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -from .ec2_ssm import EC2SSMMaintenanceWindows - -__all__ = ["EC2SSMMaintenanceWindows"] diff --git a/source/app/instance_scheduler/maint_win/ec2_ssm.py b/source/app/instance_scheduler/maint_win/ec2_ssm.py deleted file mode 100644 index eab3cc34..00000000 --- a/source/app/instance_scheduler/maint_win/ec2_ssm.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -from datetime import datetime, timedelta -from typing import TYPE_CHECKING, Any, Final, Literal -from zoneinfo import ZoneInfo - -import dateutil.parser -from boto3 import Session - -from instance_scheduler.boto_retry import get_client_with_standard_retry -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.configuration.running_period_dict_element import ( - RunningPeriodDictElement, -) -from instance_scheduler.configuration.scheduler_config_builder import ( - SchedulerConfigBuilder, -) -from instance_scheduler.model import EC2SSMMaintenanceWindowStore -from instance_scheduler.util.logger import Logger - -if TYPE_CHECKING: - from mypy_boto3_ssm.client import SSMClient - from mypy_boto3_ssm.type_defs import ( - DescribeMaintenanceWindowsResultTypeDef, - MaintenanceWindowIdentityTypeDef, - ) -else: - SSMClient = object - DescribeMaintenanceWindowsResultTypeDef = object - MaintenanceWindowIdentityTypeDef = object - - -class EC2SSMMaintenanceWindows: - def __init__( - self, - *, - hub_session: Session, - spoke_session: Session, - spoke_account_id: str, - table_name: str, - scheduler_interval: int, - logger: Logger, - ) -> None: - self._spoke_session: Final = spoke_session - self._spoke_account_id = spoke_account_id - 
self._scheduler_interval = scheduler_interval - self._logger: Final = logger - - self._region: Final = self._spoke_session.region_name - self._store: Final = EC2SSMMaintenanceWindowStore( - session=hub_session, table_name=table_name, logger=self._logger - ) - - def get_ssm_windows_service( - self, session: Session, region: str - ) -> list[MaintenanceWindowIdentityTypeDef]: - """ - This function gets all the ssm windows which are enabled from SSM service. - - Returns: - list of ssm windows - """ - ssm_client: SSMClient = get_client_with_standard_retry( - "ssm", session=session, region=region - ) - resp_maintenance_windows: DescribeMaintenanceWindowsResultTypeDef = {} # type: ignore - try: - resp_maintenance_windows = ssm_client.describe_maintenance_windows( - Filters=[ - { - "Key": "Enabled", - "Values": [ - "true", - ], - }, - ] - ) - except Exception as error: - self._logger.error( - "Caught Exception while getting the maintenance window: {}".format( - error - ) - ) - ssm_window_list = resp_maintenance_windows.get("WindowIdentities", []) - next_token = resp_maintenance_windows.get("NextToken", None) - while next_token is not None: - try: - resp_maintenance_windows = ssm_client.describe_maintenance_windows( - Filters=[ - { - "Key": "Enabled", - "Values": [ - "true", - ], - }, - ], - NextToken=next_token, - ) - except Exception as error: - self._logger.error( - "Caught Exception while getting the maintenance window: {}".format( - error - ) - ) - next_token = resp_maintenance_windows.get("NextToken", None) - ssm_window_list.extend(resp_maintenance_windows.get("WindowIdentities", [])) - return ssm_window_list - - def process_ssm_window( - self, - window: MaintenanceWindowIdentityTypeDef, - ssm_windows_db: list[Any], - account: str, - region: str, - ) -> MaintenanceWindowIdentityTypeDef: - """ - This function checks if the window is enabled before adding it to the db and update the db for disabled windows. 
- - Parameters: - SSM window object - List of maintenance windows from db - """ - new_ssm_window: MaintenanceWindowIdentityTypeDef = {} - current_window = {} - for window_db in ssm_windows_db: - if window_db["Name"] == window["Name"]: - current_window = window_db # get the window from the db with the same name as the window from service - break - if current_window.get("Name") is None: - self._store.put_window_dynamodb( - window=window, account=account, region=region - ) - new_ssm_window = window - else: - if not self.check_window_running(current_window): - self._store.put_window_dynamodb( - window=window, account=account, region=region - ) - - return new_ssm_window - - def check_window_running(self, window: Any) -> bool: - """ - This function checks if given maintenance window is currently running. - - Parameters: - SSM window object - """ - try: - duration = window["Duration"] - if "ScheduleTimezone" in window: - execution_time = datetime.strptime( - window["NextExecutionTime"], "%Y-%m-%dT%H:%M%z" - ) - else: - execution_time = datetime.strptime( - window["NextExecutionTime"], "%Y-%m-%dT%H:%MZ" - ) - window["ScheduleTimezone"] = "UTC" - - tz = ZoneInfo(window["ScheduleTimezone"]) - window_begin_time = execution_time.replace(tzinfo=tz) - window_end_time = execution_time.replace(tzinfo=tz) + timedelta( - hours=int(duration) - ) - current_time = datetime.now(tz).replace(tzinfo=tz) - return window_begin_time < current_time < window_end_time - except Exception as ex: - self._logger.error("error in check_window_running {}".format(ex)) - return False - - def remove_unused_windows( - self, - *, - window_db: Any, - ssm_windows_service: list[MaintenanceWindowIdentityTypeDef], - ) -> None: - """ - This function removes the old windows not present in the ssm service response. 
- """ - window_found = False - for window_service in ssm_windows_service: - if window_service["Name"] == window_db["Name"]: - window_found = True - break - if not window_found: - try: # if window from db is not found in the SSM response delete the entry from db - self._store.delete_window(window_db) - except Exception as error: - self._logger.error( - "Caught Exception while deleting maintenance windows from Dynamodb: {}".format( - error - ) - ) - - def get_ssm_windows(self, session: Session, account: str, region: str) -> list[Any]: - """ - This function gets the list of the SSM maintenance windows - """ - new_ssm_windows_list = [] - ssm_windows_service = self.get_ssm_windows_service(session, region) - ssm_windows_db = self._store.get_ssm_windows_db(account=account, region=region) - for window_service in ssm_windows_service: - new_maintenance_window = self.process_ssm_window( - window_service, ssm_windows_db, account, region - ) - if new_maintenance_window: - new_ssm_windows_list.append(new_maintenance_window) - for window_db in ssm_windows_db: - self.remove_unused_windows( - window_db=window_db, ssm_windows_service=ssm_windows_service - ) - for window in new_ssm_windows_list: - ssm_windows_db.append(window) - return ssm_windows_db - - def ssm_maintenance_windows( - self, session: Session, account: str, region: str - ) -> dict[str, InstanceSchedule | Literal["NOT-FOUND"]]: - windows: dict[str, InstanceSchedule | Literal["NOT-FOUND"]] = {} - try: - window_list = self.get_ssm_windows(session, account, region) - for window in window_list: - start = dateutil.parser.parse(window["NextExecutionTime"]) - scheduler_timezone = window.get("ScheduleTimezone", "UTC") - maintenance_schedule = self._schedule_from_maint_window( - name=window["Name"], - start=start, - interval=self._scheduler_interval, - hours=int(window["Duration"]), - timezone=scheduler_timezone, - ) - windows[str(window["Name"])] = maintenance_schedule - except Exception as ex: - self._logger.error("Error 
loading ssm maintenace windows, ({})".format(ex)) - - return windows - - def _schedule_from_maint_window( - self, name: str, start: datetime, hours: int, interval: int, timezone: str - ) -> InstanceSchedule: - start_dt = start.replace(second=0, microsecond=0) - start_before_begin = interval + 10 - begin_dt = start_dt - timedelta(minutes=start_before_begin) - end_dt = start_dt + timedelta(hours=hours) - if begin_dt.day == end_dt.day: - periods: list[RunningPeriodDictElement] = [ - { - "period": RunningPeriod( - name="{}-period".format(name), - begintime=begin_dt.time(), - endtime=end_dt.time(), - monthdays={begin_dt.day}, - months={begin_dt.month}, - ), - "instancetype": None, - } - ] - elif end_dt - begin_dt <= timedelta(hours=24): - periods = [ - { - "period": RunningPeriod( - name="{}-period-1".format(name), - begintime=begin_dt.time(), - endtime=SchedulerConfigBuilder.get_time_from_string("23:59"), - monthdays={begin_dt.day}, - months={begin_dt.month}, - ), - "instancetype": None, - }, - { - "period": RunningPeriod( - name="{}-period-2".format(name), - begintime=SchedulerConfigBuilder.get_time_from_string("00:00"), - endtime=end_dt.time(), - monthdays={end_dt.day}, - months={end_dt.month}, - ), - "instancetype": None, - }, - ] - else: - periods = [ - { - "period": RunningPeriod( - name="{}-period-1".format(name), - begintime=begin_dt.time(), - endtime=SchedulerConfigBuilder.get_time_from_string("23:59"), - monthdays={begin_dt.day}, - months={begin_dt.month}, - ), - "instancetype": None, - }, - { - "period": RunningPeriod( - name="{}-period-2".format(name), - monthdays={(end_dt - timedelta(days=1)).day}, - months={(end_dt - timedelta(days=1)).month}, - ), - "instancetype": None, - }, - { - "period": RunningPeriod( - name="{}-period-3".format(name), - begintime=SchedulerConfigBuilder.get_time_from_string("00:00"), - endtime=end_dt.time(), - monthdays={end_dt.day}, - months={end_dt.month}, - ), - "instancetype": None, - }, - ] - - schedule = InstanceSchedule( - 
name=name, - timezone=timezone, - description="{} maintenance window".format(name), - enforced=True, - periods=periods, - ) - - self._logger.info( - "Created schedule {} from SSM maintence window, start is {}, end is {}", - name, - begin_dt.isoformat(), - end_dt.isoformat(), - ) - - return schedule diff --git a/source/app/instance_scheduler/maint_win/maintenance_window_context.py b/source/app/instance_scheduler/maint_win/maintenance_window_context.py new file mode 100644 index 00000000..16526cd7 --- /dev/null +++ b/source/app/instance_scheduler/maint_win/maintenance_window_context.py @@ -0,0 +1,212 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from typing import Final, Iterable, Optional + +from instance_scheduler.configuration.scheduling_context import SchedulingContext +from instance_scheduler.maint_win.ssm_mw_client import SSMMWClient +from instance_scheduler.model import EC2SSMMaintenanceWindow, MWStore +from instance_scheduler.model.store.in_memory_mw_store import ( + InMemoryMWStore, + to_account_region_pk, +) +from instance_scheduler.util.logger import Logger +from instance_scheduler.util.session_manager import AssumedRole + + +@dataclass(frozen=True) +class MWDeltas: + unchanged: dict[str, EC2SSMMaintenanceWindow] + updated: dict[str, EC2SSMMaintenanceWindow] + deleted: dict[str, EC2SSMMaintenanceWindow] + + +class MaintenanceWindowContext: + """ + Interface Abstraction for working with EC2 Maintenance Windows within the context + of a specific scheduling target (account/region) + + Expected use of this context would be to invoke reconcile_with_dynamo_db() exactly once at the beginning + of each scheduling request. This will then synchronize maintenance windows stored in the maint_win ddb table + with those reported by the SSM service in that given scheduling target. 
+ + The reason for this data duplication is to work around a limitation in the SSM Maintenance Window API: + When calling describe_maintenance_windows on the SSM api, The information returned for each maintenance window + includes the window duration (in hours) and the next_execution_time (datetime) for that window. This tells us + when the next window will run, but not when the most recent window execution (which may still be running) + last started. As such, we must maintain a separate record of maintenance windows that can track the most recent + execution in order to remember that a 3-hour maintenance window that started an hour ago + will still be active for the next 2 hours. Only after the previous maintenance window has ended can we discard that + information and replace it with the next planned window + """ + + _context: Final[SchedulingContext] + _spoke_scheduler_role: Final[AssumedRole] + _ddb_store: Final[MWStore] + + _prefetched_windows: Optional[dict[str, list[EC2SSMMaintenanceWindow]]] = None + + def __init__( + self, + scheduling_context: SchedulingContext, + spoke_scheduler_role: AssumedRole, + mw_store: MWStore, + logger: Logger, + ): + self._context = scheduling_context + self._spoke_scheduler_role = spoke_scheduler_role + self._ssm_mw_client = SSMMWClient(spoke_scheduler_role) + self._ddb_store = mw_store + self._logger = logger + + def reconcile_ssm_with_dynamodb(self) -> None: + """ + fetch all maintenance windows in the given context and reconcile them with the windows being tracked in + dynamodb. 
The purpose of this table is to remember actively running windows that the SSM api is unable to return + (it only provides info about the next execution in the future) As such, any windows stored in dynamodb and + currently in a running state need to be preserved until after the running window has concluded + """ + self._logger.info( + "Beginning reconciliation of maintenance windows between SSM and DDB" + ) + account = self._context.account_id + region = self._context.region + + raw_ssm_data = SSMMWClient(self._spoke_scheduler_role).get_mws_from_ssm() + filtered_ssm_data = _collect_by_nameid( + self.filter_by_windows_defined_in_schedules(raw_ssm_data) + ) + + ddb_data = _collect_by_nameid( + self._ddb_store.find_by_account_region(account, region) + ) + + # in-mem store to mirror changes made to ddb + in_mem_store = InMemoryMWStore( + {to_account_region_pk(account, region): ddb_data} + ) + + deltas = _compute_delta_between(original=ddb_data, new=filtered_ssm_data) + + # if the window currently stored in dynamodb is still running, we need to respect that window + # until it stops + for updated_window in deltas.updated.values(): + if self._is_running(ddb_data.get(updated_window.name_id)): + continue # reject the update + + try: + self._ddb_store.put(updated_window) + in_mem_store.put(updated_window) + except Exception as e: + self._logger.error( + f"error updating maintenance window {updated_window.name_id} -- skipping update. Error: {e}" + ) + + for deleted_window in deltas.deleted.values(): + if self._is_running(ddb_data.get(deleted_window.name_id)): + continue # reject the update + + try: + self._ddb_store.delete(deleted_window) # update window in ddb + in_mem_store.delete(deleted_window) + except Exception as e: + self._logger.error( + f"error deleting maintenance window {deleted_window.name_id} -- skipping delete. 
Error: {e}" + ) + + # reset cache + self._prefetched_windows = _collect_and_aggregate_by_name( + in_mem_store.find_by_account_region(account, region) + ) + + self._logger.info( + f"reconciliation complete! updated: {len(deltas.updated)}, " + f"deleted: {len(deltas.deleted)}, " + f"total_windows_loaded: {sum([len(mws) for mws in self._prefetched_windows.values()])}" + ) + + def _is_running(self, window: Optional[EC2SSMMaintenanceWindow]) -> bool: + if not window: + return False + return window.is_running_at( + self._context.current_dt, self._context.scheduling_interval_minutes + ) + + def __contains__(self, name: str) -> bool: + return bool(self.find_by_name(name)) + + def find_by_name(self, name: str) -> Iterable[EC2SSMMaintenanceWindow]: + if not self._windows_loaded(): + self.reconcile_ssm_with_dynamodb() + assert self._prefetched_windows is not None + return self._prefetched_windows.get(name, []) + + def _windows_loaded(self) -> bool: + return self._prefetched_windows is not None + + def filter_by_windows_defined_in_schedules( + self, raw_windows: Iterable[EC2SSMMaintenanceWindow] + ) -> Iterable[EC2SSMMaintenanceWindow]: + + # collect all windows referenced by schedules + referenced_windows: set[str] = set() + for schedule in self._context.schedules.values(): + if schedule.ssm_maintenance_window: + referenced_windows.update(schedule.ssm_maintenance_window) + + for window in raw_windows: + if window.window_name in referenced_windows: + yield window + + +def _compute_delta_between( + original: dict[str, EC2SSMMaintenanceWindow], + new: dict[str, EC2SSMMaintenanceWindow], +) -> MWDeltas: + """ + compare 2 dict[name-id, maintenance-window] and return the delta of what has been + updated/deleted from the original to the new dict + """ + + unchanged: dict[str, EC2SSMMaintenanceWindow] = dict() + updated: dict[str, EC2SSMMaintenanceWindow] = dict() + deleted: dict[str, EC2SSMMaintenanceWindow] = dict() + + for new_window in new.values(): + if new_window == 
original.get(new_window.name_id, None): + unchanged[new_window.name_id] = new_window + else: + updated[new_window.name_id] = new_window + + for og_window in original.values(): + if og_window.name_id not in new: + deleted[og_window.name_id] = og_window + + return MWDeltas(unchanged, updated, deleted) + + +def _collect_by_nameid( + maintenance_windows: Iterable[EC2SSMMaintenanceWindow], +) -> dict[str, EC2SSMMaintenanceWindow]: + """ + collect an iterable of MWs into a dict of the form [name-id, EC2SSMMaintenanceWindow] + this function assumes that all name-ids are unique which is true only within a specific account-region, thus + this function should only be used within single target contexts + """ + return {mw.name_id: mw for mw in maintenance_windows} + + +def _collect_and_aggregate_by_name( + maintenance_windows: Iterable[EC2SSMMaintenanceWindow], +) -> dict[str, list[EC2SSMMaintenanceWindow]]: + """ + collect an iterable of MWs into a dict of the form [name, list[EC2SSMMaintenanceWindow]] + """ + result: dict[str, list[EC2SSMMaintenanceWindow]] = dict() + for mw in maintenance_windows: + if mw.window_name not in result: + result[mw.window_name] = [mw] + else: + result[mw.window_name].append(mw) + + return result diff --git a/source/app/instance_scheduler/maint_win/ssm_mw_client.py b/source/app/instance_scheduler/maint_win/ssm_mw_client.py new file mode 100644 index 00000000..08dab7d1 --- /dev/null +++ b/source/app/instance_scheduler/maint_win/ssm_mw_client.py @@ -0,0 +1,36 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from typing import TYPE_CHECKING, Final, Iterator + +from instance_scheduler.model import EC2SSMMaintenanceWindow +from instance_scheduler.util.session_manager import AssumedRole + +if TYPE_CHECKING: + from mypy_boto3_ssm import SSMClient +else: + SSMClient = object + + +class SSMMWClient: + + def __init__(self, spoke_session: AssumedRole): + self._spoke_session = spoke_session + + def get_mws_from_ssm(self) -> Iterator[EC2SSMMaintenanceWindow]: + """ + This function gets all the ssm windows which are enabled from SSM service. + + Returns: + list of ssm windows + """ + ssm: Final[SSMClient] = self._spoke_session.client("ssm") + paginator: Final = ssm.get_paginator("describe_maintenance_windows") + for page in paginator.paginate( + Filters=[{"Key": "Enabled", "Values": ["true"]}] + ): + for identity in page["WindowIdentities"]: + yield EC2SSMMaintenanceWindow.from_identity( + identity=identity, + account_id=self._spoke_session.account, + region=self._spoke_session.region, + ) diff --git a/source/app/instance_scheduler/model/__init__.py b/source/app/instance_scheduler/model/__init__.py index e9570727..f0ef797f 100644 --- a/source/app/instance_scheduler/model/__init__.py +++ b/source/app/instance_scheduler/model/__init__.py @@ -26,11 +26,14 @@ Raises `EC2SSMMaintenanceWindowValidationError` on validation error Store: `EC2SSMMaintenanceWindowStore` """ +from instance_scheduler.model.store.maint_win_store import EC2SSMMaintenanceWindowStore + from .maint_win import EC2SSMMaintenanceWindow, EC2SSMMaintenanceWindowValidationError -from .maint_win_store import EC2SSMMaintenanceWindowStore +from .store.mw_store import MWStore __all__ = [ "EC2SSMMaintenanceWindow", "EC2SSMMaintenanceWindowStore", + "MWStore", "EC2SSMMaintenanceWindowValidationError", ] diff --git a/source/app/instance_scheduler/model/ddb_config_item.py b/source/app/instance_scheduler/model/ddb_config_item.py new file mode 100644 index 00000000..1cfe92ae --- /dev/null 
+++ b/source/app/instance_scheduler/model/ddb_config_item.py @@ -0,0 +1,42 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass, field +from typing import TYPE_CHECKING + +from instance_scheduler.model.ddb_item_utils import skip_if_empty + +if TYPE_CHECKING: + from mypy_boto3_dynamodb.type_defs import AttributeValueTypeDef +else: + AttributeValueTypeDef = object + + +@dataclass +class DdbConfigItem: + """ + object representation of the config item stored in the dynamodb config table + + There can only ever be 1 config item stored in dynamodb as this item represents the global configuration + data of the solution that may be updated dynamically (is not stored in the lambda environment) + """ + + organization_id: str = "" + remote_account_ids: list[str] = field(default_factory=list) + + def to_item(self) -> dict[str, AttributeValueTypeDef]: + """Return this object as a dict suitable for a call to DynamoDB `put_item`""" + return { + "type": {"S": "config"}, + "name": {"S": "scheduler"}, + "organization_id": { + "S": self.organization_id + }, # todo: check if this was an omittable field + **skip_if_empty("remote_account_ids", {"SS": self.remote_account_ids}), + } + + @classmethod + def from_item(cls, item: dict[str, AttributeValueTypeDef]) -> "DdbConfigItem": + return DdbConfigItem( + organization_id=item.get("organization_id", {}).get("S", ""), + remote_account_ids=list(item.get("remote_account_ids", {}).get("SS", [])), + ) diff --git a/source/app/instance_scheduler/model/ddb_item_utils.py b/source/app/instance_scheduler/model/ddb_item_utils.py new file mode 100644 index 00000000..5128fd8c --- /dev/null +++ b/source/app/instance_scheduler/model/ddb_item_utils.py @@ -0,0 +1,171 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from collections.abc import Mapping +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Sequence, + TypedDict, + TypeGuard, + TypeVar, +) + +if TYPE_CHECKING: + from mypy_boto3_dynamodb.type_defs import AttributeValueTypeDef +else: + AttributeValueTypeDef = object + +OptionalAttributeValue = TypedDict( + "OptionalAttributeValue", + { + "S": Optional[str], + "N": Optional[str], + "B": Optional[bytes], + "SS": Optional[Sequence[str]], + "NS": Optional[Sequence[str]], + "BS": Optional[Sequence[bytes]], + "M": Optional[Mapping[str, Any]], + "L": Optional[Sequence[Any]], + "NULL": Optional[bool], + "BOOL": Optional[bool], + }, + total=False, +) + +ParseFunctionReturnType = TypeVar("ParseFunctionReturnType") +ParseFunctionInputType = TypeVar("ParseFunctionInputType") +DefaultType = TypeVar("DefaultType") + + +def optionally( + parsing_func: Callable[[ParseFunctionInputType], ParseFunctionReturnType], + input_to_parse: Optional[ParseFunctionInputType], + default: DefaultType, +) -> ParseFunctionReturnType | DefaultType: + """ + parse an Optional[T] input using any other parsing function that accepts T (not optional) + + if the provided input is falsy, then default will be returned. 
Otherwise the parsing function will be invoked to + parse the input + """ + if input_to_parse: + return parsing_func(input_to_parse) + else: + return default + + +def parse_str(value: AttributeValueTypeDef) -> str: + """ + parse an AttributeValueTypeDef as a str value + + Will throw an error if values is not of type S + """ + if "S" in value: + return value["S"] + else: + raise ValueError(f"unable to parse string {value}, must be of type 'S'") + + +def parse_bool(value: AttributeValueTypeDef) -> bool: + """ + coerce an AttributeValueTypeDef returned from a DynamoDB item into a boolean value + + Support values that will be coerced into booleans: + + - S types containing "true" or "false" (case-insensitive) + - BOOL types of True/False + """ + if "S" in value: + if value["S"].lower() == "true": + return True + elif value["S"].lower() == "false": + return False + else: + raise ValueError(f"unknown bool value {value}, must be 'true' or 'false'") + elif "BOOL" in value: + return value["BOOL"] + else: + raise ValueError( + f"unable to parse bool value from {value}, must be of type 'S' or 'BOOL'" + ) + + +def parse_str_set(value: AttributeValueTypeDef) -> set[str]: + """ + coerce an AttributeValueTypeDef returned from a DynamoDB item into a String sequences + + Support values that will be coerced into String sequences: + + - SS types containing lists of strings + - S types containing comma-separated lists values (spaces are not stripped!) + """ + if "S" in value: + return set(value["S"].split(",") if value["S"] != "" else []) + elif "SS" in value: + return set(value["SS"]) + else: + raise ValueError( + f"unable to parse string set from {value}, must be of type 'S' or 'SS'" + ) + + +def skip_if_none( + key: str, value: OptionalAttributeValue +) -> dict[str, AttributeValueTypeDef]: + """ + helper function for skipping inclusion into a ddb item if the element does not have a configured value. 
This allows + saving optional values to dynamodb that should not be included in the item if they are not configured + example usage: + + { + **skip_if_none("description", {"S": string_that_might_be_empty) + } + + differs from skip_if_empty() -- empty sequences will be included while None sequences will be excluded + """ + if _is_non_none_entry(value): + return {key: value} + else: + return {} + + +def skip_if_empty( + key: str, value: OptionalAttributeValue +) -> dict[str, AttributeValueTypeDef]: + """ + helper function for skipping inclusion into a ddb item if the element does not have a configured value. This allows + saving optional values to dynamodb that should not be included in the item if they are not configured + example usage: + + { + **skip_if_empty("periods", {"SS": sequence_that_might_be_empty) + } + + differs from skip_if_none() -- empty sequences will be excluded the same as if they were set to None + """ + if _is_non_empty_entry(value): + return {key: value} + else: + return {} + + +def _is_non_none_entry( + entry: OptionalAttributeValue, +) -> TypeGuard[AttributeValueTypeDef]: + return any(value is not None for value in entry.values()) + + +def _is_non_empty_entry( + entry: OptionalAttributeValue, +) -> TypeGuard[AttributeValueTypeDef]: + for val in entry.values(): + if val is not None: + # sequences/maps need the extra len check to confirm non-empty + if isinstance(val, Sequence) or isinstance(val, Mapping): + if len(val) > 0: + return True + else: + return True + return False diff --git a/source/app/instance_scheduler/model/maint_win.py b/source/app/instance_scheduler/model/maint_win.py index a11a8193..545c0612 100644 --- a/source/app/instance_scheduler/model/maint_win.py +++ b/source/app/instance_scheduler/model/maint_win.py @@ -1,27 +1,61 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 +from collections.abc import Mapping, Sequence from dataclasses import dataclass -from datetime import datetime +from datetime import datetime, timedelta +from decimal import Decimal from re import fullmatch -from typing import TYPE_CHECKING, Final +from typing import TYPE_CHECKING, Any, Final, Union from zoneinfo import ZoneInfo from dateutil.parser import isoparse +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.configuration.running_period import RunningPeriod +from instance_scheduler.configuration.running_period_dict_element import ( + RunningPeriodDictElement, +) +from instance_scheduler.configuration.time_utils import parse_time_str +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.cron.expression import CronSingleValueNumeric +from instance_scheduler.schedulers.states import ScheduleState from instance_scheduler.util.time import is_aware +from instance_scheduler.util.validation import ( + validate_number_item, + validate_string_item, +) if TYPE_CHECKING: - from mypy_boto3_dynamodb.type_defs import ( - AttributeValueTypeDef, - GetItemOutputTypeDef, - ) + from mypy_boto3_dynamodb.type_defs import AttributeValueTypeDef from mypy_boto3_ssm.type_defs import MaintenanceWindowIdentityTypeDef else: AttributeValueTypeDef = object - GetItemOutputTypeDef = object MaintenanceWindowIdentityTypeDef = object +ItemTypeDef = dict[ + str, + Union[ + bytes, + bytearray, + str, + int, + Decimal, + bool, + set[int], + set[Decimal], + set[str], + set[bytes], + set[bytearray], + Sequence[Any], + Mapping[str, Any], + None, + ], +] + +WINDOW_ID_LENGTH = 20 # window id has a fixed length of 20 + + class EC2SSMMaintenanceWindowValidationError(Exception): """An error occurred while validating the consistency of the maintenance window""" @@ -50,11 +84,19 @@ class EC2SSMMaintenanceWindow: window_name: str schedule_timezone: 
ZoneInfo next_execution_time: datetime - duration: int + duration_hours: int def __post_init__(self) -> None: self._validate() + @property + def account_region(self) -> str: + return f"{self.account_id}:{self.region}" + + @property + def name_id(self) -> str: + return f"{self.window_name}:{self.window_id}" + def _validate(self) -> None: # https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_CreateMaintenanceWindow.html if not fullmatch(r"\d{12}", self.account_id): @@ -75,9 +117,9 @@ def _validate(self) -> None: raise EC2SSMMaintenanceWindowValidationError( f"Non-timezone-aware datetime: {self.next_execution_time}" ) - if self.duration < 1 or self.duration > 24: + if self.duration_hours < 1 or self.duration_hours > 24: raise EC2SSMMaintenanceWindowValidationError( - f"Invalid duration: {self.duration}" + f"Invalid duration: {self.duration_hours}" ) def to_item(self) -> dict[str, AttributeValueTypeDef]: @@ -86,21 +128,130 @@ def to_item(self) -> dict[str, AttributeValueTypeDef]: # `NextExecutionTime` is encoded using `isoformat`, which produces a stricter # output than the SSM service. 
return { - "account-region": {"S": f"{self.account_id}:{self.region}"}, - "WindowId": {"S": self.window_id}, - "Name": {"S": self.window_name}, + "account-region": {"S": self.account_region}, + "name-id": {"S": self.name_id}, "ScheduleTimezone": {"S": str(self.schedule_timezone)}, "NextExecutionTime": {"S": self.next_execution_time.isoformat()}, - "Duration": {"N": str(self.duration)}, + "Duration": {"N": str(self.duration_hours)}, } - def to_key(self) -> dict[str, str]: + def to_key(self) -> dict[str, AttributeValueTypeDef]: """Return this object as a key suitable for a call to DynamoDB `delete_item`""" return { - "account-region": f"{self.account_id}:{self.region}", - "Name": self.window_name, + "account-region": {"S": f"{self.account_id}:{self.region}"}, + "name-id": {"S": f"{self.window_name}:{self.window_id}"}, } + def is_running_at(self, dt: datetime, scheduler_interval_minutes: int) -> bool: + return ( + self.to_schedule(scheduler_interval_minutes).get_desired_state(dt)[0] + == ScheduleState.RUNNING + ) + + def to_schedule(self, scheduler_interval_minutes: int) -> InstanceSchedule: + """convert this maintenance window into a schedule""" + name_id = f"{self.window_name}:{self.window_id}" + window_begin_dt: Final = self.next_execution_time.replace( + second=0, microsecond=0 + ) + margin_minutes: Final = scheduler_interval_minutes + 10 + period_begin_dt: Final = window_begin_dt - timedelta(minutes=margin_minutes) + period_end_dt: Final = window_begin_dt + timedelta(hours=self.duration_hours) + + if period_begin_dt.day == period_end_dt.day: + periods: list[RunningPeriodDictElement] = [ + { + "period": RunningPeriod( + name=f"{name_id}-period", + begintime=period_begin_dt.time(), + endtime=period_end_dt.time(), + cron_recurrence=CronRecurrenceExpression( + monthdays=CronSingleValueNumeric(period_begin_dt.day), + months=CronSingleValueNumeric(period_begin_dt.month), + ), + ), + "instancetype": None, + } + ] + elif period_end_dt - period_begin_dt <= 
timedelta(hours=24): + periods = [ + { + "period": RunningPeriod( + name=f"{name_id}-period-1", + begintime=period_begin_dt.time(), + endtime=parse_time_str("23:59"), + cron_recurrence=CronRecurrenceExpression( + monthdays=CronSingleValueNumeric(period_begin_dt.day), + months=CronSingleValueNumeric(period_begin_dt.month), + ), + ), + "instancetype": None, + }, + { + "period": RunningPeriod( + name=f"{name_id}-period-2", + begintime=parse_time_str("00:00"), + endtime=period_end_dt.time(), + cron_recurrence=CronRecurrenceExpression( + monthdays=CronSingleValueNumeric(period_end_dt.day), + months=CronSingleValueNumeric(period_end_dt.month), + ), + ), + "instancetype": None, + }, + ] + else: + periods = [ + { + "period": RunningPeriod( + name=f"{name_id}-period-1", + begintime=period_begin_dt.time(), + endtime=parse_time_str("23:59"), + cron_recurrence=CronRecurrenceExpression( + monthdays=CronSingleValueNumeric(period_begin_dt.day), + months=CronSingleValueNumeric(period_begin_dt.month), + ), + ), + "instancetype": None, + }, + { + "period": RunningPeriod( + name=f"{name_id}-period-2", + cron_recurrence=CronRecurrenceExpression( + monthdays=CronSingleValueNumeric( + (period_end_dt - timedelta(days=1)).day + ), + months=CronSingleValueNumeric( + (period_end_dt - timedelta(days=1)).month + ), + ), + ), + "instancetype": None, + }, + { + "period": RunningPeriod( + name=f"{name_id}-period-3", + begintime=parse_time_str("00:00"), + endtime=period_end_dt.time(), + cron_recurrence=CronRecurrenceExpression( + monthdays=CronSingleValueNumeric(period_end_dt.day), + months=CronSingleValueNumeric(period_end_dt.month), + ), + ), + "instancetype": None, + }, + ] + + schedule: Final = InstanceSchedule( + name=name_id, + timezone=self.schedule_timezone, + description=f"{name_id} maintenance window", + enforced=True, + periods=periods, + ) + + return schedule + @classmethod def from_identity( cls, *, identity: MaintenanceWindowIdentityTypeDef, account_id: str, region: str @@ -125,22 
+276,38 @@ def from_identity( window_name=identity["Name"], schedule_timezone=ZoneInfo(identity.get("ScheduleTimezone", "UTC")), next_execution_time=isoparse(identity["NextExecutionTime"]), - duration=identity["Duration"], + duration_hours=identity["Duration"], ) @classmethod - def from_item(cls, item: GetItemOutputTypeDef) -> "EC2SSMMaintenanceWindow": + def from_item( + cls, item: dict[str, AttributeValueTypeDef] + ) -> "EC2SSMMaintenanceWindow": """Return a maintenance window object from a DynamoDB `get_item` response""" # Like `from_identity`, this function must use `isoparse` to parse # `next_execution_time` because Instance Scheduler may have stored the value # from the service response verbatim. - account_region: Final = item["Item"]["account-region"]["S"].split(":") + + # in addition to the fields explicitly loaded here, maintenance window items + # may have a Number attribute named TimeToLive that is not used + + # the output type of a table scan is wider than query, so more validation is + # required + validate_string_item(item, "account-region", True) + validate_string_item(item, "name-id", True) + validate_string_item(item, "ScheduleTimezone", True) + validate_string_item(item, "NextExecutionTime", True) + validate_number_item(item, "Duration", True) + + account_id, region = item["account-region"]["S"].split(":") + window_name, window_id = item["name-id"]["S"].split(":") + return EC2SSMMaintenanceWindow( - account_id=account_region[0], - region=account_region[1], - window_id=item["Item"]["WindowId"]["S"], - window_name=item["Item"]["Name"]["S"], - schedule_timezone=ZoneInfo(item["Item"]["ScheduleTimezone"]["S"]), - next_execution_time=isoparse(item["Item"]["NextExecutionTime"]["S"]), - duration=int(item["Item"]["Duration"]["N"]), + account_id=account_id, + region=region, + window_id=window_id, + window_name=window_name, + schedule_timezone=ZoneInfo(item["ScheduleTimezone"]["S"]), + next_execution_time=isoparse(item["NextExecutionTime"]["S"]), + 
duration_hours=int(item["Duration"]["N"]), ) diff --git a/source/app/instance_scheduler/model/maint_win_store.py b/source/app/instance_scheduler/model/maint_win_store.py deleted file mode 100644 index 49d0381c..00000000 --- a/source/app/instance_scheduler/model/maint_win_store.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -from datetime import datetime, timedelta -from typing import TYPE_CHECKING, Any, Final - -from boto3 import Session -from boto3.dynamodb.conditions import Key - -from instance_scheduler.util import get_boto_config -from instance_scheduler.util.logger import Logger - -if TYPE_CHECKING: - from mypy_boto3_dynamodb.service_resource import DynamoDBServiceResource, Table - from mypy_boto3_dynamodb.type_defs import ( - ScanInputTableScanTypeDef, - ScanOutputTableTypeDef, - ) - from mypy_boto3_ssm.type_defs import MaintenanceWindowIdentityTypeDef -else: - Table = object - ScanInputTableScanTypeDef = object - ScanOutputTableTypeDef = object - MaintenanceWindowIdentityTypeDef = object - - -class EC2SSMMaintenanceWindowStore: - def __init__(self, *, session: Session, table_name: str, logger: Logger) -> None: - self._logger: Final = logger - ddb: Final[DynamoDBServiceResource] = session.resource( - "dynamodb", config=get_boto_config() - ) - self._table: Final[Table] = ddb.Table(table_name) - - def get_ssm_windows_db(self, *, account: str, region: str) -> list[Any]: - """ - This function gets all the periods for a given ssm windows from the database. 
- """ - maintenance_windows: ScanOutputTableTypeDef = {} # type: ignore - account_region_string = account + ":" + region - try: - scan_kwargs: ScanInputTableScanTypeDef = { - "FilterExpression": Key("account-region").eq(account_region_string), - } - maintenance_windows = self._table.scan(**scan_kwargs) - except Exception as error: - self._logger.error( - "Caught Exception while getting maintenance windows from Dynamodb: {}".format( - error - ) - ) - window_list = maintenance_windows.get("Items", []) - last_evaluated_key = maintenance_windows.get("LastEvaluatedKey", None) - while last_evaluated_key is not None: - self._logger.debug(str(maintenance_windows["LastEvaluatedKey"])) - try: - scan_kwargs = { - "FilterExpression": Key("account-region").eq(account_region_string), - "ExclusiveStartKey": last_evaluated_key, - } - maintenance_windows = self._table.scan(**scan_kwargs) - except Exception as error: - self._logger.error( - "Caught Exception while getting maintenance windows from Dynamodb: {}".format( - error - ) - ) - last_evaluated_key = maintenance_windows.get("LastEvaluatedKey", None) - window_list.extend(maintenance_windows.get("Items", [])) - return window_list - - def put_window_dynamodb( - self, *, window: MaintenanceWindowIdentityTypeDef, account: str, region: str - ) -> None: - """ - This function adds the ssm window entry to the database. 
- - Parameters: - SSM window object - """ - try: - duration = window["Duration"] - if "ScheduleTimezone" in window: - execution_time = datetime.strptime( - window["NextExecutionTime"], "%Y-%m-%dT%H:%M%z" - ) - else: - execution_time = datetime.strptime( - window["NextExecutionTime"], "%Y-%m-%dT%H:%MZ" - ) - window["ScheduleTimezone"] = "UTC" - - ttl = execution_time + timedelta(hours=int(duration)) - epoch_time_to_live = int( - datetime(ttl.year, ttl.month, ttl.day, ttl.hour, ttl.minute).timestamp() - ) - self._table.put_item( - Item={ - "Name": window["Name"], - "NextExecutionTime": window["NextExecutionTime"], - "Duration": window["Duration"], - "WindowId": window["WindowId"], - "TimeToLive": epoch_time_to_live, - "account-region": account + ":" + region, - "ScheduleTimezone": window["ScheduleTimezone"], - } - ) - except Exception as error: - self._logger.info( - "Unable to put maintenance window in Dynamodb: {}".format(error) - ) - - def delete_window(self, window: Any) -> None: - self._table.delete_item( - Key={ - "Name": window["Name"], - "account-region": window["account-region"], - } - ) diff --git a/source/app/instance_scheduler/model/period_definition.py b/source/app/instance_scheduler/model/period_definition.py new file mode 100644 index 00000000..a7ead072 --- /dev/null +++ b/source/app/instance_scheduler/model/period_definition.py @@ -0,0 +1,225 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import inspect +from dataclasses import dataclass +from typing import ( + TYPE_CHECKING, + Any, + Final, + Iterable, + NotRequired, + Optional, + TypedDict, + TypeGuard, +) + +from instance_scheduler.configuration.running_period import ( + RunningPeriod, + RunningPeriodValidationException, +) +from instance_scheduler.configuration.time_utils import ( + TIME_FORMAT, + is_valid_time_str, + parse_time_str, +) +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.cron.parser import ( + parse_monthdays_expr, + parse_months_expr, + parse_weekdays_expr, +) +from instance_scheduler.model.ddb_item_utils import ( + optionally, + parse_str, + parse_str_set, + skip_if_empty, + skip_if_none, +) +from instance_scheduler.util.validation import ValidationException, validate_string + +if TYPE_CHECKING: + from mypy_boto3_dynamodb.type_defs import AttributeValueTypeDef +else: + AttributeValueTypeDef = object + + +class InvalidPeriodDefinition(Exception): + pass + + +class PeriodParams(TypedDict): + """ + Dict definition of a period used for api calls and interaction + """ + + name: str + begintime: NotRequired[str] + endtime: NotRequired[str] + weekdays: NotRequired[str] # csv string + monthdays: NotRequired[str] # csv string + months: NotRequired[str] # csv string + description: NotRequired[str] + configured_in_stack: NotRequired[str] + + +def validate_as_period_params(untyped_dict: dict[str, Any]) -> TypeGuard[PeriodParams]: + """ + validate if an unknown dict conforms to the PeriodParams shape + + This method will either return true (no errors) or raise a ValidationException describing why the provided dict + does not conform to PeriodParams + """ + valid_keys = inspect.get_annotations(PeriodParams).keys() + for key in untyped_dict.keys(): + if key not in valid_keys: + raise ValidationException( + f"{key} is not a valid parameter, valid parameters are {valid_keys}" + ) + + 
validate_string(untyped_dict, "name", required=True) + validate_string(untyped_dict, "begintime", required=False) + validate_string(untyped_dict, "endtime", required=False) + validate_string(untyped_dict, "description", required=False) + validate_string(untyped_dict, "weekdays", required=False) + validate_string(untyped_dict, "monthdays", required=False) + validate_string(untyped_dict, "months", required=False) + return True + + +@dataclass +class PeriodDefinition: + name: str + begintime: Optional[str] = None + endtime: Optional[str] = None + weekdays: Optional[set[str]] = None + months: Optional[set[str]] = None + monthdays: Optional[set[str]] = None + description: Optional[str] = None + configured_in_stack: Optional[str] = None + + def __post_init__(self) -> None: + self.validate() + # todo: may need to coerce optional sequences to empty sets here + + def validate(self) -> None: + # will throw validation exceptions + if not self.name: + raise InvalidPeriodDefinition("Period name is required") + + if self.begintime and not is_valid_time_str(self.begintime): + raise InvalidPeriodDefinition( + f'Invalid begintime "{self.begintime}". must match the format {TIME_FORMAT}' + ) + + if self.endtime and not is_valid_time_str(self.endtime): + raise InvalidPeriodDefinition( + f'Invalid endtime "{self.endtime}". 
must match the format {TIME_FORMAT}' + ) + + cron_fields: Final = ( + self.begintime, + self.endtime, + self.weekdays, + self.months, + self.monthdays, + ) + # must specify at least one + if not any(field is not None for field in cron_fields): + raise InvalidPeriodDefinition( + "Must contain at least one of begintime, endtime, weekdays, months, monthdays" + ) + + try: + self.to_running_period() + except RunningPeriodValidationException as e: + raise InvalidPeriodDefinition(e) + except ValueError as ve: + raise InvalidPeriodDefinition(ve) + + def to_running_period(self) -> RunningPeriod: + return RunningPeriod( + name=self.name, + begintime=optionally(parse_time_str, self.begintime, None), + endtime=optionally(parse_time_str, self.endtime, None), + cron_recurrence=CronRecurrenceExpression( + weekdays=parse_weekdays_expr(self.weekdays), + monthdays=parse_monthdays_expr(self.monthdays), + months=parse_months_expr(self.months), + ), + ) + + def to_item(self) -> dict[str, AttributeValueTypeDef]: + """Return this object as a dict suitable for a call to DynamoDB `put_item`""" + return { + "type": {"S": "period"}, + "name": {"S": self.name}, + **skip_if_none("begintime", {"S": self.begintime}), + **skip_if_none("endtime", {"S": self.endtime}), + **skip_if_empty("weekdays", {"SS": _optional_list(self.weekdays)}), + **skip_if_empty("monthdays", {"SS": _optional_list(self.monthdays)}), + **skip_if_empty("months", {"SS": _optional_list(self.months)}), + **skip_if_none("description", {"S": self.description}), + **skip_if_none("configured_in_stack", {"S": self.configured_in_stack}), + } + + @classmethod + def from_item(cls, item: dict[str, AttributeValueTypeDef]) -> "PeriodDefinition": + return PeriodDefinition( + name=parse_str(item["name"]), + begintime=optionally(parse_str, item.get("begintime"), None), + endtime=optionally(parse_str, item.get("endtime"), None), + weekdays=optionally(parse_str_set, item.get("weekdays"), None), + months=optionally(parse_str_set, 
item.get("months"), None), + monthdays=optionally(parse_str_set, item.get("monthdays"), None), + description=optionally(parse_str, item.get("description"), None), + configured_in_stack=optionally( + parse_str, item.get("configured_in_stack"), None + ), + ) + + def to_period_params(self) -> PeriodParams: + params: PeriodParams = {"name": self.name} + if self.begintime: + params["begintime"] = self.begintime + if self.endtime: + params["endtime"] = self.endtime + if self.weekdays: + params["weekdays"] = ",".join(self.weekdays) + if self.monthdays: + params["monthdays"] = ",".join(self.monthdays) + if self.months: + params["months"] = ",".join(self.months) + if self.description: + params["description"] = self.description + if self.configured_in_stack: + params["configured_in_stack"] = self.configured_in_stack + + return params + + @classmethod + def from_period_params(cls, params: PeriodParams) -> "PeriodDefinition": + """ + convert PeriodParams to a RunningPeriodDefinition + + This method may raise InvalidPeriodDefinition if RunningPeriod invariants are violated + """ + return PeriodDefinition( + name=params["name"], + begintime=params.get("begintime", None), + endtime=params.get("endtime", None), + weekdays=optionally(parse_csv_as_set, params.get("weekdays"), None), + months=optionally(parse_csv_as_set, params.get("months"), None), + monthdays=optionally(parse_csv_as_set, params.get("monthdays"), None), + description=params.get("description", None), + configured_in_stack=params.get("configured_in_stack", None), + ) + + +def _optional_list(input: Optional[Iterable[str]]) -> Optional[list[str]]: + if input is None: + return None + return list(input) + + +def parse_csv_as_set(value: str) -> set[str]: + return {token.strip() for token in value.split(",")} diff --git a/source/app/instance_scheduler/model/period_identifier.py b/source/app/instance_scheduler/model/period_identifier.py new file mode 100644 index 00000000..10be7322 --- /dev/null +++ 
b/source/app/instance_scheduler/model/period_identifier.py @@ -0,0 +1,26 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import Optional + + +class PeriodIdentifier(str): + @property + def name(self) -> str: + return self.split("@")[0] + + @property + def desired_type(self) -> Optional[str]: + tokens = self.split("@") + if len(tokens) > 1: + return tokens[1] + else: + return None + + @classmethod + def of( + cls, period_name: str, instance_type: Optional[str] = None + ) -> "PeriodIdentifier": + if instance_type: + return PeriodIdentifier(f"{period_name}@{instance_type}") + else: + return PeriodIdentifier(period_name) diff --git a/source/app/instance_scheduler/model/schedule_definition.py b/source/app/instance_scheduler/model/schedule_definition.py new file mode 100644 index 00000000..f02dedb0 --- /dev/null +++ b/source/app/instance_scheduler/model/schedule_definition.py @@ -0,0 +1,290 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import inspect +from dataclasses import dataclass, field +from os import environ +from typing import ( + TYPE_CHECKING, + Any, + NotRequired, + Optional, + Sequence, + TypedDict, + TypeGuard, +) +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError + +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.configuration.running_period_dict_element import ( + RunningPeriodDictElement, +) +from instance_scheduler.model.ddb_item_utils import ( + optionally, + parse_bool, + parse_str_set, + skip_if_empty, + skip_if_none, +) +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.util.validation import ( + ValidationException, + validate_boolean, + validate_string, + validate_string_list, +) + +if TYPE_CHECKING: + from mypy_boto3_dynamodb.type_defs import AttributeValueTypeDef +else: + AttributeValueTypeDef = object + + +class InvalidScheduleDefinition(Exception): + pass + + +class ScheduleParams(TypedDict): + name: str + timezone: NotRequired[str] + periods: NotRequired[str] # comma separated list + description: NotRequired[str] + overwrite: NotRequired[bool] + stop_new_instances: NotRequired[bool] + ssm_maintenance_window: NotRequired[Sequence[str]] + retain_running: NotRequired[bool] + enforced: NotRequired[bool] + hibernate: NotRequired[bool] + override_status: NotRequired[str] + configured_in_stack: NotRequired[str] + + +def validate_as_schedule_params( + untyped_dict: dict[str, Any] +) -> TypeGuard[ScheduleParams]: + """ + validate if an unknown dict conforms to the ScheduleParams shape + + This method will either return true (no errors) or raise a ValidationException describing why the provided dict + does not conform to ScheduleParams + """ + valid_keys = inspect.get_annotations(ScheduleParams).keys() + for key in untyped_dict.keys(): + if key 
not in valid_keys: + raise ValidationException( + f"{key} is not a valid parameter, valid parameters are {valid_keys}" + ) + + validate_string(untyped_dict, "name", required=True) + validate_string(untyped_dict, "timezone", required=False) + validate_string(untyped_dict, "periods", required=False) + validate_string(untyped_dict, "description", required=False) + validate_boolean(untyped_dict, "overwrite", required=False) + validate_boolean(untyped_dict, "stop_new_instances", required=False) + validate_string_list(untyped_dict, "ssm_maintenance_window", required=False) + validate_boolean(untyped_dict, "retain_running", required=False) + validate_boolean(untyped_dict, "enforced", required=False) + validate_boolean(untyped_dict, "hibernate", required=False) + validate_string(untyped_dict, "override_status", required=False) + validate_string(untyped_dict, "configured_in_stack", required=False) + return True + + +@dataclass +class ScheduleDefinition: + name: str + periods: Sequence[PeriodIdentifier] = field(default_factory=list) + timezone: Optional[str] = None + description: Optional[str] = None + override_status: Optional[str] = None + stop_new_instances: Optional[bool] = None + ssm_maintenance_window: Optional[Sequence[str]] = None + enforced: Optional[bool] = None + hibernate: Optional[bool] = None + retain_running: Optional[bool] = None + configured_in_stack: Optional[str] = None + + def __post_init__(self) -> None: + self.override_status = ( + self.override_status.lower() if self.override_status else None + ) + self.validate() + + def validate(self) -> None: + # todo: more validation -- use maint window with no maint window name? 
+ if not self.name: + raise InvalidScheduleDefinition("Schedule name is required") + + if self.timezone: + try: + _ = ZoneInfo(self.timezone) + except ZoneInfoNotFoundError: + raise InvalidScheduleDefinition(f"Unknown timezone {self.timezone}") + + if self.override_status not in [ + None, + "running", + "stopped", + ]: + raise InvalidScheduleDefinition( + f"Invalid override_status {self.override_status}, " + f"valid values are running and stopped" + ) + + if not self.periods and not self.override_status: + raise InvalidScheduleDefinition( + "At least one period must be specified for a schedule" + ) + + def to_item( + self, + ) -> dict[str, AttributeValueTypeDef]: + return { + "type": {"S": "schedule"}, + "name": {"S": self.name}, + **skip_if_empty("periods", {"SS": self.periods}), + **skip_if_none("timezone", {"S": self.timezone}), + **skip_if_none("description", {"S": self.description}), + **skip_if_none("override_status", {"S": self.override_status}), + **skip_if_none("stop_new_instances", {"BOOL": self.stop_new_instances}), + **skip_if_empty( + "ssm_maintenance_window", {"SS": self.ssm_maintenance_window} + ), + **skip_if_none("enforced", {"BOOL": self.enforced}), + **skip_if_none("hibernate", {"BOOL": self.hibernate}), + **skip_if_none("retain_running", {"BOOL": self.retain_running}), + **skip_if_none("configured_in_stack", {"S": self.configured_in_stack}), + } # must be handled by data store separately + + @classmethod + def from_item( + cls, + item: dict[str, AttributeValueTypeDef], + ) -> "ScheduleDefinition": + raw_periods: set[str] = optionally(parse_str_set, item.get("periods"), set()) + return ScheduleDefinition( + name=item["name"]["S"], + periods=[PeriodIdentifier(pid) for pid in raw_periods], + timezone=item.get("timezone", {}).get("S", None), + description=item.get("description", {}).get("S", None), + override_status=item.get("override_status", {}).get("S", None), + stop_new_instances=optionally( + parse_bool, item.get("stop_new_instances"), None + 
), + ssm_maintenance_window=list( + optionally(parse_str_set, item.get("ssm_maintenance_window"), []) + ) + or None, + enforced=optionally(parse_bool, item.get("enforced"), None), + hibernate=optionally(parse_bool, item.get("hibernate"), None), + retain_running=optionally(parse_bool, item.get("retain_running"), None), + configured_in_stack=item.get("configured_in_stack", {}).get("S", None), + ) + + def to_schedule_params(self) -> ScheduleParams: + params: ScheduleParams = {"name": self.name} + if self.periods: + params["periods"] = ",".join(self.periods) + if self.timezone: + params["timezone"] = self.timezone + if self.description: + params["description"] = self.description + if self.override_status: + params["override_status"] = self.override_status + if self.stop_new_instances is not None: + params["stop_new_instances"] = self.stop_new_instances + if self.ssm_maintenance_window: + params["ssm_maintenance_window"] = self.ssm_maintenance_window + if self.enforced is not None: + params["enforced"] = self.enforced + if self.hibernate is not None: + params["hibernate"] = self.hibernate + if self.retain_running is not None: + params["retain_running"] = self.retain_running + if self.configured_in_stack: + params["configured_in_stack"] = self.configured_in_stack + + return params + + @classmethod + def from_schedule_params(cls, params: ScheduleParams) -> "ScheduleDefinition": + return ScheduleDefinition( + name=params["name"], + periods=_period_ids_from_csv(params.get("periods", None)), + timezone=params.get("timezone", None), + description=params.get("description", None), + override_status=params.get("override_status", None), + stop_new_instances=params.get("stop_new_instances", None), + ssm_maintenance_window=params.get("ssm_maintenance_window", None), + enforced=params.get("enforced", None), + hibernate=params.get("hibernate", None), + retain_running=params.get("retain_running", None), + configured_in_stack=params.get("configured_in_stack", None), + ) + + def 
to_instance_schedule( + self, + period_store: PeriodDefinitionStore, + ) -> InstanceSchedule: + fetched_periods = self.build_periods(period_store) + + return InstanceSchedule( + name=self.name, + periods=fetched_periods, + timezone=self.build_timezone(), + override_status=self.override_status, + description=self.description, + stop_new_instances=( + bool(self.stop_new_instances) + if self.stop_new_instances is not None + else True + ), + ssm_maintenance_window=self.ssm_maintenance_window, + enforced=bool(self.enforced), + hibernate=bool(self.hibernate), + retain_running=bool(self.retain_running), + configured_in_stack=self.configured_in_stack, + ) + + def build_periods( + self, + period_store: PeriodDefinitionStore, + ) -> list[RunningPeriodDictElement]: + typed_periods: list[RunningPeriodDictElement] = [] + for period_id in self.periods: + period_def = period_store.find_by_name(period_id.name) + + if not period_def: + raise InvalidScheduleDefinition( + f"Unable to find period definition for {period_id.name}" + ) + + if period_id.desired_type: + typed_periods.append( + RunningPeriodDictElement( + period=period_def.to_running_period(), + instancetype=period_id.desired_type, + ) + ) + else: + typed_periods.append( + RunningPeriodDictElement(period=period_def.to_running_period()) + ) + return typed_periods + + def build_timezone(self) -> ZoneInfo: + if self.timezone: + return ZoneInfo(self.timezone) + elif "DEFAULT_TIMEZONE" in environ: + return ZoneInfo( + environ["DEFAULT_TIMEZONE"] + ) # todo: handle this in a correctly typed fashion + else: + return ZoneInfo("UTC") + + +def _period_ids_from_csv(csv_str: Optional[str]) -> Sequence[PeriodIdentifier]: + if not csv_str: + return [] + else: + return [PeriodIdentifier(period_name) for period_name in csv_str.split(",")] diff --git a/source/app/instance_scheduler/model/store/__init__.py b/source/app/instance_scheduler/model/store/__init__.py new file mode 100644 index 00000000..04f8b7b7 --- /dev/null +++ 
b/source/app/instance_scheduler/model/store/__init__.py @@ -0,0 +1,2 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 diff --git a/source/app/instance_scheduler/model/store/ddb_config_item_store.py b/source/app/instance_scheduler/model/store/ddb_config_item_store.py new file mode 100644 index 00000000..5806c470 --- /dev/null +++ b/source/app/instance_scheduler/model/store/ddb_config_item_store.py @@ -0,0 +1,94 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import Final + +from botocore.exceptions import ClientError + +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.store.dynamo_client import hub_dynamo_client + + +class DdbConfigItemNotFound(Exception): + pass + + +class DdbConfigItemStore: + def __init__( + self, + table_name: str, + ): + self._table: Final = table_name + + def put(self, config_item: DdbConfigItem) -> None: + """ + save this item to dynamo + + unlike the other stores, this will ALWAYS OVERWRITE because there is always exactly 1 DdbConfigItem + saved in dynamo + """ + hub_dynamo_client().put_item( + TableName=self._table, + Item=config_item.to_item(), + ) + + def get(self) -> DdbConfigItem: + """fetch the DdbConfigItem from dynamo""" + result = hub_dynamo_client().get_item( + TableName=self._table, + Key={"type": {"S": "config"}, "name": {"S": "scheduler"}}, + ) + + if "Item" in result: + return DdbConfigItem.from_item(result["Item"]) + else: + raise DdbConfigItemNotFound( + "scheduler config item not found in config table!" 
# NOSONAR -- same error string is not duplication + ) + + def register_spoke_accounts(self, account_ids: set[str]) -> DdbConfigItem: + """ + register spoke accounts for scheduling + :returns the updated DdbConfigItem + """ + result = hub_dynamo_client().update_item( + TableName=self._table, + Key={"type": {"S": "config"}, "name": {"S": "scheduler"}}, + UpdateExpression="add remote_account_ids :a", + ExpressionAttributeValues={":a": {"SS": list(account_ids)}}, + ReturnValues="ALL_NEW", + ) + + if "Attributes" in result: + return DdbConfigItem.from_item(result["Attributes"]) + else: + raise DdbConfigItemNotFound( + "scheduler config item not found in config table!" # NOSONAR -- same error string is not duplication + ) + + def deregister_spoke_accounts(self, account_ids: set[str]) -> DdbConfigItem: + """ + remove spoke accounts from scheduling + :returns the updated DdbConfigItem + """ + try: + result = hub_dynamo_client().update_item( + TableName=self._table, + Key={"type": {"S": "config"}, "name": {"S": "scheduler"}}, + UpdateExpression="delete remote_account_ids :a", + ExpressionAttributeValues={":a": {"SS": list(account_ids)}}, + ReturnValues="ALL_NEW", + ) + + if "Attributes" in result: + return DdbConfigItem.from_item(result["Attributes"]) + else: + raise DdbConfigItemNotFound( + "scheduler config item not found in config table!" # NOSONAR -- same error string is not duplication + ) + except ClientError as ce: + if ce.response["Error"]["Code"] == "ValidationException": + # this error can occur when the last spoke account was removed by this function as dynamo deletes the + # "column" from the item entirely which causes the update expression to reference a field + # that does not exist. 
+ pass # swallow the error + return self.get() diff --git a/source/app/instance_scheduler/model/store/ddb_transact_write.py b/source/app/instance_scheduler/model/store/ddb_transact_write.py new file mode 100644 index 00000000..e4791a64 --- /dev/null +++ b/source/app/instance_scheduler/model/store/ddb_transact_write.py @@ -0,0 +1,52 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import uuid +from types import TracebackType +from typing import TYPE_CHECKING, Optional, Self, Sequence + +if TYPE_CHECKING: + from mypy_boto3_dynamodb import DynamoDBClient + from mypy_boto3_dynamodb.type_defs import TransactWriteItemTypeDef +else: + DynamoDBClient = object + TransactWriteItemTypeDef = object + + +class WriteTransaction: + """ + A context manager object for a DynamoDB transact_write_items call. + + This transaction is will be automatically committed when __exit__ is called and may raise + an exception when doing so + + refer to https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb/client/transact_write_items.html# + for details + """ + + def __init__(self, client: DynamoDBClient) -> None: + self._client = client + self.transaction_items: list[TransactWriteItemTypeDef] = [] + self.request_token = str(uuid.uuid4()) + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if exc_type is None: + self._commit() + else: + pass # exceptions allowed to bubble up to calling context + + def add(self, items: Sequence[TransactWriteItemTypeDef]) -> None: + self.transaction_items.extend(items) + + def _commit(self) -> None: + self._client.transact_write_items( + TransactItems=self.transaction_items, + ClientRequestToken=self.request_token, + ) diff --git a/source/app/instance_scheduler/model/store/dynamo_client.py 
b/source/app/instance_scheduler/model/store/dynamo_client.py new file mode 100644 index 00000000..81dab6bf --- /dev/null +++ b/source/app/instance_scheduler/model/store/dynamo_client.py @@ -0,0 +1,25 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import TYPE_CHECKING, Optional + +import boto3 + +from instance_scheduler.util import get_boto_config + +if TYPE_CHECKING: + from mypy_boto3_dynamodb import DynamoDBClient +else: + DynamoDBClient = object + + +# shared dynamodb client to minimize the number of KMS api calls needed to access encrypted dynamodb tables +# note: KMS caching with dynamo is done on a per-connection (client) level +_hub_dynamo_client: Optional[DynamoDBClient] = None + + +def hub_dynamo_client() -> DynamoDBClient: + global _hub_dynamo_client + if not _hub_dynamo_client: + new_client: DynamoDBClient = boto3.client("dynamodb", config=get_boto_config()) + _hub_dynamo_client = new_client + return _hub_dynamo_client diff --git a/source/app/instance_scheduler/model/store/dynamo_mw_store.py b/source/app/instance_scheduler/model/store/dynamo_mw_store.py new file mode 100644 index 00000000..288df552 --- /dev/null +++ b/source/app/instance_scheduler/model/store/dynamo_mw_store.py @@ -0,0 +1,33 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from collections.abc import Iterator +from typing import Final + +from instance_scheduler.model import MWStore +from instance_scheduler.model.maint_win import EC2SSMMaintenanceWindow +from instance_scheduler.model.store.dynamo_client import hub_dynamo_client + + +class DynamoMWStore(MWStore): + def __init__(self, table_name: str) -> None: + self._table_name: Final = table_name + + def put(self, window: EC2SSMMaintenanceWindow) -> None: + hub_dynamo_client().put_item(TableName=self._table_name, Item=window.to_item()) + + def delete(self, window: EC2SSMMaintenanceWindow) -> None: + hub_dynamo_client().delete_item(TableName=self._table_name, Key=window.to_key()) + + def find_by_account_region( + self, account: str, region: str + ) -> Iterator[EC2SSMMaintenanceWindow]: + primary_key: Final = f"{account}:{region}" + paginator: Final = hub_dynamo_client().get_paginator("query") + for page in paginator.paginate( + TableName=self._table_name, + ExpressionAttributeNames={"#pk": "account-region"}, + ExpressionAttributeValues={":val": {"S": primary_key}}, + KeyConditionExpression="#pk = :val", + ): + for item in page["Items"]: + yield EC2SSMMaintenanceWindow.from_item(item) diff --git a/source/app/instance_scheduler/model/store/dynamo_period_definition_store.py b/source/app/instance_scheduler/model/store/dynamo_period_definition_store.py new file mode 100644 index 00000000..4a4cb10c --- /dev/null +++ b/source/app/instance_scheduler/model/store/dynamo_period_definition_store.py @@ -0,0 +1,181 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import json +from collections.abc import Mapping +from typing import TYPE_CHECKING, Final, Optional, Sequence + +from botocore.exceptions import ClientError + +from instance_scheduler.model.period_definition import ( + InvalidPeriodDefinition, + PeriodDefinition, +) +from instance_scheduler.model.store.dynamo_client import hub_dynamo_client +from instance_scheduler.model.store.period_definition_store import ( + PeriodAlreadyExistsException, + PeriodDefinitionStore, + UnknownPeriodException, +) + +if TYPE_CHECKING: + from mypy_boto3_dynamodb.type_defs import TransactWriteItemTypeDef +else: + TransactWriteItemTypeDef = object + + +class DynamoPeriodDefinitionStore(PeriodDefinitionStore): + def __init__(self, table_name: str): + self._table: Final[str] = table_name + + def put(self, period: PeriodDefinition, overwrite: bool = False) -> None: + if overwrite: + hub_dynamo_client().put_item( + TableName=self._table, + Item=period.to_item(), + ) + else: + try: + hub_dynamo_client().put_item( + TableName=self._table, + Item=period.to_item(), + ConditionExpression="attribute_not_exists(#key_type) AND attribute_not_exists(#key_name)", + ExpressionAttributeNames={ + "#key_type": "type", # NOSONAR -- this is not duplication + "#key_name": "name", # NOSONAR -- this is not duplication + }, + ) + except ClientError as ce: + if ce.response["Error"]["Code"] == "ConditionalCheckFailedException": + raise PeriodAlreadyExistsException( + f"period {period.name} already exists" + ) + else: + raise ce + + def delete(self, period_name: str, error_if_missing: bool = False) -> None: + if error_if_missing: + try: + hub_dynamo_client().delete_item( + TableName=self._table, + Key={"type": {"S": "period"}, "name": {"S": period_name}}, + ConditionExpression="attribute_exists(#key_type) AND attribute_exists(#key_name)", + ExpressionAttributeNames={ + "#key_type": "type", # NOSONAR -- this is not duplication + "#key_name": "name", # NOSONAR -- this is not 
duplication + }, + ) + except ClientError as ce: + if ce.response["Error"]["Code"] == "ConditionalCheckFailedException": + raise UnknownPeriodException(f"period {period_name} does not exist") + else: + raise ce + else: + hub_dynamo_client().delete_item( + TableName=self._table, + Key={"type": {"S": "period"}, "name": {"S": period_name}}, + ) + + def transact_put( + self, period: PeriodDefinition, overwrite: bool = False + ) -> Sequence[TransactWriteItemTypeDef]: + if overwrite: + return [ + { + "Put": { + "Item": period.to_item(), + "TableName": self._table, + } + } + ] + else: + return [ + { + "Put": { + "Item": period.to_item(), + "TableName": self._table, + "ConditionExpression": "attribute_not_exists(#key_type) AND attribute_not_exists(#key_name)", + "ExpressionAttributeNames": { + "#key_type": "type", # NOSONAR -- this is not duplication + "#key_name": "name", # NOSONAR -- this is not duplication + }, + }, + } + ] + + def transact_delete( + self, period_name: str, error_if_missing: bool = False + ) -> Sequence[TransactWriteItemTypeDef]: + if error_if_missing: + return [ + { + "Delete": { + "Key": { + "type": {"S": "period"}, + "name": {"S": period_name}, + }, + "TableName": self._table, + "ConditionExpression": "attribute_exists(#key_type) AND attribute_exists(#key_name)", + "ExpressionAttributeNames": { + "#key_type": "type", # NOSONAR -- this is not duplication + "#key_name": "name", # NOSONAR -- this is not duplication + }, + } + } + ] + else: + return [ + { + "Delete": { + "Key": { + "type": {"S": "period"}, + "name": {"S": period_name}, + }, + "TableName": self._table, + } + } + ] + + def find_by_name(self, period_name: str) -> Optional[PeriodDefinition]: + result = hub_dynamo_client().get_item( + TableName=self._table, + Key={"type": {"S": "period"}, "name": {"S": period_name}}, + ) + + if "Item" in result: + try: + return PeriodDefinition.from_item(result["Item"]) + except InvalidPeriodDefinition as e: + raise InvalidPeriodDefinition( + f"Unable to 
build period {period_name}: {e}" + ) + else: + return None + + def find_all(self) -> Mapping[str, PeriodDefinition]: + result, errors = self.find_all_with_errors() + return result + + def find_all_with_errors( + self, + ) -> tuple[Mapping[str, PeriodDefinition], list[InvalidPeriodDefinition]]: + result = hub_dynamo_client().query( + TableName=self._table, + KeyConditionExpression="#part_key=:value", + ExpressionAttributeNames={"#part_key": "type"}, + ExpressionAttributeValues={":value": {"S": "period"}}, + ) + + periods: dict[str, PeriodDefinition] = {} + exceptions: list[InvalidPeriodDefinition] = list() + for item in result["Items"]: + try: + period = PeriodDefinition.from_item(item) + periods[period.name] = period + except InvalidPeriodDefinition as e: + exceptions.append( + InvalidPeriodDefinition( + f"Invalid Period Definition\n{json.dumps(item, indent=2)}\n{e}" + ) + ) + + return periods, exceptions diff --git a/source/app/instance_scheduler/model/store/dynamo_schedule_definition_store.py b/source/app/instance_scheduler/model/store/dynamo_schedule_definition_store.py new file mode 100644 index 00000000..771e5e78 --- /dev/null +++ b/source/app/instance_scheduler/model/store/dynamo_schedule_definition_store.py @@ -0,0 +1,213 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import json +from collections.abc import Mapping +from typing import TYPE_CHECKING, Final, Optional, Sequence + +from botocore.exceptions import ClientError + +from instance_scheduler.model.schedule_definition import ( + InvalidScheduleDefinition, + ScheduleDefinition, +) +from instance_scheduler.model.store.ddb_transact_write import WriteTransaction +from instance_scheduler.model.store.dynamo_client import hub_dynamo_client +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleAlreadyExistsException, + ScheduleDefinitionStore, + UnknownScheduleException, +) + +if TYPE_CHECKING: + from mypy_boto3_dynamodb.type_defs import TransactWriteItemTypeDef +else: + TransactWriteItemTypeDef = object + + +class DynamoScheduleDefinitionStore(ScheduleDefinitionStore): + def __init__( + self, + table_name: str, + ): + self._table: Final = table_name + + def put(self, schedule: ScheduleDefinition, overwrite: bool = False) -> None: + if overwrite: + hub_dynamo_client().put_item( + TableName=self._table, + Item=schedule.to_item(), + ) + else: + try: + hub_dynamo_client().put_item( + TableName=self._table, + Item=schedule.to_item(), + ConditionExpression=( + "attribute_exists(#key_type) AND attribute_exists(#key_name)" # NOSONAR -- this is not duplication + if overwrite + else "attribute_not_exists(#key_type) AND attribute_not_exists(#key_name)" + ), + ExpressionAttributeNames={ + "#key_type": "type", # NOSONAR -- this is not duplication + "#key_name": "name", # NOSONAR -- this is not duplication + }, + ) + except ClientError as ce: + if ce.response["Error"]["Code"] == "ConditionalCheckFailedException": + raise ScheduleAlreadyExistsException( + f"schedule {schedule.name} already exists" + ) + else: + raise ce + + def delete(self, schedule_name: str, error_if_missing: bool = False) -> None: + if error_if_missing: + try: + hub_dynamo_client().delete_item( + TableName=self._table, + Key={"type": {"S": "schedule"}, 
"name": {"S": schedule_name}}, + ConditionExpression="attribute_exists(#key_type) AND attribute_exists(#key_name)", + ExpressionAttributeNames={ + "#key_type": "type", # NOSONAR -- this is not duplication + "#key_name": "name", # NOSONAR -- this is not duplication + }, + ) + except ClientError as ce: + if ce.response["Error"]["Code"] == "ConditionalCheckFailedException": + raise UnknownScheduleException( + f"schedule {schedule_name} does not exist" + ) + else: + raise ce + else: + hub_dynamo_client().delete_item( + TableName=self._table, + Key={"type": {"S": "schedule"}, "name": {"S": schedule_name}}, + ) + + def transact_put( + self, schedule: ScheduleDefinition, overwrite: bool = False + ) -> Sequence[TransactWriteItemTypeDef]: + if overwrite: + return [ + { + "Put": { + "Item": schedule.to_item(), + "TableName": self._table, + } + } + ] + else: + return [ + { + "Put": { + "Item": schedule.to_item(), + "TableName": self._table, + "ConditionExpression": "attribute_not_exists(#key_type) AND attribute_not_exists(#key_name)", + "ExpressionAttributeNames": { + "#key_type": "type", # NOSONAR -- this is not duplication + "#key_name": "name", # NOSONAR -- this is not duplication + }, + } + } + ] + + def transact_delete( + self, schedule_name: str, error_if_missing: bool = False + ) -> Sequence[TransactWriteItemTypeDef]: + if error_if_missing: + return [ + { + "Delete": { + "Key": { + "type": {"S": "schedule"}, + "name": {"S": schedule_name}, + }, + "TableName": self._table, + "ConditionExpression": "attribute_exists(#key_type) AND attribute_exists(#key_name)", + "ExpressionAttributeNames": { + "#key_type": "type", # NOSONAR -- this is not duplication + "#key_name": "name", # NOSONAR -- this is not duplication + }, + } + } + ] + else: + return [ + { + "Delete": { + "Key": { + "type": {"S": "schedule"}, + "name": {"S": schedule_name}, + }, + "TableName": self._table, + } + } + ] + + def find_by_name(self, schedule_name: str) -> Optional[ScheduleDefinition]: + result = 
hub_dynamo_client().get_item( + TableName=self._table, + Key={"type": {"S": "schedule"}, "name": {"S": schedule_name}}, + ) + + if "Item" in result: + try: + return ScheduleDefinition.from_item(result["Item"]) + except InvalidScheduleDefinition as e: + raise InvalidScheduleDefinition( + f"Unable to build schedule {schedule_name}: {e}" + ) + else: + return None + + def find_by_period(self, period_name: str) -> Mapping[str, ScheduleDefinition]: + """ + find all schedules that reference the provided period + """ + + def contains_period(sched_def: ScheduleDefinition, period_name: str) -> bool: + for period_identifier in sched_def.periods: + if period_identifier.name == period_name: + return True + return False + + schedules = self.find_all() + return { + sched_name: sched_def + for sched_name, sched_def in schedules.items() + if contains_period(sched_def, period_name) + } + + def find_all(self) -> Mapping[str, ScheduleDefinition]: + result, errors = self.find_all_with_errors() + return result + + def find_all_with_errors( + self, + ) -> tuple[Mapping[str, ScheduleDefinition], list[InvalidScheduleDefinition]]: + result = hub_dynamo_client().query( + TableName=self._table, + KeyConditionExpression="#part_key=:value", + ExpressionAttributeNames={"#part_key": "type"}, + ExpressionAttributeValues={":value": {"S": "schedule"}}, + ) + + schedules: dict[str, ScheduleDefinition] = {} + exceptions: list[InvalidScheduleDefinition] = list() + for item in result["Items"]: + try: + schedule = ScheduleDefinition.from_item(item) + schedules[schedule.name] = schedule + except InvalidScheduleDefinition as e: + exceptions.append( + InvalidScheduleDefinition( + f"Invalid Schedule Definition:\n{json.dumps(item, indent=2)}\n{e}" + ) + ) + + return schedules, exceptions + + @staticmethod + def new_transaction() -> WriteTransaction: + return WriteTransaction(hub_dynamo_client()) diff --git a/source/app/instance_scheduler/model/store/in_memory_mw_store.py 
b/source/app/instance_scheduler/model/store/in_memory_mw_store.py new file mode 100644 index 00000000..85a8801d --- /dev/null +++ b/source/app/instance_scheduler/model/store/in_memory_mw_store.py @@ -0,0 +1,43 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import Iterator, Mapping, Optional + +from instance_scheduler.model import EC2SSMMaintenanceWindow, MWStore + +AccountRegionPK = str +NameIDSK = str + + +class InMemoryMWStore(MWStore): + + _data: dict[AccountRegionPK, dict[NameIDSK, EC2SSMMaintenanceWindow]] + + def __init__( + self, + initial_data: Optional[ + Mapping[AccountRegionPK, dict[NameIDSK, EC2SSMMaintenanceWindow]] + ] = None, + ): + self._data = dict(initial_data) if initial_data else dict() + + def put(self, window: EC2SSMMaintenanceWindow) -> None: + if window.account_region not in self._data: + self._data[window.account_region] = dict() + self._data[window.account_region][window.name_id] = window + + def delete(self, window: EC2SSMMaintenanceWindow) -> None: + if window.account_region in self._data: + self._data[window.account_region].pop(window.name_id) + + def find_by_account_region( + self, account: str, region: str + ) -> Iterator[EC2SSMMaintenanceWindow]: + account_region = to_account_region_pk(account, region) + if account_region in self._data: + return iter(self._data[account_region].values()) + else: + return iter([]) + + +def to_account_region_pk(account: str, region: str) -> AccountRegionPK: + return f"{account}:{region}" diff --git a/source/app/instance_scheduler/model/store/in_memory_period_definition_store.py b/source/app/instance_scheduler/model/store/in_memory_period_definition_store.py new file mode 100644 index 00000000..9817a738 --- /dev/null +++ b/source/app/instance_scheduler/model/store/in_memory_period_definition_store.py @@ -0,0 +1,74 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from typing import Any, Mapping, Optional, Sequence, TypeGuard + +from instance_scheduler.model.period_definition import ( + PeriodDefinition, + PeriodParams, + validate_as_period_params, +) +from instance_scheduler.model.store.period_definition_store import ( + PeriodAlreadyExistsException, + PeriodDefinitionStore, + UnknownPeriodException, +) +from instance_scheduler.util.validation import ValidationException + +SerializedInMemoryPeriodDefinitionStore = Sequence[PeriodParams] + + +class InMemoryPeriodDefinitionStore(PeriodDefinitionStore): + _data: dict[str, PeriodDefinition] + + def __init__(self, initial_data: Optional[Mapping[str, PeriodDefinition]] = None): + self._data = dict(initial_data) if initial_data else {} + + def put(self, period: PeriodDefinition, overwrite: bool = False) -> None: + if not overwrite and period.name in self._data: + raise PeriodAlreadyExistsException(f"period {period.name} already exists") + + self._data[period.name] = period + + def delete(self, period_name: str, error_if_missing: bool = False) -> None: + if error_if_missing and period_name not in self._data: + raise UnknownPeriodException(f"period {period_name} does not exist") + + self._data.pop(period_name, None) + + def find_by_name(self, period_name: str) -> Optional[PeriodDefinition]: + return self._data.get(period_name, None) + + def find_all(self) -> Mapping[str, PeriodDefinition]: + return self._data + + def serialize(self) -> SerializedInMemoryPeriodDefinitionStore: + return [period_def.to_period_params() for period_def in self._data.values()] + + @classmethod + def deserialize( + cls, data: SerializedInMemoryPeriodDefinitionStore + ) -> "InMemoryPeriodDefinitionStore": + periods: dict[str, PeriodDefinition] = {} + for period_params in data: + period_def = PeriodDefinition.from_period_params(period_params) + periods[period_def.name] = period_def + + return InMemoryPeriodDefinitionStore(periods) + + @staticmethod + def 
validate_serial_data( + data: Any, + ) -> TypeGuard[SerializedInMemoryPeriodDefinitionStore]: + if not isinstance(data, Sequence): + raise ValidationException( + f"Invalid PeriodStore format: must be a sequence of period definitions, received: {type(data)}" + ) + + for params in data: + if not isinstance(params, dict): + raise ValidationException( + f"Invalid PeriodStore format: must be a sequence of PeriodParams, Sequence contains {type(data)}" + ) + validate_as_period_params(params) + + return True diff --git a/source/app/instance_scheduler/model/store/in_memory_schedule_definition_store.py b/source/app/instance_scheduler/model/store/in_memory_schedule_definition_store.py new file mode 100644 index 00000000..2fc3558e --- /dev/null +++ b/source/app/instance_scheduler/model/store/in_memory_schedule_definition_store.py @@ -0,0 +1,91 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import Any, Mapping, Optional, Sequence, TypeGuard + +from instance_scheduler.model.schedule_definition import ( + ScheduleDefinition, + ScheduleParams, + validate_as_schedule_params, +) +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleAlreadyExistsException, + ScheduleDefinitionStore, + UnknownScheduleException, +) +from instance_scheduler.util.validation import ValidationException + +SerializedInMemoryScheduleDefinitionStore = Sequence[ScheduleParams] + + +class InMemoryScheduleDefinitionStore(ScheduleDefinitionStore): + _data: dict[str, ScheduleDefinition] + + def __init__(self, initial_data: Optional[Mapping[str, ScheduleDefinition]] = None): + self._data = dict(initial_data) if initial_data else {} + + def put(self, schedule: ScheduleDefinition, overwrite: bool = False) -> None: + if not overwrite and schedule.name in self._data: + raise ScheduleAlreadyExistsException( + f"schedule {schedule.name} already exists" + ) + self._data[schedule.name] = schedule + + def delete(self, 
schedule_name: str, error_if_missing: bool = False) -> None: + if error_if_missing and schedule_name not in self._data: + raise UnknownScheduleException(f"schedule {schedule_name} does not exist") + + self._data.pop(schedule_name, None) + + def find_by_name(self, schedule_name: str) -> Optional[ScheduleDefinition]: + return self._data.get(schedule_name, None) + + def find_by_period(self, period_name: str) -> Mapping[str, ScheduleDefinition]: + def contains_period(sched_def: ScheduleDefinition, period_name: str) -> bool: + for period_identifier in sched_def.periods: + if period_identifier.name == period_name: + return True + return False + + schedules = self.find_all() + return { + sched_name: sched_def + for sched_name, sched_def in schedules.items() + if contains_period(sched_def, period_name) + } + + def find_all(self) -> Mapping[str, ScheduleDefinition]: + return self._data + + def serialize(self) -> SerializedInMemoryScheduleDefinitionStore: + return [ + schedule_def.to_schedule_params() for schedule_def in self._data.values() + ] + + @classmethod + def deserialize( + cls, data: SerializedInMemoryScheduleDefinitionStore + ) -> "InMemoryScheduleDefinitionStore": + schedules: dict[str, ScheduleDefinition] = {} + for period_params in data: + schedule_def = ScheduleDefinition.from_schedule_params(period_params) + schedules[schedule_def.name] = schedule_def + + return InMemoryScheduleDefinitionStore(schedules) + + @staticmethod + def validate_serial_data( + data: Any, + ) -> TypeGuard[SerializedInMemoryScheduleDefinitionStore]: + if not isinstance(data, Sequence): + raise ValidationException( + f"Invalid PeriodStore format: must be a sequence of period definitions, received: {type(data)}" + ) + + for params in data: + if not isinstance(params, dict): + raise ValidationException( + f"Invalid ScheduleStore format: must be a sequence of ScheduleParams, Sequence contains {type(data)}" + ) + validate_as_schedule_params(params) + + return True diff --git 
a/source/app/instance_scheduler/model/store/maint_win_store.py b/source/app/instance_scheduler/model/store/maint_win_store.py
new file mode 100644
index 00000000..d3028816
--- /dev/null
+++ b/source/app/instance_scheduler/model/store/maint_win_store.py
@@ -0,0 +1,33 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+from collections.abc import Iterator
+from typing import Final
+
+from instance_scheduler.model.maint_win import EC2SSMMaintenanceWindow
+from instance_scheduler.model.store.dynamo_client import hub_dynamo_client
+
+
+# NOTE(review): this class performs the same three dynamo operations as
+# DynamoMWStore (put/delete/paginated query by account:region) under legacy
+# method names -- presumably the pre-refactor accessor; confirm whether both
+# are intended to coexist.
+class EC2SSMMaintenanceWindowStore:
+    def __init__(self, table_name: str) -> None:
+        # client is captured once at construction (shared hub client)
+        self._client: Final = hub_dynamo_client()
+        self._table_name: Final = table_name
+
+    def get_ssm_windows_db(
+        self, *, account: str, region: str
+    ) -> Iterator[EC2SSMMaintenanceWindow]:
+        # partition key is the composite "account:region" string
+        primary_key: Final = f"{account}:{region}"
+        paginator: Final = self._client.get_paginator("query")
+        for page in paginator.paginate(
+            TableName=self._table_name,
+            ExpressionAttributeNames={"#pk": "account-region"},
+            ExpressionAttributeValues={":val": {"S": primary_key}},
+            KeyConditionExpression="#pk = :val",
+        ):
+            for item in page["Items"]:
+                yield EC2SSMMaintenanceWindow.from_item(item)
+
+    def put_window_dynamodb(self, window: EC2SSMMaintenanceWindow) -> None:
+        self._client.put_item(TableName=self._table_name, Item=window.to_item())
+
+    def delete_window(self, window: EC2SSMMaintenanceWindow) -> None:
+        self._client.delete_item(TableName=self._table_name, Key=window.to_key())
diff --git a/source/app/instance_scheduler/model/store/mw_store.py b/source/app/instance_scheduler/model/store/mw_store.py
new file mode 100644
index 00000000..e392f380
--- /dev/null
+++ b/source/app/instance_scheduler/model/store/mw_store.py
@@ -0,0 +1,27 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+from abc import ABC, abstractmethod
+from collections.abc import Iterator
+
+from instance_scheduler.model.maint_win import EC2SSMMaintenanceWindow
+
+
+class MWStore(ABC):
+    """
+    An abstract DAO layer between the rest of the app and the underlying persistence engine being used to
+    store SSM Maintenance Windows
+    """
+
+    @abstractmethod
+    def put(self, window: EC2SSMMaintenanceWindow) -> None:
+        # create or replace the given window
+        raise NotImplementedError()
+
+    @abstractmethod
+    def delete(self, window: EC2SSMMaintenanceWindow) -> None:
+        # remove the given window from the store
+        raise NotImplementedError()
+
+    @abstractmethod
+    def find_by_account_region(
+        self, account: str, region: str
+    ) -> Iterator[EC2SSMMaintenanceWindow]:
+        # iterate all windows stored for the given account/region pair
+        raise NotImplementedError()
diff --git a/source/app/instance_scheduler/model/store/period_definition_store.py b/source/app/instance_scheduler/model/store/period_definition_store.py
new file mode 100644
index 00000000..379c589b
--- /dev/null
+++ b/source/app/instance_scheduler/model/store/period_definition_store.py
@@ -0,0 +1,33 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+from abc import ABC, abstractmethod
+from collections.abc import Mapping
+from typing import Optional
+
+from instance_scheduler.model.period_definition import PeriodDefinition
+
+
+class UnknownPeriodException(Exception):
+    pass
+
+
+class PeriodAlreadyExistsException(Exception):
+    pass
+
+
+class PeriodDefinitionStore(ABC):
+    """Abstract DAO for period definitions, keyed by period name."""
+
+    @abstractmethod
+    def put(self, period: PeriodDefinition, overwrite: bool = False) -> None:
+        # store a period; implementations raise PeriodAlreadyExistsException
+        # when overwrite is False and the name is already present
+        raise NotImplementedError()
+
+    @abstractmethod
+    def delete(self, period_name: str, error_if_missing: bool = False) -> None:
+        # remove a period; implementations raise UnknownPeriodException when
+        # error_if_missing is True and the name is absent
+        raise NotImplementedError()
+
+    @abstractmethod
+    def find_by_name(self, period_name: str) -> Optional[PeriodDefinition]:
+        # return the named period, or None when not found
+        raise NotImplementedError()
+
+    @abstractmethod
+    def find_all(self) -> Mapping[str, PeriodDefinition]:
+        # return all stored periods keyed by name
+        raise NotImplementedError()
diff --git a/source/app/instance_scheduler/model/store/schedule_definition_store.py b/source/app/instance_scheduler/model/store/schedule_definition_store.py
new file mode 100644
index 00000000..3d73474e
--- /dev/null
+++ b/source/app/instance_scheduler/model/store/schedule_definition_store.py
@@ -0,0 +1,37 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0 +from abc import ABC, abstractmethod +from collections.abc import Mapping +from typing import Optional + +from instance_scheduler.model.schedule_definition import ScheduleDefinition + + +class UnknownScheduleException(Exception): + pass + + +class ScheduleAlreadyExistsException(Exception): + pass + + +class ScheduleDefinitionStore(ABC): + @abstractmethod + def put(self, schedule: ScheduleDefinition, overwrite: bool = False) -> None: + raise NotImplementedError() + + @abstractmethod + def delete(self, schedule_name: str, error_if_missing: bool = False) -> None: + raise NotImplementedError() + + @abstractmethod + def find_by_name(self, schedule_name: str) -> Optional[ScheduleDefinition]: + raise NotImplementedError() + + @abstractmethod + def find_by_period(self, period_name: str) -> Mapping[str, ScheduleDefinition]: + raise NotImplementedError() + + @abstractmethod + def find_all(self) -> Mapping[str, ScheduleDefinition]: + raise NotImplementedError() diff --git a/source/app/instance_scheduler/ops_metrics/anonymous_metric_wrapper.py b/source/app/instance_scheduler/ops_metrics/anonymous_metric_wrapper.py index 5b76ded0..ff781de4 100644 --- a/source/app/instance_scheduler/ops_metrics/anonymous_metric_wrapper.py +++ b/source/app/instance_scheduler/ops_metrics/anonymous_metric_wrapper.py @@ -7,9 +7,10 @@ @dataclass(frozen=True) class AnonymousMetricWrapper: - TimeStamp: str - UUID: str - Solution: str - Version: str - Event_Name: str - Context: OpsMetric + timestamp: str + uuid: str + solution: str + version: str + event_name: str + context_version: int + context: OpsMetric diff --git a/source/app/instance_scheduler/ops_metrics/metric_type/asg_count_metric.py b/source/app/instance_scheduler/ops_metrics/metric_type/asg_count_metric.py new file mode 100644 index 00000000..8a6688a1 --- /dev/null +++ b/source/app/instance_scheduler/ops_metrics/metric_type/asg_count_metric.py @@ -0,0 +1,15 @@ +# Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from dataclasses import dataclass +from typing import ClassVar + +from instance_scheduler.ops_metrics import GatheringFrequency +from instance_scheduler.ops_metrics.metric_type.instance_count_metric import ( + InstanceCountMetric, +) + + +@dataclass(frozen=True) +class AsgCountMetric(InstanceCountMetric): + collection_frequency: ClassVar[GatheringFrequency] = GatheringFrequency.UNLIMITED diff --git a/source/app/instance_scheduler/ops_metrics/metric_type/cli_request_metric.py b/source/app/instance_scheduler/ops_metrics/metric_type/cli_request_metric.py index bfdc15ca..b3a3822a 100644 --- a/source/app/instance_scheduler/ops_metrics/metric_type/cli_request_metric.py +++ b/source/app/instance_scheduler/ops_metrics/metric_type/cli_request_metric.py @@ -12,3 +12,4 @@ class CliRequestMetric(OpsMetric): command_used: str event_name: ClassVar[str] = "cli_request" collection_frequency: ClassVar[GatheringFrequency] = GatheringFrequency.UNLIMITED + context_version: ClassVar[int] = 1 diff --git a/source/app/instance_scheduler/ops_metrics/metric_type/deployment_description_metric.py b/source/app/instance_scheduler/ops_metrics/metric_type/deployment_description_metric.py index 8ec9df63..b947e595 100644 --- a/source/app/instance_scheduler/ops_metrics/metric_type/deployment_description_metric.py +++ b/source/app/instance_scheduler/ops_metrics/metric_type/deployment_description_metric.py @@ -15,7 +15,6 @@ class ScheduleFlagCounts: hibernate: int = 0 override: int = 0 use_ssm_maintenance_window: int = 0 - use_metrics: int = 0 non_default_timezone: int = 0 @@ -26,15 +25,17 @@ class DeploymentDescriptionMetric(OpsMetric): num_accounts: int num_schedules: int num_cfn_schedules: int + num_one_sided_schedules: int schedule_flag_counts: ScheduleFlagCounts default_timezone: str - schedule_aurora_clusters: bool create_rds_snapshots: bool schedule_interval_minutes: int memory_size_mb: int using_organizations: 
bool enable_ec2_ssm_maintenance_windows: bool + ops_dashboard_enabled: bool num_started_tags: int num_stopped_tags: int event_name: ClassVar[str] = "deployment_description" collection_frequency: ClassVar[GatheringFrequency] = GatheringFrequency.DAILY + context_version: ClassVar[int] = 1 diff --git a/source/app/instance_scheduler/ops_metrics/metric_type/insights_metric.py b/source/app/instance_scheduler/ops_metrics/metric_type/insights_metric.py new file mode 100644 index 00000000..688c3b2f --- /dev/null +++ b/source/app/instance_scheduler/ops_metrics/metric_type/insights_metric.py @@ -0,0 +1,67 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from dataclasses import dataclass +from datetime import datetime +from typing import TYPE_CHECKING, ClassVar + +from instance_scheduler.ops_metrics import GatheringFrequency +from instance_scheduler.ops_metrics.metric_type.ops_metric import OpsMetric +from instance_scheduler.ops_monitoring.instance_counts import ServiceInstanceCounts + +if TYPE_CHECKING: + from mypy_boto3_cloudwatch.literals import StandardUnitType + from mypy_boto3_cloudwatch.type_defs import MetricDatumTypeDef +else: + MetricDatumTypeDef = object + StandardUnitType = object + + +@dataclass(frozen=True) +class Dimension: + name: str + value: str + + +@dataclass(frozen=True) +class MetricDataItem: + metric_name: str + dimensions: list[Dimension] + timestamp: datetime + value: float + unit: StandardUnitType + + def to_cloudwatch_data(self) -> MetricDatumTypeDef: + return { + "MetricName": self.metric_name, + "Dimensions": [ + {"Name": dimension.name, "Value": dimension.value} + for dimension in self.dimensions + ], + "Timestamp": self.timestamp, + "Value": self.value, + "Unit": self.unit, + } + + +@dataclass(frozen=True) +class InsightsMetric(OpsMetric): + metric_data: list[MetricDataItem] + event_name: ClassVar[str] = "insights_metric" + collection_frequency: ClassVar[GatheringFrequency] = 
GatheringFrequency.DAILY + context_version: ClassVar[int] = 1 + + @classmethod + def from_service_counts( + cls, service_counts: ServiceInstanceCounts, scheduling_interval_minutes: int + ) -> "InsightsMetric": + # imported here to avoid circular import + from instance_scheduler.ops_monitoring.cw_ops_insights import ( + CloudWatchOperationalInsights, + ) + + return InsightsMetric( + metric_data=CloudWatchOperationalInsights.build_per_instance_type_metrics( + service_counts, scheduling_interval_minutes=scheduling_interval_minutes + ), + ) diff --git a/source/app/instance_scheduler/ops_metrics/metric_type/instance_count_metric.py b/source/app/instance_scheduler/ops_metrics/metric_type/instance_count_metric.py index fd553968..b7e7160d 100644 --- a/source/app/instance_scheduler/ops_metrics/metric_type/instance_count_metric.py +++ b/source/app/instance_scheduler/ops_metrics/metric_type/instance_count_metric.py @@ -15,3 +15,4 @@ class InstanceCountMetric(OpsMetric): num_schedules: int event_name: ClassVar[str] = "instance_count" collection_frequency: ClassVar[GatheringFrequency] = GatheringFrequency.DAILY + context_version: ClassVar[int] = 1 diff --git a/source/app/instance_scheduler/ops_metrics/metric_type/ops_metric.py b/source/app/instance_scheduler/ops_metrics/metric_type/ops_metric.py index 967080ed..f549372d 100644 --- a/source/app/instance_scheduler/ops_metrics/metric_type/ops_metric.py +++ b/source/app/instance_scheduler/ops_metrics/metric_type/ops_metric.py @@ -11,3 +11,4 @@ class OpsMetric(ABC): collection_frequency: ClassVar[GatheringFrequency] = GatheringFrequency.UNLIMITED event_name: ClassVar[str] + context_version: ClassVar[int] diff --git a/source/app/instance_scheduler/ops_metrics/metric_type/scheduling_action_metric.py b/source/app/instance_scheduler/ops_metrics/metric_type/scheduling_action_metric.py index 1782b236..a61f52d6 100644 --- a/source/app/instance_scheduler/ops_metrics/metric_type/scheduling_action_metric.py +++ 
b/source/app/instance_scheduler/ops_metrics/metric_type/scheduling_action_metric.py @@ -25,3 +25,4 @@ class SchedulingActionMetric(OpsMetric): actions: list[ActionTaken] event_name: ClassVar[str] = "scheduling_action" collection_frequency: ClassVar[GatheringFrequency] = GatheringFrequency.UNLIMITED + context_version: ClassVar[int] = 1 diff --git a/source/app/instance_scheduler/ops_metrics/metrics.py b/source/app/instance_scheduler/ops_metrics/metrics.py index 8a41eec4..1e2d7ac3 100644 --- a/source/app/instance_scheduler/ops_metrics/metrics.py +++ b/source/app/instance_scheduler/ops_metrics/metrics.py @@ -1,155 +1,155 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import dataclasses -import uuid from datetime import datetime, timezone -from typing import TYPE_CHECKING, Optional +from os import environ +from typing import Final, Optional, assert_never +from uuid import UUID -import boto3 -import requests -from botocore.exceptions import ClientError -from typing_extensions import assert_never +from aws_lambda_powertools import Logger as PowerToolsLogger +from urllib3 import HTTPResponse, PoolManager from instance_scheduler.ops_metrics import GatheringFrequency from instance_scheduler.ops_metrics.anonymous_metric_wrapper import ( AnonymousMetricWrapper, ) from instance_scheduler.ops_metrics.metric_type.ops_metric import OpsMetric -from instance_scheduler.util import get_boto_config, safe_json -from instance_scheduler.util.app_env import get_app_env +from instance_scheduler.util import safe_json +from instance_scheduler.util.app_env import AppEnvError, env_to_bool from instance_scheduler.util.logger import Logger -if TYPE_CHECKING: - from mypy_boto3_ssm.client import SSMClient -else: - SSMClient = object + +@dataclasses.dataclass +class MetricsEnvironment: + # a sub environment that controls anonymized metrics + # the absense/misconfiguration of this environment should not be a solution-breaking + # error 
but should rather just disable the sending of metrics + send_anonymous_metrics: bool + anonymous_metrics_url: str + solution_id: str + solution_version: str + scheduler_frequency_minutes: int + metrics_uuid: UUID + + @staticmethod + def from_env() -> "MetricsEnvironment": + try: + try: + metrics_uuid = UUID(environ["METRICS_UUID"]) + except ValueError: + raise AppEnvError(f"invalid METRICS_UUID: {environ['METRICS_UUID']}") + + try: + scheduler_frequency_minutes = int( + environ["SCHEDULING_INTERVAL_MINUTES"] + ) + except ValueError: + raise AppEnvError( + f"invalid SCHEDULING_INTERVAL_MINUTES: {environ['SCHEDULING_INTERVAL_MINUTES']}" + ) + + return MetricsEnvironment( + send_anonymous_metrics=env_to_bool(environ["SEND_METRICS"]), + anonymous_metrics_url=environ["METRICS_URL"], + solution_id=environ["SOLUTION_ID"], + solution_version=environ["SOLUTION_VERSION"], + scheduler_frequency_minutes=scheduler_frequency_minutes, + metrics_uuid=metrics_uuid, + ) + + except KeyError as err: + raise AppEnvError( + f"Missing required application environment variable: {err.args[0]}" + ) from err + + +http = PoolManager() + +_metrics_env: Optional[MetricsEnvironment] = None + + +def get_metrics_env() -> MetricsEnvironment: + """can raise AppEnvError""" + global _metrics_env + if not _metrics_env: + _metrics_env = MetricsEnvironment.from_env() # can raise AppEnvError + return _metrics_env def collect_metric( - metric: OpsMetric, logger: Logger + metric: OpsMetric, logger: Logger | PowerToolsLogger ) -> Optional[AnonymousMetricWrapper]: - if not should_collect_metric(metric, logger): + if not should_collect_metric(metric): return None try: - app_env = get_app_env() - url = app_env.anonymous_metrics_url + metrics_env = get_metrics_env() + url = metrics_env.anonymous_metrics_url metric_wrapper = AnonymousMetricWrapper( # current required timestamp format for metrics backend (7/11/23) - TimeStamp=str(datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")), - 
UUID=str(_get_deployment_uuid(logger)), - Solution=app_env.solution_id, - Version=app_env.solution_version, - Event_Name=metric.event_name, - Context=metric, + timestamp=str(datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")), + uuid=str(metrics_env.metrics_uuid), + solution=get_metrics_env().solution_id, + version=get_metrics_env().solution_version, + event_name=metric.event_name, + context_version=metric.context_version, + context=metric, ) data_json = safe_json(dataclasses.asdict(metric_wrapper)) - logger.info("Sending anonymous metrics data: {}", data_json) + logger.info(f"Sending anonymous metrics data: {data_json}") headers = { "content-type": "application/json", "content-length": str(len(data_json)), } - response = requests.post(url, data=data_json, headers=headers, timeout=300) - response.raise_for_status() - logger.debug( - "Metrics data sent, status code is {}, message is {}", - response.status_code, - response.text, + response: HTTPResponse = http.request( # type: ignore[no-untyped-call] + "POST", url, headers=headers, body=data_json ) + logger.debug(f"Metrics data sent, status code is {response.status}") return metric_wrapper except Exception as exc: logger.warning(("Failed sending metrics data ({})".format(str(exc)))) return None -# cache the deployment uuid to try to minimize calls to ssm -_deployment_uuid: Optional[uuid.UUID] = None - - -def _get_deployment_uuid(logger: Logger) -> uuid.UUID: - global _deployment_uuid - if not _deployment_uuid: - _deployment_uuid = _deployment_uuid_from_ssm(logger) - return _deployment_uuid - - -def _deployment_uuid_from_ssm(logger: Logger) -> uuid.UUID: - app_env = get_app_env() - stack_id = app_env.stack_id[-36:] - uuid_key = app_env.uuid_key + str(stack_id) - ssm: SSMClient = boto3.client("ssm", config=get_boto_config()) +def should_collect_metric(metric: OpsMetric | type[OpsMetric]) -> bool: try: - ssm_response = ssm.get_parameter(Name=uuid_key) - uuid_parameter = ssm_response.get("Parameter", 
{}).get("Value") - return uuid.UUID(uuid_parameter) - except ClientError as fetch_exception: - if fetch_exception.response.get("Error", {}).get("Code") == "ParameterNotFound": - uuid_parameter = str(uuid.uuid4()) - try: - logger.info("creating a new parameter") - ssm.put_parameter( - Name=uuid_key, - Description="This is a unique id for each Instance Scheduler on AWS solution stack, for reporting metrics.", - Value=uuid_parameter, - Type="String", - ) - return uuid.UUID(uuid_parameter) - except Exception as create_exception: - logger.info( - "Unable to create UUID for operational metrics, metrics will not be sent: \n{}".format( - create_exception - ) - ) - raise ValueError("Unable to get solution UUID") - else: - logger.info( - "Unable to fetch UUID for operational metrics, metrics will not be sent: \n{}".format( - fetch_exception - ) - ) - raise ValueError("Unable to get solution UUID") - - -def should_collect_metric(metric: OpsMetric | type[OpsMetric], logger: Logger) -> bool: - app_env = get_app_env() - if not app_env.send_anonymous_metrics: + env: Final = get_metrics_env() + except AppEnvError: + # environment not configured, treat as disabled + return False + if not env.send_anonymous_metrics: # do not send metrics when not enabled return False - try: - solution_uuid = _get_deployment_uuid(logger) # can fail - interval = app_env.scheduler_frequency_minutes - current_time = datetime.now(timezone.utc) - - if metric.collection_frequency is GatheringFrequency.UNLIMITED: - return True - elif metric.collection_frequency is GatheringFrequency.DAILY: - return _is_allowed_hour_for_metrics( - solution_uuid, current_time - ) and _is_first_call_in_current_hour(current_time, interval) - elif metric.collection_frequency is GatheringFrequency.WEEKLY: - return ( - _is_first_day_in_week(current_time) - and _is_allowed_hour_for_metrics(solution_uuid, current_time) - and _is_first_call_in_current_hour(current_time, interval) - ) - else: - 
assert_never(metric.collection_frequency) - except ValueError: - return False + solution_uuid = env.metrics_uuid + interval = env.scheduler_frequency_minutes + current_time = datetime.now(timezone.utc) + + if metric.collection_frequency is GatheringFrequency.UNLIMITED: + return True + elif metric.collection_frequency is GatheringFrequency.DAILY: + return _is_allowed_hour_for_metrics( + solution_uuid, current_time + ) and _is_first_call_in_current_hour(current_time, interval) + elif metric.collection_frequency is GatheringFrequency.WEEKLY: + return ( + _is_first_day_in_week(current_time) + and _is_allowed_hour_for_metrics(solution_uuid, current_time) + and _is_first_call_in_current_hour(current_time, interval) + ) + else: + assert_never(metric.collection_frequency) def _is_first_day_in_week(current_time: datetime) -> bool: return current_time.weekday() == 0 -def _is_allowed_hour_for_metrics( - solution_uuid: uuid.UUID, current_time: datetime -) -> bool: +def _is_allowed_hour_for_metrics(solution_uuid: UUID, current_time: datetime) -> bool: hour_to_send = solution_uuid.int % 20 return current_time.hour == hour_to_send diff --git a/source/app/instance_scheduler/ops_monitoring/__init__.py b/source/app/instance_scheduler/ops_monitoring/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/source/app/instance_scheduler/ops_monitoring/cw_ops_insights.py b/source/app/instance_scheduler/ops_monitoring/cw_ops_insights.py new file mode 100644 index 00000000..85b4eace --- /dev/null +++ b/source/app/instance_scheduler/ops_monitoring/cw_ops_insights.py @@ -0,0 +1,252 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime, timezone +from enum import StrEnum +from functools import cached_property +from typing import TYPE_CHECKING, Iterable, TypedDict + +from instance_scheduler.boto_retry import get_client_with_standard_retry +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, +) +from instance_scheduler.ops_metrics.metric_type.insights_metric import ( + Dimension, + MetricDataItem, +) +from instance_scheduler.ops_monitoring.instance_counts import ServiceInstanceCounts +from instance_scheduler.util.logger import Logger + +if TYPE_CHECKING: + from mypy_boto3_cloudwatch import CloudWatchClient +else: + CloudWatchClient = object + + +class InstanceData(TypedDict): + stopped: int + total: int + + +"""str is instance type""" +InstanceDataByInstanceType = dict[str, InstanceData] + +"""str is service""" +ServicesInstanceData = dict[str, InstanceDataByInstanceType] +""" +sample data shape +{ + "ec2": { + "t2.micro": { + stopped: 5 + total: 10 + } + }, + "rds": { + "m2.medium": { + stopped: 1 + total: 1 + } + } +} +""" + + +class MetricName(StrEnum): + ManagedInstances = "ManagedInstances" + StoppedInstances = "StoppedInstances" + RunningInstances = "RunningInstances" + + +class DimensionName(StrEnum): + Service = "Service" + InstanceType = "InstanceType" + Schedule = "Schedule" + SchedulingInterval = "SchedulingInterval" + + +class CloudWatchOperationalInsights: + def __init__( + self, + env: SchedulingRequestEnvironment, + logger: Logger, + ) -> None: + self._env = env + self._namespace = f"{env.stack_name}:InstanceScheduler" + self._logger = logger + + @cached_property + def cloudwatch_client(self) -> CloudWatchClient: + client: CloudWatchClient = get_client_with_standard_retry("cloudwatch") + return client + + def send_metrics_to_cloudwatch( + self, + instance_counts: ServiceInstanceCounts, + scheduling_interval_minutes: int, + ) -> None: + metrics_to_send 
= self.build_per_schedule_metrics( + instance_counts, scheduling_interval_minutes + ) + metrics_to_send.extend( + self.build_per_instance_type_metrics( + instance_counts, scheduling_interval_minutes + ) + ) + self.send_to_cloudwatch(metrics_to_send) + + @staticmethod + def build_per_schedule_metrics( + aggregated_instances: ServiceInstanceCounts, + scheduling_interval_minutes: int, + ) -> list[MetricDataItem]: + metric_data = [] + + for service, instance_counts in aggregated_instances.items(): + for schedule_name, counts in instance_counts.by_schedule().items(): + managed_per_sched_metric = ( + CloudWatchOperationalInsights.build_per_schedule_metric( + service=service, + schedule_name=schedule_name, + scheduling_interval_minutes=scheduling_interval_minutes, + metric_name=MetricName.ManagedInstances, + metric_value=counts.total(), + ) + ) + running_per_sched_metric = ( + CloudWatchOperationalInsights.build_per_schedule_metric( + service=service, + schedule_name=schedule_name, + scheduling_interval_minutes=scheduling_interval_minutes, + metric_name=MetricName.RunningInstances, + metric_value=counts["running"], + ) + ) + metric_data.extend([managed_per_sched_metric, running_per_sched_metric]) + + return metric_data + + def send_to_cloudwatch(self, metric_data: Iterable[MetricDataItem]) -> None: + # actual batch limit is 1000 metrics, but there is also a 1MB payload limit which we are not directly checking + # so this should give plenty of overhead + batching_limit = 500 + try: + cw_data = [metric.to_cloudwatch_data() for metric in metric_data] + + # todo: update this to use itertools.batched when we update to python 3.12 + for i in range(0, len(cw_data), batching_limit): + batch = cw_data[i : i + batching_limit] + self.cloudwatch_client.put_metric_data( + Namespace=self._namespace, + MetricData=batch, + ) + + except Exception as e: + self._logger.warning(f"Error sending metric data to cloudwatch: {e}") + + @staticmethod + def build_per_schedule_metric( + service: str, 
+ schedule_name: str, + scheduling_interval_minutes: int, + metric_name: MetricName, + metric_value: int, + ) -> MetricDataItem: + return MetricDataItem( + metric_name=metric_name, + dimensions=[ + Dimension( + name=DimensionName.Service, + value=service, + ), + Dimension( + name=DimensionName.Schedule, + value=schedule_name, + ), + Dimension( + name=DimensionName.SchedulingInterval, + value=str(scheduling_interval_minutes), + ), + ], + timestamp=datetime.now(timezone.utc), + value=metric_value, + unit="Count", + ) + + @staticmethod + def build_per_instance_type_metric( + service: str, + instance_type: str, + scheduling_interval_minutes: int, + metric_name: MetricName, + metric_value: int, + ) -> MetricDataItem: + return MetricDataItem( + metric_name=metric_name, + dimensions=[ + Dimension( + name=DimensionName.Service, + value=service, + ), + Dimension( + name=DimensionName.InstanceType, + value=instance_type, + ), + Dimension( + name=DimensionName.SchedulingInterval, + value=str(scheduling_interval_minutes), + ), + ], + timestamp=datetime.now(timezone.utc), + value=metric_value, + unit="Count", + ) + + @staticmethod + def build_per_instance_type_metrics( + aggregated_instances: ServiceInstanceCounts, + scheduling_interval_minutes: int, + ) -> list[MetricDataItem]: + """ + convert from raw scan data to metric items that can be sent to cloudwatch/aws + aggregated on a per-instance basis + """ + metric_data = [] + + for service, instance_counts in aggregated_instances.items(): + for instance_type, counts in instance_counts.by_type().items(): + total_controlled_per_instance_metric = ( + CloudWatchOperationalInsights.build_per_instance_type_metric( + service=service, + instance_type=instance_type, + scheduling_interval_minutes=scheduling_interval_minutes, + metric_name=MetricName.ManagedInstances, + metric_value=counts.total(), + ) + ) + stopped_per_instance_metric = ( + CloudWatchOperationalInsights.build_per_instance_type_metric( + service=service, + 
instance_type=instance_type, + scheduling_interval_minutes=scheduling_interval_minutes, + metric_name=MetricName.StoppedInstances, + metric_value=counts["stopped"], + ) + ) + running_per_instance_metric = ( + CloudWatchOperationalInsights.build_per_instance_type_metric( + service=service, + instance_type=instance_type, + scheduling_interval_minutes=scheduling_interval_minutes, + metric_name=MetricName.RunningInstances, + metric_value=counts["running"], + ) + ) + metric_data.extend( + [ + total_controlled_per_instance_metric, + stopped_per_instance_metric, + running_per_instance_metric, + ] + ) + + return metric_data diff --git a/source/app/instance_scheduler/ops_monitoring/instance_counts.py b/source/app/instance_scheduler/ops_monitoring/instance_counts.py new file mode 100644 index 00000000..8ae37f94 --- /dev/null +++ b/source/app/instance_scheduler/ops_monitoring/instance_counts.py @@ -0,0 +1,132 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from collections import Counter +from typing import Iterable, Literal, Self + +from instance_scheduler.service.abstract_instance import AbstractInstance + +InstanceState = Literal["running", "stopped"] +ServiceName = str + + +class InstanceCounts(dict[str, Counter[InstanceState]]): + """ + a count of instances in a given state aggregated by some key (schedule_name, instance_type) + + This can be accessed with indexing like any other dict, or safely using + counts.get(aggregation_key, Counter()).get("stopped") + which return the number of counted instances of the given type in the stopped state + (returning 0 as the default fallback even if no instances of that type have been counted) + """ + + def count_instances_by_type(self, instances: Iterable[AbstractInstance]) -> Self: + for instance in instances: + self.increment( + instance.instance_type, + self.parse_as_running_or_stopped(instance.current_state), + ) + return self + + def parse_as_running_or_stopped( + 
self, instance_state: str + ) -> Literal["running", "stopped"]: + instance_state = instance_state.lower() + match instance_state: + case "available" | "starting" | "backing-up": # rds running states + return "running" + case "running" | "starting": # ec2 running states + return "running" + case _: + return "stopped" + + def increment(self, aggregation_key: str, instance_state: str) -> Self: + instance_state = self.parse_as_running_or_stopped(instance_state) + if aggregation_key not in self: + self[aggregation_key] = Counter() + self[aggregation_key][instance_state] += 1 + return self + + def merged_with(self, other: "InstanceCounts") -> "InstanceCounts": + return InstanceCounts( + # create a new dict containing the union of all keys in the original 2 dicts + # and summing together the counters of any keys that existed in both dicts + { + instance_type: self.get(instance_type, Counter()) + + other.get(instance_type, Counter()) + for instance_type in set(self).union(other) + } + ) + + +class InstanceCountsAggregator(dict[Literal["by_type", "by_schedule"], InstanceCounts]): + def by_type(self) -> InstanceCounts: + if "by_type" not in self: + self["by_type"] = InstanceCounts() + return self["by_type"] + + def by_schedule(self) -> InstanceCounts: + if "by_schedule" not in self: + self["by_schedule"] = InstanceCounts() + return self["by_schedule"] + + def merged_with( + self, other: "InstanceCountsAggregator" + ) -> "InstanceCountsAggregator": + return InstanceCountsAggregator( + # create a new dict containing the union of all keys in the original 2 dicts + # and summing together the counters of any keys that existed in both dicts + { + counter: self.get(counter, InstanceCounts()).merged_with( + other.get(counter, InstanceCounts()) + ) + for counter in set(self).union(other) + } + ) + + +class ServiceInstanceCounts(dict[ServiceName, InstanceCountsAggregator]): + """ + sample data shape + { + "ec2": { + "by_type": { + "t2.micro": { + stopped: 5 + running: 10 + } + } + 
"by_schedule": { + "schedule-1": { + stopped: 12, + running: 0 + } + } + }, + "rds": { + "by_type": { + "m2.medium": { + stopped: 1 + running: 1 + } + } + "by_schedule": { + "schedule-1": { + stopped: 2, + running: 0 + } + } + } + } + """ + + def merged_with(self, other: "ServiceInstanceCounts") -> "ServiceInstanceCounts": + return ServiceInstanceCounts( + # create a new dict containing the union of all keys in the original 2 dicts + # and summing together the counters of any keys that existed in both dicts + { + service: self.get(service, InstanceCountsAggregator()).merged_with( + other.get(service, InstanceCountsAggregator()) + ) + for service in set(self).union(other) + } + ) diff --git a/source/app/instance_scheduler/schedulers/instance.py b/source/app/instance_scheduler/schedulers/instance.py new file mode 100644 index 00000000..5a70fe2b --- /dev/null +++ b/source/app/instance_scheduler/schedulers/instance.py @@ -0,0 +1,81 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from typing import Any, Literal, NotRequired, Optional, Sequence, TypedDict + +from instance_scheduler.configuration.instance_schedule import InstanceSchedule + + +class Instance(TypedDict): + id: str + arn: NotRequired[str] + allow_resize: bool + hibernate: bool + state: Any + state_name: str + is_running: bool + is_terminated: bool + current_state: Literal["running", "stopped"] + instancetype: str + engine_type: NotRequired[str] + maintenance_window: Optional[Sequence[InstanceSchedule]] + tags: dict[str, str] + name: str + schedule_name: str + is_cluster: NotRequired[bool] + resized: NotRequired[bool] + account: NotRequired[str] + region: NotRequired[str] + service: NotRequired[str] + instance_str: NotRequired[str] + + +@dataclass +class SchedulableInstance: + id: str + allow_resize: bool + hibernate: bool + state: Any + state_name: str + is_running: bool + is_terminated: bool + current_state: Literal["running", "stopped"] + instancetype: str + tags: dict[str, str] + name: str + schedule_name: str + maintenance_window: Optional[Sequence[InstanceSchedule]] = None + arn: Optional[str] = None + is_cluster: Optional[bool] = None + resized: Optional[bool] = None + account: Optional[str] = None + region: Optional[str] = None + service: Optional[str] = None + instance_str: Optional[str] = None + engine_type: Optional[str] = None + + @classmethod + def from_instance(cls, instance: Instance) -> "SchedulableInstance": + return SchedulableInstance( + id=instance["id"], + allow_resize=instance["allow_resize"], + hibernate=instance["hibernate"], + state=instance["state"], + state_name=instance["state_name"], + is_running=instance["is_running"], + is_terminated=instance["is_terminated"], + current_state=instance["current_state"], + instancetype=instance["instancetype"], + tags=instance["tags"], + name=instance["name"], + schedule_name=instance["schedule_name"], + 
maintenance_window=instance.get("maintenance_window"), + arn=instance.get("arn"), + is_cluster=instance.get("is_cluster"), + resized=instance.get("resized"), + account=instance.get("account"), + region=instance.get("region"), + service=instance.get("service"), + instance_str=instance.get("instance_str"), + engine_type=instance.get("engine_type"), + ) diff --git a/source/app/instance_scheduler/schedulers/instance_scheduler.py b/source/app/instance_scheduler/schedulers/instance_scheduler.py index b9321db7..b023622e 100644 --- a/source/app/instance_scheduler/schedulers/instance_scheduler.py +++ b/source/app/instance_scheduler/schedulers/instance_scheduler.py @@ -2,96 +2,39 @@ # SPDX-License-Identifier: Apache-2.0 import time from collections.abc import Iterator -from datetime import datetime, timezone -from typing import Any, Final, Optional, TypedDict +from datetime import datetime +from typing import Any, Final, assert_never -from typing_extensions import NotRequired - -from instance_scheduler import ScheduleState -from instance_scheduler.configuration.instance_schedule import ( - Instance, - InstanceSchedule, -) +from instance_scheduler.configuration.instance_schedule import InstanceSchedule from instance_scheduler.configuration.scheduling_context import SchedulingContext +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, +) +from instance_scheduler.ops_metrics.metric_type.insights_metric import InsightsMetric from instance_scheduler.ops_metrics.metric_type.instance_count_metric import ( InstanceCountMetric, ) from instance_scheduler.ops_metrics.metric_type.scheduling_action_metric import ( - ActionTaken, SchedulingActionMetric, ) -from instance_scheduler.ops_metrics.metrics import collect_metric, should_collect_metric -from instance_scheduler.schedulers.instance_states import InstanceStates -from instance_scheduler.service import Service -from instance_scheduler.util.logger import Logger -from 
instance_scheduler.util.scheduler_metrics import SchedulerMetrics - -ERR_SETTING_INSTANCE_TYPE = "Error changing instance type ({})" - -INF_DESIRED_TYPE = ", desired type is {}" -INF_PROCESSING_ACCOUNT = "Running {} scheduler for account {}{} in region(s) {}" -INF_STARTING_INSTANCES = "Starting instances {} in region {}" -INF_STOPPED_INSTANCES = "Stopping instances {} in region {}" -INF_MAINTENANCE_WINDOW = ( - 'Maintenance window "{}" used as running period found for instance {}' -) - -INF_DO_NOT_STOP_RETAINED_INSTANCE = ( - "Instance {} was already running at start of period and schedule uses retain option, desired " - "state set to {} but instance will not be stopped if it is still running." -) - -WARN_SKIPPING_UNKNOWN_SCHEDULE = ( - 'Skipping instance {} in region {} for account {}, schedule name "{}" is unknown' -) -WARN_RESIZE_NOT_SUPPORTED = "Instance {} with type {} does not support resizing" - -DEBUG_STOPPED_REGION_INSTANCES = ( - "Listing instance {} in region {} to be stopped by scheduler" -) -DEBUG_INSTANCE_HEADER = "[ Instance {} ]" -DEBUG_NEW_INSTANCE = ( - 'New instance "{}" will not be stopped until next scheduling period' -) -DEBUG_CURRENT_INSTANCE_STATE = ( - 'Current state is {}, instance type is {}, schedule is "{}"' -) -DEBUG_SKIPPING_TERMINATED_INSTANCE = ( - 'Skipping terminated instance "{}" in region {} for account {}' -) -DEBUG_STARTED_REGION_INSTANCES = ( - "Listing instance {} in region {} with instance type {} to be started by scheduler" +from instance_scheduler.ops_metrics.metrics import collect_metric +from instance_scheduler.ops_monitoring.cw_ops_insights import ( + CloudWatchOperationalInsights, ) -DEBUG_CURRENT_AND_DESIRED_STATE = ( - 'Desired state for instance from schedule "{}" is {}, last desired state was {}, ' - "actual state is {}{} " +from instance_scheduler.ops_monitoring.instance_counts import ( + InstanceCountsAggregator, + ServiceInstanceCounts, ) -DEBUG_ENFORCED_STATE = "Using enforcement flag of schedule to set actual 
state of instance {} from {} to {}" -DEBUG_APPLY_RETAIN_RUNNING_STATE = ( - "Desired state is {} for instance {} which is already running and retain running period is " - "used the schedule, desired state set to {} so it is not stopped automatically at the end of " - "this running period" +from instance_scheduler.schedulers.instance_states import InstanceStates +from instance_scheduler.schedulers.scheduling_decision import ( + SchedulingAction, + SchedulingDecision, ) - - -class StartStopItem(TypedDict): - schedule: Optional[str] - - -class ResizeItem(TypedDict): - schedule: Optional[str] - old: str - new: str - - -StartStopItems = dict[str, list[dict[str, StartStopItem]]] -ResizeItems = dict[str, list[dict[str, ResizeItem]]] - - -class ProcessAccountResult(TypedDict): - started: StartStopItems - stopped: StartStopItems - resized: NotRequired[ResizeItems] +from instance_scheduler.schedulers.scheduling_result import SchedulingResult +from instance_scheduler.schedulers.states import InstanceState, ScheduleState +from instance_scheduler.service import Service +from instance_scheduler.service.abstract_instance import AbstractInstance +from instance_scheduler.util.logger import Logger class InstanceScheduler: @@ -100,524 +43,442 @@ def __init__( service: Service[Any], scheduling_context: SchedulingContext, instance_states: InstanceStates, - account_id: str, - role_arn: Optional[str], logger: Logger, + env: SchedulingRequestEnvironment, ) -> None: self._service: Final = service self._scheduling_context: Final = scheduling_context self._instance_states: Final = instance_states - self._account_id: Final = account_id - self._role_arn: Final = role_arn self._logger: Final = logger + self._env: Final = env - self._region: Final = self._scheduling_context.region + self._metric_counts: InstanceCountsAggregator = InstanceCountsAggregator() - self._schedule_metrics: Final = SchedulerMetrics(datetime.now(timezone.utc)) + def run(self) -> Any: + execution_start = 
time.perf_counter() + result = self._run_scheduler() + execution_end = time.perf_counter() - self._scheduler_start_list: list[Instance] = [] - self._scheduler_stop_list: list[Instance] = [] - self._schedule_resize_list: list[tuple[Instance, str]] = [] + time_taken = execution_end - execution_start - self._usage_metrics: Final[dict[str, dict[str, Any]]] = { - "Started": {}, - "Stopped": {}, - "Resized": {}, - } + # op metrics + collect_metric( + InstanceCountMetric( + service=self._scheduling_context.service, + region=self._scheduling_context.region, + num_schedules=len(result.instance_counts.by_schedule()), + num_instances=sum( + count.total() for count in result.instance_counts.by_type().values() + ), + ), + self._logger, + ) + + collect_metric( + SchedulingActionMetric( + duration_seconds=time_taken, + num_instances_scanned=sum( + count.total() for count in result.instance_counts.by_type().values() + ), + num_unique_schedules=len(result.instance_counts.by_schedule()), + actions=result.to_actions_taken(self._scheduling_context.service), + ), + self._logger, + ) + + # dashboard metrics + cw_metrics = CloudWatchOperationalInsights(self._env, self._logger) + service_counts = ServiceInstanceCounts( + {self._service.service_name: result.instance_counts} + ) - def _instance_display_str(self, inst_id: str, name: str) -> str: - s = "{}:{}".format(self._service.service_name.upper(), inst_id) - if name: - s += " ({})".format(name) - return s - - def _scheduled_instances_in_region(self) -> Iterator[Instance]: - # use service strategy to get a list of instances that can be scheduled for that service - for instance in self._service.get_schedulable_instances(): - instance["account"] = self._account_id - instance["region"] = self._region - instance["service"] = self._service.service_name - instance["instance_str"] = self._instance_display_str( - instance["id"], instance["name"] + collect_metric( + InsightsMetric.from_service_counts( + service_counts, 
self._env.scheduler_frequency_minutes + ), + self._logger, + ) + + if self._env.enable_ops_monitoring: + cw_metrics.send_metrics_to_cloudwatch( + service_counts, self._env.scheduler_frequency_minutes ) - yield instance - def run(self) -> Any: - execution_start = time.perf_counter() - response, instances = self._process_account() - execution_end = time.perf_counter() + return result.to_output_dict() - time_taken = execution_end - execution_start - self._collect_op_metrics(instances=instances, time_taken=time_taken) + def _run_scheduler(self) -> SchedulingResult: + result = SchedulingResult() + self._instance_states.load( + account=self._scheduling_context.account_id, + region=self._scheduling_context.region, + ) - return response + actions_to_take: dict[SchedulingAction, list[SchedulingDecision]] = { + SchedulingAction.START: [], + SchedulingAction.STOP: [], + } - def get_desired_state_and_type( - self, schedule: InstanceSchedule, instance: Instance - ) -> tuple[ScheduleState, Optional[str]]: - # test if the instance has a maintenance window in which it must be running - if ( - instance["maintenance_window"] is not None - and schedule.use_maintenance_window is True + for decision in self.make_scheduling_decisions( + self._service.describe_tagged_instances(), + self._instance_states, + self._scheduling_context, + result_object=result, ): self._logger.info( - INF_MAINTENANCE_WINDOW, - instance["maintenance_window"].name, - instance["id"], + f"Scheduling decision for {decision.instance.display_str}: " + f"\n action: {decision.action} " + f"\n reason: {decision.reason}" ) - # get the desired start for the maintenance window at current UTC time - ( - inst_state, - inst_type, - _, - ) = instance["maintenance_window"].get_desired_state( - instance, - logger=self._logger, - dt=datetime.now( - timezone.utc - ), # todo: using current time instead of schedule time? 
- ) + if decision.action is not SchedulingAction.DO_NOTHING: + actions_to_take[decision.action].append(decision) - # if we're in the maintenance window return running state - if inst_state == InstanceSchedule.STATE_RUNNING: - return inst_state, inst_type + if decision.new_state_table_state is not None: + self._instance_states.set_instance_state( + decision.instance.id, decision.new_state_table_state + ) - # based on the schedule get the desired state and instance type for this instance - inst_state, inst_type, _ = schedule.get_desired_state( - instance, logger=self._logger, dt=self._scheduling_context.current_dt + self._handle_start_and_resize_actions( + actions_to_take[SchedulingAction.START], + result_object=result, + logger=self._logger, ) - return inst_state, inst_type - - def _process_account(self) -> tuple[ProcessAccountResult, list[Instance]]: - started_instances: StartStopItems = {} - stopped_instances: StartStopItems = {} - resized_instances: ResizeItems = {} - - self._logger.info( - INF_PROCESSING_ACCOUNT, - self._service.service_name.upper(), - self._account_id, - " using role " + (self._role_arn or ""), - self._region, + self._handle_stop_actions( + actions_to_take[SchedulingAction.STOP], + result_object=result, + logger=self._logger, ) - state_loaded = False - instances: list[Instance] = [] - - self._scheduler_start_list = [] - self._scheduler_stop_list = [] - self._schedule_resize_list = [] + self._instance_states.save() - for instance in self._scheduled_instances_in_region(): - # delay loading instance state until first instance is returned - if not state_loaded: - self._instance_states.load(self._account_id, self._region) - state_loaded = True + return result - instances.append(instance) + def make_scheduling_decisions( + self, + instances: Iterator[AbstractInstance], + instance_states: InstanceStates, + context: SchedulingContext, + result_object: SchedulingResult, + ) -> Iterator[SchedulingDecision]: + for instance in instances: + schedule = 
context.get_schedule(instance.schedule_name) - # handle terminated instances - if instance["is_terminated"]: - self._logger.debug( - DEBUG_SKIPPING_TERMINATED_INSTANCE, - instance["instance_str"], - self._region, - instance["account"], + if not schedule: + self._logger.warning( + f"{instance.display_str} is tagged with an unknown schedule: {instance.schedule_name}" + ) + yield SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=None, + reason=f"Unknown Schedule {instance.schedule_name}", ) - self._instance_states.delete_instance_state(instance["id"]) continue - # get the schedule for this instance - instance_schedule = self._scheduling_context.get_schedule( - instance["schedule_name"] - ) - if not instance_schedule: - self._logger.warning( - WARN_SKIPPING_UNKNOWN_SCHEDULE, - instance["instance_str"], - self._region, - instance["account"], - instance["schedule_name"], + # do not count instances that are tagged with invalid schedules, but do count instances that are in + # a non-schedulable state (which is typically transient) + result_object.add_checked_instance(instance) + + if not instance.is_schedulable: + yield SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=None, + reason=f"Current instance state ({instance.current_state}) is not schedulable", ) continue - self._logger.debug(DEBUG_INSTANCE_HEADER, instance["instance_str"]) - self._logger.debug( - DEBUG_CURRENT_INSTANCE_STATE, - instance["current_state"], - instance["instancetype"], - instance_schedule.name, - ) + dt = context.current_dt + stored_state = instance_states.get_instance_state(instance.id) - # based on the schedule get the desired state and instance type for this instance - desired_state, desired_type = self.get_desired_state_and_type( - instance_schedule, instance - ) + if schedule.hibernate: + instance.should_hibernate = True - # get the previous desired instance state - last_desired_state = 
self._instance_states.get_instance_state( - instance["id"] - ) - self._logger.debug( - DEBUG_CURRENT_AND_DESIRED_STATE, - instance_schedule.name, - desired_state, - last_desired_state, - instance["current_state"], - INF_DESIRED_TYPE.format(desired_type) if desired_type else "", + yield self.make_scheduling_decision(instance, schedule, stored_state, dt) + + def make_scheduling_decision( # NOSONAR -- splitting for cog-complexity would make this function harder to read + self, + instance: AbstractInstance, + schedule: InstanceSchedule, + stored_state: InstanceState, + current_dt: datetime, + ) -> SchedulingDecision: + """ + :param instance: a schedulable instance + :param schedule: the schedule the given instance is subject to + :param stored_state: the state that was stored in the dynamodb states table + :param current_dt: the current time + :return: a decision on how to schedule the instance + """ + + is_running, window_name = self._is_maintenance_window_running( + instance, current_dt + ) + if is_running: + return SchedulingDecision( + instance=instance, + action=SchedulingAction.START, + new_state_table_state=InstanceState.RUNNING, + reason=f"In active maintenance window {window_name}", ) - # last desired state None means this is the first time the instance is seen by the scheduler - if last_desired_state is InstanceSchedule.STATE_UNKNOWN: - # new instances that are running are optionally not stopped to allow them to finish possible initialization + # A Scheduling decision is based on 2 primary factors: the current state of the schedule (schedule_state), + # and the instance state that was stored during the last scheduling execution (stored_state) + # In general, the stored_state for a given instance will match the schedule_state that was present during + # the last scheduling execution. 
This allows the scheduler to identify when a schedule state has changed + # since the last execution and issue a start/stop command in response to this state transition (which typically + # occurs at beginning/end of a running period) + # This additional complexity allows customers to start/stop instances manually without InstanceScheduler + # overriding the manual action until the next regular start/stop action. Additional states InstanceStates and + # schedule flags can be used to modify this behavior to fit the customer needs (for example: enforced schedules + # and the retain_running flag) + schedule_state, new_desired_type, _ = schedule.get_desired_state( + current_dt, logger=self._logger + ) + match schedule_state: + case ScheduleState.STOPPED: + # stop_new_instances flag -- new instances that are running are optionally + # not stopped to allow them to finish possible initialization if ( - instance["is_running"] - and desired_state == InstanceSchedule.STATE_STOPPED + stored_state == InstanceState.UNKNOWN + and not schedule.stop_new_instances ): - if not instance_schedule.stop_new_instances: - self._instance_states.set_instance_state( - instance["id"], InstanceSchedule.STATE_STOPPED - ) - self._logger.debug(DEBUG_NEW_INSTANCE, instance["instance_str"]) - continue - self._process_new_desired_state( - instance, - desired_state, - desired_type, - last_desired_state, - instance_schedule.retain_running, - ) - else: - self._process_new_desired_state( - instance, - desired_state, - desired_type, - last_desired_state, - instance_schedule.retain_running, + return SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=InstanceState.STOPPED, + desired_size=new_desired_type, + reason="stop_new_instances is disabled", ) - # existing instance + # enforced flag -- always take action if schedule is enforced + if schedule.enforced: + return SchedulingDecision( + instance=instance, + action=SchedulingAction.STOP, + 
new_state_table_state=InstanceState.STOPPED, + desired_size=new_desired_type, + reason="enforced is enabled", + ) - # if enforced check the actual state with the desired state enforcing the schedule state - elif instance_schedule.enforced: + # retain_running flag -- don't stop instances flagged with retain_running if ( - instance["is_running"] - and desired_state == InstanceSchedule.STATE_STOPPED - ) or ( - not instance["is_running"] - and desired_state == InstanceSchedule.STATE_RUNNING + stored_state == InstanceState.RETAIN_RUNNING + and schedule.retain_running ): - self._logger.debug( - DEBUG_ENFORCED_STATE, - instance["instance_str"], - ( - InstanceSchedule.STATE_RUNNING - if instance["is_running"] - else InstanceSchedule.STATE_STOPPED - ), - desired_state, + return SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=InstanceState.STOPPED, + desired_size=new_desired_type, + reason="Instance is in the RETAIN_RUNNING state", ) - self._process_new_desired_state( - instance, - desired_state, - desired_type, - last_desired_state, - instance_schedule.retain_running, + + # normal scheduling behavior -- issue stop action if schedule state changed, otherwise do nothing + if stored_state != schedule_state: # type: ignore[comparison-overlap] + return SchedulingDecision( + instance=instance, + action=SchedulingAction.STOP, + new_state_table_state=InstanceState.STOPPED, + desired_size=new_desired_type, + reason=f"State transition from {stored_state} to {schedule_state}", + ) + else: + return SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=InstanceState.STOPPED, + desired_size=new_desired_type, + reason=f"No schedule state transition detected (prev state: {stored_state}, new state: {schedule_state})", ) - # if not enforced then compare the schedule state with the actual state so state of manually started/stopped - # instance it will honor that state - elif 
last_desired_state != desired_state: - self._process_new_desired_state( - instance, - desired_state, - desired_type, - last_desired_state, - instance_schedule.retain_running, - ) - self._schedule_metrics.add_schedule_metrics( - self._service.service_name, instance_schedule, instance - ) + case ScheduleState.RUNNING: + # resize handling -- top priority + if ( + instance.is_resizable + and new_desired_type + and instance.is_running + and instance.instance_type != new_desired_type + ): + # necessary to prevent the instance from hibernating + instance.resized = True + return SchedulingDecision( + instance=instance, + action=SchedulingAction.STOP, + new_state_table_state=InstanceState.STOPPED, + desired_size=new_desired_type, + reason=f"Current type {instance.instance_type} does not match desired type {new_desired_type}, " + f"stopping instance to allow for resizing to desired type", + ) - # process lists of instances that must be started or stopped - self._start_and_stop_instances() + # enforced flag -- always take action if schedule is enforced + if schedule.enforced: + return SchedulingDecision( + instance=instance, + action=SchedulingAction.START, + new_state_table_state=InstanceState.RUNNING, + desired_size=new_desired_type, + reason="enforced is enabled", + ) - # cleanup desired instance states and save - self._instance_states.cleanup([i["id"] for i in instances]) - self._instance_states.save() + # retain_running flag -- instance in stopped state is found to already be running + if ( + schedule.retain_running + and stored_state == InstanceState.STOPPED + and instance.is_running + ): + return SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=InstanceState.RETAIN_RUNNING, + desired_size=new_desired_type, + reason="Instance appears to have already been started -- apply retain_running flag", + ) - # build output structure, hold started, stopped and resized instances per region - if len(self._scheduler_start_list) > 
0: - started_instances[self._region] = [ - {i["id"]: {"schedule": i["schedule_name"]}} - for i in self._scheduler_start_list - ] - if len(self._scheduler_stop_list): - stopped_instances[self._region] = [ - {i["id"]: {"schedule": i["schedule_name"]}} - for i in self._scheduler_stop_list - ] - if len(self._schedule_resize_list) > 0: - resized_instances[self._region] = [ - { - i[0]["id"]: { - "schedule": i[0]["schedule_name"], - "old": i[0]["instancetype"], - "new": i[1], - } - } - for i in self._schedule_resize_list - ] - - # put cloudwatch metrics - if self._scheduling_context.use_metrics: - self._schedule_metrics.put_schedule_metrics() - - # output data - result: ProcessAccountResult = { - "started": started_instances, - "stopped": stopped_instances, - } - if self._service.allow_resize: - result["resized"] = resized_instances - return result, instances + # retain_running flag -- do not clear retain_running flag inside running period + if stored_state == InstanceState.RETAIN_RUNNING: + return SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=InstanceState.RETAIN_RUNNING, + desired_size=new_desired_type, + reason="Instance in retain_running state -- do not remove the flag", + ) - # handle new state of an instance - def _process_new_desired_state( - self, - instance: Instance, - desired_state: str, - desired_type: Optional[str], - last_desired_state: str, - retain_running: Optional[bool], - ) -> None: - def need_and_can_resize() -> bool: - if desired_type is not None and instance["instancetype"] != desired_type: - if not instance["allow_resize"]: - self._logger.warning( - WARN_RESIZE_NOT_SUPPORTED, - instance["instance_str"], - instance["instancetype"], + # normal scheduling behavior -- issue start action if schedule state changed, otherwise do nothing + if stored_state != schedule_state: # type: ignore[comparison-overlap] + return SchedulingDecision( + instance=instance, + action=SchedulingAction.START, + 
new_state_table_state=InstanceState.RUNNING, + desired_size=new_desired_type, + reason=f"State transition from {stored_state} to {schedule_state}", ) - return False else: - return True - return False - - def resize_instance(inst: Instance, new_type: str) -> None: - try: - # adjust instance type before starting using the resize_instance method in the service_strategy - self._service.resize_instance(instance, new_type) - - self._schedule_resize_list.append((inst, new_type)) - except Exception as ex: - # if changing the instance type does fail do not add instance to start list so it is handled a next time - self._logger.error(ERR_SETTING_INSTANCE_TYPE, str(ex)) - - # last desired status was saved as retain-running - if last_desired_state == InstanceSchedule.STATE_RETAIN_RUNNING: - # don't change last desired state desired whilst in a running period - if desired_state == InstanceSchedule.STATE_RUNNING: - pass # todo: should this be a return? - - # save last desired state as stopped (but do not stop) at the end of running period - elif desired_state == InstanceSchedule.STATE_STOPPED: - # safe new desired stopped state but keep running - self._logger.debug( - INF_DO_NOT_STOP_RETAINED_INSTANCE, - instance["id"], - InstanceSchedule.STATE_STOPPED, - ) - self._instance_states.set_instance_state( - instance["id"], InstanceSchedule.STATE_STOPPED - ) - else: - # just save new desired state - self._instance_states.set_instance_state(instance["id"], desired_state) - else: - if desired_state == InstanceSchedule.STATE_RUNNING: - if not instance["is_running"]: - inst_type = ( - desired_type - if desired_type is not None - else instance["instancetype"] - ) - self._logger.debug( - DEBUG_STARTED_REGION_INSTANCES, - instance["instance_str"], - instance["region"], - inst_type, + return SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=InstanceState.RUNNING, + desired_size=new_desired_type, + reason=f"No schedule state transition 
detected (prev state: {stored_state}, new state: {schedule_state})", ) - # for instances to be started test if resizing is required - if need_and_can_resize(): - if not desired_type: - raise ValueError("Tried to resize with no desired type") - resize_instance(instance, desired_type) + case ScheduleState.ANY: + # Any state -- never take action + return SchedulingDecision( + instance=instance, + action=SchedulingAction.DO_NOTHING, + new_state_table_state=InstanceState.ANY, + desired_size=new_desired_type, + reason="Schedule is in the any state", + ) - # append instance to list of instances to start - self._scheduler_start_list.append(instance) + case _ as unreachable: + assert_never(unreachable) - # instance already running with desired state of running - else: - # if retain running option is used in this save desired state as retained running. - if last_desired_state == InstanceSchedule.STATE_STOPPED: - if retain_running: - self._logger.debug( - DEBUG_APPLY_RETAIN_RUNNING_STATE, - desired_state, - instance["id"], - InstanceSchedule.STATE_RETAIN_RUNNING, - ) - self._instance_states.set_instance_state( - instance["id"], InstanceSchedule.STATE_RETAIN_RUNNING - ) - else: - # instance is running, set last desired state from stopped to started - self._instance_states.set_instance_state( - instance["id"], InstanceSchedule.STATE_RUNNING - ) - - # desired state is running but saved state already saves as retain running - - elif desired_state in [ - InstanceSchedule.STATE_STOPPED, - InstanceSchedule.STATE_STOPPED_FOR_RESIZE, - ]: - if instance["is_running"]: - # instance needs to be stopped - self._logger.debug( - DEBUG_STOPPED_REGION_INSTANCES, - instance["instance_str"], - instance["region"], + def _handle_start_and_resize_actions( + self, + start_actions: list[SchedulingDecision], + result_object: SchedulingResult, + logger: Logger, + ) -> None: + filtered_actions = [] + + # filter out instances that are already running + for action in start_actions: + if action.action != 
SchedulingAction.START: + raise ValueError("Non start action passed to handle_start_actions()") + + if action.instance.is_running: + logger.info( + f"skipping start for {action.instance.id}: instance is already running" + ) + continue # skip instances that are already running + + # handle resizing + if ( + action.desired_size + and action.desired_size != action.instance.instance_type + ): + if not action.instance.is_resizable: + logger.warning( + "Instance {} with type {} does not support resizing", + action.instance.id, + action.instance.instance_type, ) - # append instance to list of instances to start - if desired_state == InstanceSchedule.STATE_STOPPED_FOR_RESIZE: - instance["resized"] = True - self._scheduler_stop_list.append(instance) - # stopped instance with desired state of running but in retained state mode - # (manually stopped in running period and already running at start) else: - # just save new desired state - self._instance_states.set_instance_state( - instance["id"], InstanceSchedule.STATE_STOPPED + logger.info( + f"resizing {action.instance.id} from {action.instance.instance_type} to {action.desired_size}" ) - else: - self._instance_states.set_instance_state(instance["id"], desired_state) - - # start and stop listed instances - def _start_and_stop_instances(self) -> None: - if len(self._scheduler_start_list) > 0: - self._logger.info( - INF_STARTING_INSTANCES, - ", ".join([i["instance_str"] for i in self._scheduler_start_list]), - self._region, - ) + self._service.resize_instance(action.instance, action.desired_size) - for inst_id, state in self._service.start_instances( - self._scheduler_start_list - ): - # set state based on returned state from start action - self._instance_states.set_instance_state(inst_id, state) + result_object.add_resize_action( + action.instance, action.desired_size + ) - if len(self._scheduler_stop_list) > 0: - self._logger.info( - INF_STOPPED_INSTANCES, - ", ".join([i["instance_str"] for i in self._scheduler_stop_list]), - 
self._region, - ) - for inst_id, state in self._service.stop_instances( - self._scheduler_stop_list - ): - # set state based on start of stop action - self._instance_states.set_instance_state(inst_id, state) + # this mutation is necessary to correctly count started instances + action.instance._instance_type = action.desired_size - # ------------------------METRICS--------------------------------# - def _collect_op_metrics(self, instances: list[Instance], time_taken: float) -> None: - if should_collect_metric(InstanceCountMetric, self._logger): - self._collect_instance_count_metric(instances) + filtered_actions.append(action) - if should_collect_metric(SchedulingActionMetric, self._logger): - self._collect_scheduling_actions_metric(time_taken, instances) + for action in filtered_actions: + result_object.add_completed_action(action) - def _collect_instance_count_metric(self, instances: list[Instance]) -> None: - collect_metric( - InstanceCountMetric( - service=self._scheduling_context.service, - region=self._scheduling_context.region, - num_instances=len(instances), - num_schedules=self._count_unique_schedules(instances), - ), - logger=self._logger, + list( + self._service.start_instances( + [action.instance for action in filtered_actions] + ) ) - def _collect_scheduling_actions_metric( - self, time_taken: float, instances: list[Instance] + def _handle_stop_actions( + self, + stop_actions: list[SchedulingDecision], + result_object: SchedulingResult, + logger: Logger, ) -> None: - self._collect_usage_metrics() - self._send_usage_metrics(time_taken, instances) + filtered_actions = [] + + # filter out instances that are already stopped + for action in stop_actions: + if action.action != SchedulingAction.STOP: + raise ValueError("Non stop action passed to handle_stop_actions()") + if action.instance.is_stopped: # skip instances that are already stopped + logger.info( + f"skipping stop for {action.instance.id}: instance is not running" + ) + continue + 
filtered_actions.append(action) - @staticmethod - def _count_unique_schedules(instances: list[Instance]) -> int: - schedules = set() - for instance in instances: - schedules.add(instance.get("schedule_name")) - - return len(schedules) - - def _collect_usage_metrics(self) -> None: - for i in self._scheduler_start_list: - if i["id"] in [r[0]["id"] for r in self._schedule_resize_list]: - instance_type = [ - r[1] for r in self._schedule_resize_list if r[0]["id"] == i["id"] - ][0] - else: - instance_type = i["instancetype"] - if instance_type in self._usage_metrics["Started"]: - self._usage_metrics["Started"][instance_type] += 1 - else: - self._usage_metrics["Started"][instance_type] = 1 - - for i in self._scheduler_stop_list: - if i["instancetype"] in self._usage_metrics["Stopped"]: - self._usage_metrics["Stopped"][i["instancetype"]] += 1 - else: - self._usage_metrics["Stopped"][i["instancetype"]] = 1 - - for resized_instance in self._schedule_resize_list: - type_change = "{}-{}".format( - resized_instance[0]["instancetype"], resized_instance[1] - ) - if type_change in self._usage_metrics["Resized"]: - self._usage_metrics["Resized"][type_change] += 1 - else: - self._usage_metrics["Resized"][type_change] = 1 - - def _send_usage_metrics(self, time_taken: float, instances: list[Instance]) -> None: - for s in list(self._usage_metrics): - if len(self._usage_metrics[s]) == 0: - del self._usage_metrics[s] - if len(self._usage_metrics) > 0: - actions_taken = [] - for action in self._usage_metrics: - for instance_type in self._usage_metrics.get(action, {}): - actions_taken.append( - ActionTaken( - instances=self._usage_metrics[action][instance_type], - action=action, - instanceType=instance_type, - service=self._service.service_name, - ) - ) + for action in filtered_actions: + result_object.add_completed_action(action) - collect_metric( - SchedulingActionMetric( - duration_seconds=time_taken, - actions=actions_taken, - num_instances_scanned=len(instances), - 
num_unique_schedules=self._count_unique_schedules(instances), - ), - logger=self._logger, + list( + self._service.stop_instances( + [action.instance for action in filtered_actions] ) + ) + + def _is_maintenance_window_running( + self, + instance: AbstractInstance, + current_dt: datetime, + ) -> tuple[bool, str | None]: + """ + Checks maintenance window is enabled for a schedule and currently running on instance + """ + if instance.maintenance_windows: + for maintenance_window in instance.maintenance_windows: + # get the desired state for the maintenance window at current UTC time + ( + inst_state, + _, + _, + ) = maintenance_window.get_desired_state( + logger=self._logger, + dt=current_dt, + ) + if inst_state == ScheduleState.RUNNING: + return True, maintenance_window.name + + return False, None diff --git a/source/app/instance_scheduler/schedulers/instance_states.py b/source/app/instance_scheduler/schedulers/instance_states.py index 20384bc5..317ed589 100644 --- a/source/app/instance_scheduler/schedulers/instance_states.py +++ b/source/app/instance_scheduler/schedulers/instance_states.py @@ -6,7 +6,7 @@ from botocore.exceptions import ClientError -from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.schedulers.states import InstanceState, is_valid_instance_state from instance_scheduler.util.dynamodb_utils import DynamoDBUtils from instance_scheduler.util.logger import Logger @@ -107,7 +107,7 @@ def load(self, account: str, region: str) -> None: if InstanceStates.INSTANCE_TABLE_PURGE in item: self._instances_to_purge = item[InstanceStates.INSTANCE_TABLE_PURGE] - def set_instance_state(self, instance_id: str, new_state: str) -> None: + def set_instance_state(self, instance_id: str, new_state: InstanceState) -> None: """ Sets the state of an instance :param instance_id: id of the instance @@ -121,14 +121,14 @@ def set_instance_state(self, instance_id: str, new_state: str) -> None: self._state_info[instance_id] = 
new_state self._dirty = True - def get_instance_state(self, instance_id): + def get_instance_state(self, instance_id: str) -> InstanceState: """ gets the stored state of an instance :param instance_id: id of the instance :return: """ state = self._state_info.get(instance_id, None) - return state if state else InstanceSchedule.STATE_UNKNOWN + return state if is_valid_instance_state(state) else InstanceState.UNKNOWN def delete_instance_state(self, instance_id: str) -> None: """ diff --git a/source/app/instance_scheduler/schedulers/scheduling_decision.py b/source/app/instance_scheduler/schedulers/scheduling_decision.py new file mode 100644 index 00000000..2d7e7463 --- /dev/null +++ b/source/app/instance_scheduler/schedulers/scheduling_decision.py @@ -0,0 +1,23 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import dataclasses +from enum import Enum +from typing import Optional + +from instance_scheduler.schedulers.states import InstanceState +from instance_scheduler.service.abstract_instance import AbstractInstance + + +class SchedulingAction(Enum): + DO_NOTHING = None + START = "start" + STOP = "stop" + + +@dataclasses.dataclass +class SchedulingDecision: + instance: AbstractInstance + action: SchedulingAction + new_state_table_state: Optional[InstanceState] + reason: str + desired_size: Optional[str] = None diff --git a/source/app/instance_scheduler/schedulers/scheduling_result.py b/source/app/instance_scheduler/schedulers/scheduling_result.py new file mode 100644 index 00000000..9d80661a --- /dev/null +++ b/source/app/instance_scheduler/schedulers/scheduling_result.py @@ -0,0 +1,117 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from typing import Any, TypedDict + +from instance_scheduler.ops_metrics.metric_type.scheduling_action_metric import ( + ActionTaken, +) +from instance_scheduler.ops_monitoring.instance_counts import InstanceCountsAggregator +from instance_scheduler.schedulers.scheduling_decision import ( + SchedulingAction, + SchedulingDecision, +) +from instance_scheduler.service.abstract_instance import AbstractInstance + + +class ScheduleItem(TypedDict): + schedule: str + + +# dict[instance_id, schedule] +InstanceItem = dict[str, ScheduleItem] + + +class SchedulingResult: + """A result object for accumulating the final output of a scheduling execution""" + + def __init__(self) -> None: + self.instance_counts = InstanceCountsAggregator() + + # format of started/stopped/resized fields: + # { + # instance_type: [ + # {instance_id: {"schedule": schedule_name}} + # {instance_id: {"schedule": schedule_name}} + # ] + # } + self.started: dict[str, list[InstanceItem]] = {} + self.stopped: dict[str, list[InstanceItem]] = {} + self.resized: dict[str, list[InstanceItem]] = {} + + def to_output_dict(self) -> dict[str, Any]: + return { + "num_schedules_checked": len(self.instance_counts.by_schedule()), + "instance_counts": self.instance_counts.by_type(), + "started": self.started, + "stopped": self.stopped, + "resized": self.resized, + } + + def to_actions_taken(self, service: str) -> list[ActionTaken]: + actions_taken = [] + for instance_type, actions in self.started.items(): + actions_taken.append( + ActionTaken( + instanceType=instance_type, + instances=len(actions), + action="Started", + service=service, + ) + ) + for instance_type, actions in self.stopped.items(): + actions_taken.append( + ActionTaken( + instanceType=instance_type, + instances=len(actions), + action="Stopped", + service=service, + ) + ) + for instance_type, actions in self.resized.items(): + # instance_type str is of format from-to + actions_taken.append( + ActionTaken( + 
instanceType=instance_type, + instances=len(actions), + action="Resized", + service=service, + ) + ) + + return actions_taken + + def add_checked_instance(self, instance: AbstractInstance) -> None: + self.instance_counts.by_type().increment( + instance.instance_type, instance.current_state + ) + self.instance_counts.by_schedule().increment( + instance.schedule_name, instance.current_state + ) + + def add_resize_action(self, instance: AbstractInstance, resized_to: str) -> None: + type_str = f"{instance.instance_type}-{resized_to}" + if type_str not in self.resized: + self.resized[type_str] = [] + + self.resized[type_str].append( + {instance.id: {"schedule": instance.schedule_name}} + ) + + def add_completed_action(self, action: SchedulingDecision) -> None: + instance_type = action.instance.instance_type + + if action.action == SchedulingAction.START: + if instance_type not in self.started: + self.started[instance_type] = [] + + self.started[instance_type].append( + {action.instance.id: {"schedule": action.instance.schedule_name}} + ) + + elif action.action == SchedulingAction.STOP: + if instance_type not in self.stopped: + self.stopped[instance_type] = [] + + self.stopped[instance_type].append( + {action.instance.id: {"schedule": action.instance.schedule_name}} + ) diff --git a/source/app/instance_scheduler/schedulers/states.py b/source/app/instance_scheduler/schedulers/states.py new file mode 100644 index 00000000..57b055ce --- /dev/null +++ b/source/app/instance_scheduler/schedulers/states.py @@ -0,0 +1,27 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from enum import Enum +from typing import Optional, TypeGuard + + +class ScheduleState(str, Enum): + """possible desired states from a Schedule""" + + RUNNING = "running" + ANY = "any" + STOPPED = "stopped" + + +class InstanceState(str, Enum): + """additional states used for scheduling that can be saved to a specific instance""" + + RUNNING = "running" + ANY = "any" + STOPPED = "stopped" + UNKNOWN = "unknown" + STOPPED_FOR_RESIZE = "stopped_for_resize" + RETAIN_RUNNING = "retain-running" + + +def is_valid_instance_state(value: Optional[str]) -> TypeGuard[InstanceState]: + return any(value == state for state in InstanceState) diff --git a/source/app/instance_scheduler/service/__init__.py b/source/app/instance_scheduler/service/__init__.py index 07855338..56797712 100644 --- a/source/app/instance_scheduler/service/__init__.py +++ b/source/app/instance_scheduler/service/__init__.py @@ -1,22 +1,11 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -from collections.abc import Mapping as _Mapping -from typing import Any as _Any -from typing import Final as _Final - -from .base import Service, ServiceArgs +from .base import Service from .ec2 import Ec2Service from .rds import RdsService -services: _Final[_Mapping[str, type[Service[_Any]]]] = { - "ec2": Ec2Service, - "rds": RdsService, -} - __all__ = [ "Ec2Service", "Service", - "ServiceArgs", - "services", "RdsService", ] diff --git a/source/app/instance_scheduler/service/abstract_instance.py b/source/app/instance_scheduler/service/abstract_instance.py new file mode 100644 index 00000000..d713a33c --- /dev/null +++ b/source/app/instance_scheduler/service/abstract_instance.py @@ -0,0 +1,75 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Sequence + +from instance_scheduler.configuration.instance_schedule import InstanceSchedule + + +@dataclass(kw_only=True) +class AbstractInstance(ABC): + _id: str + _name: str + _schedule_name: str + _current_state: str + _instance_type: str + _tags: dict[str, str] + _maintenance_windows: Sequence[InstanceSchedule] + + # mutable leftovers from original resizing/hibernate design of EC2, should probably be refactored + resized: bool = False + should_hibernate: bool = False + + @property + def id(self) -> str: + return self._id + + @property + def name(self) -> str: + return self._name + + @property + def schedule_name(self) -> str: + return self._schedule_name + + @property + def current_state(self) -> str: + return self._current_state + + @property + def tags(self) -> dict[str, str]: + return self._tags + + @property + def instance_type(self) -> str: + return self._instance_type + + @property + def maintenance_windows(self) -> Sequence[InstanceSchedule]: + return self._maintenance_windows + + @property + @abstractmethod + def display_str(self) -> str: + pass + + @property + @abstractmethod + def is_schedulable(self) -> bool: + pass + + @property + @abstractmethod + def is_running(self) -> bool: + pass + + @property + @abstractmethod + def is_stopped(self) -> bool: + pass + + @property + @abstractmethod + def is_resizable(self) -> bool: + pass diff --git a/source/app/instance_scheduler/service/asg.py b/source/app/instance_scheduler/service/asg.py new file mode 100644 index 00000000..3060bd99 --- /dev/null +++ b/source/app/instance_scheduler/service/asg.py @@ -0,0 +1,620 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import json +from collections.abc import Iterable, Iterator +from dataclasses import dataclass +from datetime import datetime, timedelta, timezone +from typing import TYPE_CHECKING, Final, List, Optional +from zoneinfo import ZoneInfo + +from aws_lambda_powertools.logging import Logger +from boto3 import Session + +from instance_scheduler.boto_retry import get_client_with_standard_retry +from instance_scheduler.configuration.time_utils import parse_time_str +from instance_scheduler.cron.asg import ( + to_asg_expr_monthdays, + to_asg_expr_months, + to_asg_expr_weekdays, +) +from instance_scheduler.cron.parser import ( + parse_monthdays_expr, + parse_months_expr, + parse_weekdays_expr, +) +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.schedule_definition import ScheduleDefinition + +if TYPE_CHECKING: + from mypy_boto3_autoscaling.client import AutoScalingClient + from mypy_boto3_autoscaling.type_defs import ( + AutoScalingGroupPaginatorTypeDef, + FilterTypeDef, + ScheduledUpdateGroupActionRequestTypeDef, + ScheduledUpdateGroupActionTypeDef, + TagDescriptionTypeDef, + ) +else: + AutoScalingClient = object + AutoScalingGroupPaginatorTypeDef = object + FilterTypeDef = object + ScheduledUpdateGroupActionRequestTypeDef = object + ScheduledUpdateGroupActionTypeDef = object + TagDescriptionTypeDef = object + +logger: Final = Logger(log_uncaught_exceptions=True, use_rfc3339=True) + + +class AsgValidationError(Exception): + pass + + +class BatchDeleteScheduledActionsError(Exception): + pass + + +class BatchPutScheduledActionsError(Exception): + pass + + +@dataclass(frozen=True) +class AsgTag: + schedule: str + ttl: str + min_size: Optional[int] = None + max_size: Optional[int] = None + desired_size: Optional[int] = None + + @classmethod + def from_group( + cls, + *, + group: AutoScalingGroupPaginatorTypeDef, + asg_scheduled_tag_key: str, + ) -> "AsgTag": + """ + Return an auto 
scaling group scheduled tag configured by the solution if existing. + When validation fails, raise "AsgValidationError". + + :param group: an auto scaling group + :param asg_scheduled_tag_key: an auto scaling scheduled tag key ("scheduled" by default) + :return: the auto scaling group scheduled tag + """ + + scheduled_tags: Final[List[TagDescriptionTypeDef]] = list( + filter( + lambda tag: tag.get("Key") == asg_scheduled_tag_key, + group.get("Tags", []), + ) + ) + + if len(scheduled_tags) == 0: + raise AsgValidationError("Scheduled tag missing") + + try: + scheduled_tag_value: Final = json.loads(scheduled_tags[0].get("Value", "")) + except Exception: + raise AsgValidationError("Unable to parse Scheduled tag value") + + if not isinstance(scheduled_tag_value, dict): + raise AsgValidationError("Invalid Scheduled tag value") + + # When there's no value in the tag which unlikely happens unless the solution adds new value or a user modifies values manually, + # it sets empty values by default. With default values, it would be schedulable as the values are not valid. + # Refer to `is_still_valid` method how the solution determines if the tag is still valid. + # Size values are optional so when it comes to schedule, any missing value causes not able to schedule with tag values + # as missing value is treated as stopped state which can't be scheduled. + return AsgTag( + schedule=scheduled_tag_value.get("schedule", ""), + ttl=scheduled_tag_value.get("ttl", ""), + min_size=scheduled_tag_value.get("min_size"), + max_size=scheduled_tag_value.get("max_size"), + desired_size=scheduled_tag_value.get("desired_size"), + ) + + def is_still_valid( + self, *, schedule_name: str, is_schedule_override: bool + ) -> tuple[bool, str]: + """ + Check if the auto scaling group scheduled tag is still valid. + This decides if the auto scaling group needs to be scheduled or updated. + If any of the following are true, the group needs to be updated: + 1. 
the schedule has changed, so we need to override + 2. the tag we applied is for a different schedule + 3. the tag we applied is nearing expiration + + :param scheduled_tag_value: an auto scaling group scheduled tag value + :param schedule_name: a schedule name + :param is_schedule_override: a flag to check if it is to override the schedule or not + :return: if the auto scaling group scheduled tag is valid and a reason behind the decision + """ + + if is_schedule_override: + return False, "Overridden" + + configured_schedule: Final = self.schedule + + if configured_schedule != schedule_name: + return False, "Configured for a different schedule" + + ttl: Final[str] = self.ttl + + try: + ttl_dt = datetime.fromisoformat(ttl) + except Exception: + return False, "Unable to parse configuration TTL" + + if ttl_dt < datetime.now(timezone.utc) + timedelta(days=1): + return False, "Configuration expiring in less than one day" + + return ( + True, + f"All conditions met, current config valid for schedule {schedule_name} until {ttl_dt.isoformat()}", + ) + + def __str__(self) -> str: + return json.dumps( + { + "schedule": self.schedule, + "ttl": self.ttl, + "min_size": self.min_size, + "max_size": self.max_size, + "desired_size": self.desired_size, + } + ) + + +@dataclass(frozen=True) +class AsgSize: + min_size: int + desired_size: int + max_size: int + + def is_stopped_state(self) -> bool: + return self.min_size == 0 and self.desired_size == 0 and self.max_size == 0 + + @classmethod + def from_group(cls, group: AutoScalingGroupPaginatorTypeDef) -> "AsgSize": + return AsgSize( + min_size=group["MinSize"], + desired_size=group["DesiredCapacity"], + max_size=group["MaxSize"], + ) + + @classmethod + def from_tag(cls, asg_tag: AsgTag) -> "AsgSize": + """ + Get an auto scaling group size from a scheduled tag configured by the solution. + When any size value is invalid, return stopped size so it can't be scheduled with tag values. 
+ + :param asg_tag: a scheduled tag value configured by the solution + :return: the auto scaling group size + """ + + if not isinstance(asg_tag.min_size, int): + logger.info( + "Unable to determine auto scaling size from the tag as min size is invalid." + ) + return AsgSize.stopped() + + if not isinstance(asg_tag.max_size, int): + logger.info( + "Unable to determine auto scaling size from the tag as max size is invalid." + ) + return AsgSize.stopped() + + if not isinstance(asg_tag.desired_size, int): + logger.info( + "Unable to determine auto scaling size from the tag as desired size is invalid." + ) + return AsgSize.stopped() + + return AsgSize( + min_size=asg_tag.min_size, + desired_size=asg_tag.desired_size, + max_size=asg_tag.max_size, + ) + + @classmethod + def stopped(cls) -> "AsgSize": + return AsgSize(min_size=0, desired_size=0, max_size=0) + + +@dataclass(frozen=True) +class AsgScheduleMetadata: + auto_scaling_group_name: str + schedule_name: str + new_schedule_actions: list[ScheduledUpdateGroupActionRequestTypeDef] + existing_actions_configured_by_solution: list[ScheduledUpdateGroupActionTypeDef] + asg_size: AsgSize + + +class AsgService: + def __init__( + self, + *, + session: Session, + schedule_tag_key: str, + asg_scheduled_tag_key: str, + rule_prefix: str, + ) -> None: + self._schedule_tag_key: Final = schedule_tag_key + self._asg_scheduled_tag_key: Final = asg_scheduled_tag_key + self._rule_prefix: Final = rule_prefix + self._autoscaling: Final[AutoScalingClient] = get_client_with_standard_retry( + "autoscaling", session=session + ) + + def get_schedulable_groups( + self, schedule_names: list[str] | None = None + ) -> Iterator[AutoScalingGroupPaginatorTypeDef]: + paginator: Final = self._autoscaling.get_paginator( + "describe_auto_scaling_groups" + ) + filters: Final[list[FilterTypeDef]] = [] + + if schedule_names is not None: + filters.append( + {"Name": f"tag:{self._schedule_tag_key}", "Values": schedule_names} + ) + else: + filters.append({"Name": 
"tag-key", "Values": [self._schedule_tag_key]}) + + for page in paginator.paginate(Filters=filters): + yield from page["AutoScalingGroups"] + + def schedule_auto_scaling_group( + self, + group: AutoScalingGroupPaginatorTypeDef, + schedule_definition: ScheduleDefinition, + period_definitions: list[PeriodDefinition], + is_schedule_override: bool = False, + ) -> None: + """ + Schedule an auto scaling group. + An auto scaling group can be scheduled at least one of the following is true: + 1. the solution configured tag (auto scaling scheduled tag) should not be valid anymore. + 2. when a data in the config DynamoDB table changes, it overrides the schedule with new schedule definitions. + 3. auto scaling sizes should not be stopped. + + :param group: an auto scaling group + :param schedule_definition: a schedule definition + :param period_definitions: scheduling period definitions + :param is_schedule_override: a flag to check if it is to override the schedule or not + """ + + auto_scaling_group_name: Final = group["AutoScalingGroupName"] + schedule_name: Final = schedule_definition.name + + try: + asg_tag = AsgTag.from_group( + group=group, asg_scheduled_tag_key=self._asg_scheduled_tag_key + ) + except Exception as e: + logger.info(f"Scheduled tag validation failure: {e}") + asg_tag = AsgTag(schedule="", ttl="") + + valid, reason = asg_tag.is_still_valid( + schedule_name=schedule_name, + is_schedule_override=is_schedule_override, + ) + + if valid: + logger.info( + f"Skipping configuring group {auto_scaling_group_name} with schedule {schedule_definition.name}: {reason}" + ) + return + + logger.info( + f"Configuring group {auto_scaling_group_name} with schedule {schedule_definition.name}: {reason}" + ) + + steady_state = self._get_steady_state(group=group, asg_tag=asg_tag) + + if steady_state.is_stopped_state(): + logger.error( + f'Unable to determine "running" state for group {auto_scaling_group_name}' + ) + return + + # convert this schedule to actions now to fail fast 
if the schedule is invalid + new_schedule_actions: Final[list[ScheduledUpdateGroupActionRequestTypeDef]] = ( + list( + schedule_to_actions( + schedule_definition, + period_definitions, + steady_state, + self._rule_prefix, + ) + ) + ) + + # need to identify any actions we have configured so they can be replaced + existing_actions: Final[Iterable[ScheduledUpdateGroupActionTypeDef]] = list( + self._describe_scheduled_actions( + auto_scaling_group_name=auto_scaling_group_name, + ) + ) + + existing_actions_configured_by_solution: Final[ + list[ScheduledUpdateGroupActionTypeDef] + ] = list( + filter( + lambda action: action.get("ScheduledActionName", "").startswith( + self._rule_prefix + ), + existing_actions, + ) + ) + + self._configure_schedules( + asg_schedule_metadata=AsgScheduleMetadata( + auto_scaling_group_name=auto_scaling_group_name, + schedule_name=schedule_name, + new_schedule_actions=new_schedule_actions, + existing_actions_configured_by_solution=existing_actions_configured_by_solution, + asg_size=steady_state, + ) + ) + + def _get_steady_state( + self, group: AutoScalingGroupPaginatorTypeDef, asg_tag: AsgTag + ) -> AsgSize: + """ + Get the steady state of an auto scaling group size to be scheduled. + The current size on the auto scaling group is prioritized. + + :param group: an auto scaling group + :param asg_tag: an auto scaling group scheduled tag value + :return: steady state of the auto scaling group + """ + + current_size: Final = AsgSize.from_group(group=group) + tag_size: Final = AsgSize.from_tag(asg_tag=asg_tag) + + if not current_size.is_stopped_state(): + return current_size + + return tag_size + + def _describe_scheduled_actions( + self, auto_scaling_group_name: str + ) -> Iterator[ScheduledUpdateGroupActionTypeDef]: + """ + Generator to get existing scheduled actions from an auto scaling group. + It returns existing scheduled update group actions on an auto scaling group. 
+ + :param auto_scaling_group_name: auto scaling group name + :return: a scheduled action + """ + + paginator: Final = self._autoscaling.get_paginator("describe_scheduled_actions") + + for page in paginator.paginate(AutoScalingGroupName=auto_scaling_group_name): + for action in page["ScheduledUpdateGroupActions"]: + yield action + + def _batch_delete_scheduled_action( + self, + scheduled_actions: ( + list[ScheduledUpdateGroupActionTypeDef] + | list[ScheduledUpdateGroupActionRequestTypeDef] + ), + auto_scaling_group_name: str, + ) -> None: + """ + Batch delete scheduled actions configured by the solution. + When any failed scheduled actions happen, raise `BatchDeleteScheduledActionsError`. + + :param scheduled_actions: solution configured scheduled actions + :param auto_scaling_group_name: auto scaling group name + """ + if len(scheduled_actions) > 0: + delete_response = self._autoscaling.batch_delete_scheduled_action( + AutoScalingGroupName=auto_scaling_group_name, + ScheduledActionNames=list( + action["ScheduledActionName"] for action in scheduled_actions + ), + ) + + # deleting individual actions may fail, need to check response + if len(delete_response["FailedScheduledActions"]) > 0: + raise BatchDeleteScheduledActionsError( + f'Failed to delete some actions: {delete_response["FailedScheduledActions"]}' + ) + + def _batch_put_scheduled_update_group_action( + self, + scheduled_update_group_actions: list[ScheduledUpdateGroupActionRequestTypeDef], + auto_scaling_group_name: str, + ) -> None: + """ + Batch put scheduled actions configured by the solution. 
+ + :param scheduled_update_group_actions: solution configured scheduled actions + :param auto_scaling_group_name: auto scaling group name + """ + + if len(scheduled_update_group_actions) > 0: + put_response = self._autoscaling.batch_put_scheduled_update_group_action( + AutoScalingGroupName=auto_scaling_group_name, + ScheduledUpdateGroupActions=scheduled_update_group_actions, + ) + + # creating individual actions may fail, need to check response + if len(put_response["FailedScheduledUpdateGroupActions"]) > 0: + raise BatchPutScheduledActionsError( + f'Failed to put some actions: {put_response["FailedScheduledUpdateGroupActions"]}' + ) + + def _configure_schedules(self, asg_schedule_metadata: AsgScheduleMetadata) -> None: + """ + Configure auto scaling schedules. + 1. Delete all existing scheduled actions configured by the solution previously. + 2. Put new or updated scheduled actions configured by the solution. + 3. Create or update the auto scaling tag to have a solution configured tag. + + When 1 fails, it does not require rollback as there is no resource to revert. + When 2 or 3 fails, it attempts rollback as it needs to have the previously scheduled actions correctly. 
+ + :param asg_schedule_metadata: auto scaling group schedule metadata to configure schedules + """ + + self._batch_delete_scheduled_action( + scheduled_actions=asg_schedule_metadata.existing_actions_configured_by_solution, + auto_scaling_group_name=asg_schedule_metadata.auto_scaling_group_name, + ) + + try: + self._batch_put_scheduled_update_group_action( + scheduled_update_group_actions=asg_schedule_metadata.new_schedule_actions, + auto_scaling_group_name=asg_schedule_metadata.auto_scaling_group_name, + ) + + self._autoscaling.create_or_update_tags( + Tags=[ + { + "ResourceType": "auto-scaling-group", + "ResourceId": asg_schedule_metadata.auto_scaling_group_name, + "Key": self._asg_scheduled_tag_key, + "Value": str( + AsgTag( + schedule=asg_schedule_metadata.schedule_name, + ttl=( + datetime.now(timezone.utc) + timedelta(days=30) + ).isoformat(), + min_size=asg_schedule_metadata.asg_size.min_size, + max_size=asg_schedule_metadata.asg_size.max_size, + desired_size=asg_schedule_metadata.asg_size.desired_size, + ) + ), + "PropagateAtLaunch": False, + } + ] + ) + except Exception: + self._rollback_schedule_and_raise( + asg_schedule_metadata=asg_schedule_metadata + ) + + raise + + def _rollback_schedule_and_raise( + self, + asg_schedule_metadata: AsgScheduleMetadata, + ) -> None: + """ + When a rollback is required, attempt it. + 1. Delete the scheduled actions that the solution configured this iteration. + 2. Put back the existing scheduled actions that the solution configured previously. + + When any exception happens, it only logs so it can proceed with the next actions. 
+ + :param asg_schedule_metadata: auto scaling group schedule metadata for rollback + """ + + logger.exception( + f"Failed to configure group {asg_schedule_metadata.auto_scaling_group_name} with schedule {asg_schedule_metadata.schedule_name}, attempting rollback" + ) + + try: + self._batch_delete_scheduled_action( + scheduled_actions=asg_schedule_metadata.new_schedule_actions, + auto_scaling_group_name=asg_schedule_metadata.auto_scaling_group_name, + ) + except Exception: + logger.exception("Failed to delete some actions") + + try: + self._batch_put_scheduled_update_group_action( + scheduled_update_group_actions=list( + action_description_to_request( + asg_schedule_metadata.existing_actions_configured_by_solution + ) + ), + auto_scaling_group_name=asg_schedule_metadata.auto_scaling_group_name, + ) + except Exception: + logger.exception("Failed to configure some actions") + + +def action_description_to_request( + actions: Iterable[ScheduledUpdateGroupActionTypeDef], +) -> Iterator[ScheduledUpdateGroupActionRequestTypeDef]: + for action in actions: + result: ScheduledUpdateGroupActionRequestTypeDef = { + "ScheduledActionName": action["ScheduledActionName"] + } + if "StartTime" in action: + result["StartTime"] = action["StartTime"] + if "EndTime" in action: + result["EndTime"] = action["EndTime"] + if "Recurrence" in action: + result["Recurrence"] = action["Recurrence"] + if "MinSize" in action: + result["MinSize"] = action["MinSize"] + if "DesiredCapacity" in action: + result["DesiredCapacity"] = action["DesiredCapacity"] + if "MaxSize" in action: + result["MaxSize"] = action["MaxSize"] + if "TimeZone" in action: + result["TimeZone"] = action["TimeZone"] + yield result + + +def schedule_to_actions( + schedule_definition: ScheduleDefinition, + period_definitions: list[PeriodDefinition], + steady_state: AsgSize, + rule_prefix: str, +) -> Iterator[ScheduledUpdateGroupActionRequestTypeDef]: + timezone: Final = schedule_definition.build_timezone() + + for 
period_definition in period_definitions: + yield from period_to_actions( + period_definition, timezone, steady_state, rule_prefix + ) + + +def period_to_actions( + period_definition: PeriodDefinition, + timezone: ZoneInfo, + steady_state: AsgSize, + rule_prefix: str, +) -> Iterator[ScheduledUpdateGroupActionRequestTypeDef]: + # ensure period names are always compatible with scheduled action names + day_of_month: Final = to_asg_expr_monthdays( + parse_monthdays_expr(period_definition.monthdays) + ) + month_of_year: Final = to_asg_expr_months( + parse_months_expr(period_definition.months) + ) + day_of_week: Final = to_asg_expr_weekdays( + parse_weekdays_expr(period_definition.weekdays) + ) + + if period_definition.begintime: + begintime: Final = parse_time_str(period_definition.begintime) + hour = begintime.hour + minute = begintime.minute + yield { + "ScheduledActionName": f"{rule_prefix}{period_definition.name}Start", + "Recurrence": f"{minute} {hour} {day_of_month} {month_of_year} {day_of_week}", + "MinSize": steady_state.min_size, + "MaxSize": steady_state.max_size, + "DesiredCapacity": steady_state.desired_size, + "TimeZone": str(timezone), + } + + if period_definition.endtime: + endtime: Final = parse_time_str(period_definition.endtime) + hour = endtime.hour + minute = endtime.minute + yield { + "ScheduledActionName": f"{rule_prefix}{period_definition.name}Stop", + "Recurrence": f"{minute} {hour} {day_of_month} {month_of_year} {day_of_week}", + "MinSize": 0, + "MaxSize": 0, + "DesiredCapacity": 0, + "TimeZone": str(timezone), + } diff --git a/source/app/instance_scheduler/service/base.py b/source/app/instance_scheduler/service/base.py index b7a24f0f..9a61b790 100644 --- a/source/app/instance_scheduler/service/base.py +++ b/source/app/instance_scheduler/service/base.py @@ -2,43 +2,23 @@ # SPDX-License-Identifier: Apache-2.0 from abc import ABC, abstractmethod from collections.abc import Iterator -from typing import Generic, TypedDict, TypeVar +from typing import 
Generic, TypeVar -from boto3.session import Session +from instance_scheduler.schedulers.states import ScheduleState +from instance_scheduler.service.abstract_instance import AbstractInstance -from instance_scheduler import ScheduleState -from instance_scheduler.configuration.instance_schedule import Instance -from instance_scheduler.configuration.scheduling_context import SchedulingContext -from instance_scheduler.util.logger import Logger - - -class ServiceArgs(TypedDict): - account_id: str - logger: Logger - stack_name: str - session: Session - scheduling_context: SchedulingContext - - -T = TypeVar("T", bound=Instance) +T = TypeVar("T", bound=AbstractInstance) class Service(Generic[T], ABC): - def __init__(self, _: ServiceArgs) -> None: - """noop""" @property @abstractmethod def service_name(self) -> str: pass - @property - @abstractmethod - def allow_resize(self) -> bool: - pass - @abstractmethod - def get_schedulable_instances(self) -> list[T]: + def describe_tagged_instances(self) -> Iterator[T]: pass @abstractmethod diff --git a/source/app/instance_scheduler/service/ec2.py b/source/app/instance_scheduler/service/ec2.py index 8ac7aa48..0cb92f4b 100644 --- a/source/app/instance_scheduler/service/ec2.py +++ b/source/app/instance_scheduler/service/ec2.py @@ -3,21 +3,29 @@ from collections.abc import Iterator from enum import IntEnum from itertools import chain -from typing import TYPE_CHECKING, Final, Literal +from typing import TYPE_CHECKING, Final, List, Optional -from boto3 import Session +from botocore.exceptions import ClientError -from instance_scheduler import ScheduleState -from instance_scheduler.configuration.instance_schedule import ( - Instance, - InstanceSchedule, +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.configuration.scheduling_context import ( + SchedulingContext, + TagTemplate, ) -from instance_scheduler.configuration.scheduling_context import TagTemplate -from 
instance_scheduler.maint_win import EC2SSMMaintenanceWindows -from instance_scheduler.service import Service, ServiceArgs -from instance_scheduler.util import get_boto_config -from instance_scheduler.util.app_env import get_app_env -from instance_scheduler.util.batch import FailureResponse, bisect_retry +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, +) +from instance_scheduler.maint_win.maintenance_window_context import ( + MaintenanceWindowContext, +) +from instance_scheduler.model import EC2SSMMaintenanceWindow +from instance_scheduler.model.store.dynamo_mw_store import DynamoMWStore +from instance_scheduler.schedulers.states import ScheduleState +from instance_scheduler.service import Service +from instance_scheduler.service.ec2_instance import EC2Instance +from instance_scheduler.util.batch import bisect_retry +from instance_scheduler.util.logger import Logger +from instance_scheduler.util.session_manager import AssumedRole if TYPE_CHECKING: from mypy_boto3_ec2.client import EC2Client @@ -44,191 +52,142 @@ def get_tags(instance: InstanceTypeDef) -> dict[str, str]: return {tag["Key"]: tag["Value"] for tag in instance.get("Tags", [])} -class Ec2Service(Service[Instance]): - def __init__(self, args: ServiceArgs) -> None: - Service.__init__(self, args) - - self._spoke_session: Final = args["session"] - self._spoke_account_id: Final = args["account_id"] - self._logger: Final = args["logger"] - self._scheduling_context: Final = args["scheduling_context"] - - self._spoke_region: Final = self._spoke_session.region_name - self._scheduler_tag_key: Final = self._scheduling_context.tag_name - - self._ec2: Final[EC2Client] = self._spoke_session.client( - "ec2", config=get_boto_config() - ) - - self._maintenance_window_schedules: dict[ - str, InstanceSchedule | Literal["NOT-FOUND"] - ] = {} +class Ec2Service(Service[EC2Instance]): - app_env: Final = get_app_env() + mw_context: 
Optional[MaintenanceWindowContext] = None - self._maintenance_windows = EC2SSMMaintenanceWindows( - hub_session=Session(), - spoke_account_id=self._spoke_account_id, - spoke_session=self._spoke_session, - table_name=app_env.maintenance_window_table_name, - scheduler_interval=app_env.scheduler_frequency_minutes, - logger=self._logger, - ) + def __init__( + self, + assumed_scheduling_role: AssumedRole, + logger: Logger, + scheduling_context: SchedulingContext, + env: SchedulingRequestEnvironment, + ) -> None: + self._spoke_session: Final = assumed_scheduling_role + self._logger: Final = logger + self._scheduling_context: Final = scheduling_context + self._env: Final = env + self._scheduler_tag_key: Final = self._env.schedule_tag_key + + self._ec2: Final[EC2Client] = self._spoke_session.client("ec2") + + if env.enable_ec2_ssm_maintenance_windows: + self.mw_context = MaintenanceWindowContext( + scheduling_context=self._scheduling_context, + spoke_scheduler_role=assumed_scheduling_role, + logger=self._logger, + mw_store=DynamoMWStore(self._env.maintenance_window_table_name), + ) @property def service_name(self) -> str: return "ec2" - @property - def allow_resize(self) -> bool: - return True - - def get_schedulable_instances(self) -> list[Instance]: + def describe_tagged_instances(self) -> Iterator[EC2Instance]: """ describe all EC2 instances in the target region and return `Instance` data for each instance that is schedulable - an instance is schedulable if it satisifes: + an instance is schedulable if it satisfies: - in the `running` or `stopped` state - tagged with the schedule tag key also describe all maintenance windows, reconcile the DB with the service, and create `InstanceSchedule`s for each window """ - schedules_with_hibernation: Final = frozenset( - s.name for s in self._scheduling_context.schedules.values() if s.hibernate - ) - - self._logger.info( - "Enable SSM Maintenance window is set to {}", - self._scheduling_context.enable_ssm_maintenance_windows, - ) 
- if self._scheduling_context.enable_ssm_maintenance_windows: - # calling the get maintenance window for this account and region. - self._logger.info( - "load the ssm maintenance windows for account {}, and region {}", - self._spoke_account_id, - self._spoke_region, - ) - self._maintenance_window_schedules = ( - self._maintenance_windows.ssm_maintenance_windows( - self._spoke_session, self._spoke_account_id, self._spoke_region - ) - ) - self._logger.info("finish loading the ssm maintenance windows") - self._logger.info( - "Fetching ec2 instances for account {} in region {}", - self._spoke_account_id, - self._spoke_region, + f"Fetching ec2 instances for account {self._spoke_session.account} in region {self._spoke_session.region}" ) paginator: Final = self._ec2.get_paginator("describe_instances") - schedulable_states: Final[list[InstanceStateNameType]] = ["running", "stopped"] + # filter for all states that are not "terminated" + states_in_scope: Final[list[InstanceStateNameType]] = [ + "pending", + "running", + "shutting-down", + "stopped", + "stopping", + ] filters: Final[list[FilterTypeDef]] = [ - {"Name": "instance-state-name", "Values": schedulable_states}, + {"Name": "instance-state-name", "Values": states_in_scope}, {"Name": "tag-key", "Values": [self._scheduler_tag_key]}, ] - instances: Final[list[Instance]] = [] for page in paginator.paginate(Filters=filters): for reservation in page["Reservations"]: for instance in reservation["Instances"]: - instance_data = self._select_instance_data( - instance, schedules_with_hibernation - ) - instances.append(instance_data) + if is_member_of_asg(instance): + self._logger.info( + f'Omitted EC2 instance with ID {instance["InstanceId"]} because it is part of an AutoScaling Group' + ) + continue + + ec2_instance = self._select_instance_data(instance) self._logger.info( - 'Selected EC2 instance with ID {} in state "{}"', - instance_data["id"], - instance_data["state_name"], + f'Selected EC2 instance with ID {ec2_instance.id} 
in state "{ec2_instance.current_state}"' ) - - self._logger.info( - "Quantity of EC2 instances in schedulable states: {}", - len(instances), - ) - - return instances - - def _select_instance_data( - self, instance: InstanceTypeDef, schedules_with_hibernation: frozenset[str] - ) -> Instance: + if ec2_instance.maintenance_windows: + self._logger.info( + f"EC2 instance ({ec2_instance.id}) selected with the following maintenance windows attached: " + f"{[mw.name for mw in ec2_instance.maintenance_windows]}" + ) + yield ec2_instance + + def _select_instance_data(self, instance: InstanceTypeDef) -> EC2Instance: tags: Final = get_tags(instance) name: Final = tags.get("Name", "") instance_id: Final = instance["InstanceId"] - state_code: Final = instance["State"]["Code"] & 0xFF - is_running: Final = state_code == EC2StateCode.RUNNING - is_terminated: Final = state_code == EC2StateCode.TERMINATED - schedule_name: Final = tags.get(self._scheduler_tag_key) - - maintenance_window_schedule: InstanceSchedule | Literal["NOT-FOUND"] | None = ( - None - ) - schedule = None - - if schedule_name is not None: - schedule = self._scheduling_context.schedules.get(schedule_name) - if ( - schedule is not None - and schedule.use_maintenance_window - and schedule.ssm_maintenance_window - ): - maintenance_window_schedule = self._maintenance_window_schedules.get( - schedule.ssm_maintenance_window, None - ) - if maintenance_window_schedule is None: - self._logger.error( - ( - "SSM maintenance window {} used in schedule {} not found or " - "disabled" - ), - schedule.ssm_maintenance_window, - schedule.name, - ) - self._maintenance_window_schedules[schedule.ssm_maintenance_window] = ( - "NOT-FOUND" - ) - if maintenance_window_schedule == "NOT-FOUND": - maintenance_window_schedule = None - - return Instance( - id=instance_id, - schedule_name=schedule_name, - hibernate=schedule_name in schedules_with_hibernation, - name=name, - state=state_code, - state_name=instance["State"]["Name"], - 
allow_resize=self.allow_resize, - resized=False, - is_running=is_running, - is_terminated=is_terminated, - current_state="running" if is_running else "stopped", - instancetype=instance["InstanceType"], - tags=tags, - maintenance_window=maintenance_window_schedule, + schedule_name: Final = tags.get(self._scheduler_tag_key, "") + + schedule = self._scheduling_context.get_schedule(schedule_name) + if schedule: + maint_windows = self._fetch_mw_schedules_for(schedule) + else: + maint_windows = [] + + return EC2Instance( + _id=instance_id, + _schedule_name=schedule_name, + _name=name, + _current_state=instance["State"]["Name"], + _instance_type=instance["InstanceType"], + _tags=tags, + _maintenance_windows=maint_windows, ) - def resize_instance(self, instance: Instance, instance_type: str) -> None: + def _fetch_mw_schedules_for( + self, schedule: InstanceSchedule + ) -> List[InstanceSchedule]: + if not (self.mw_context and schedule.ssm_maintenance_window): + return [] + + maint_windows: list[EC2SSMMaintenanceWindow] = [] + for requested_mw_name in schedule.ssm_maintenance_window: + maint_windows.extend(self.mw_context.find_by_name(requested_mw_name)) + + return [ + mw.to_schedule(self._scheduling_context.scheduling_interval_minutes) + for mw in maint_windows + ] + + def resize_instance(self, instance: EC2Instance, instance_type: str) -> None: """ change the instance type of the EC2 instance with ID in `instance` to `instance_type` """ self._logger.info( - "Setting size for ec2 instance {} to {}", instance["id"], instance_type + f"Setting size for ec2 instance {instance.id} to {instance_type}" ) try: self._ec2.modify_instance_attribute( - InstanceId=instance["id"], InstanceType={"Value": instance_type} + InstanceId=instance.id, InstanceType={"Value": instance_type} ) except Exception as ex: - self._logger.error( - "Error resizing instance {}, ({})", instance["id"], str(ex) - ) + self._logger.error(f"Error resizing instance {instance.id}, ({str(ex)})") def stop_instances( 
- self, instances_to_stop: list[Instance] + self, instances_to_stop: list[EC2Instance] ) -> Iterator[tuple[str, ScheduleState]]: """ stop EC2 instances with IDs in `instances_to_stop` @@ -243,43 +202,67 @@ def stop_instances( instance_ids_to_stop: Final[list[str]] = [] for instance in instances_to_stop: - # instances that are stopped for resizing cannot be hibernated - if instance["hibernate"] and instance["resized"]: - self._logger.info( - ( - "EC2 instance with ID {} will not be hibernated because it " - "must be stopped for resize" - ), - instance["id"], - ) - - if instance["hibernate"] and not instance["resized"]: - instance_ids_to_hibernate.append(instance["id"]) + if instance.should_hibernate: + # instances that are stopped for resizing cannot be hibernated + if instance.resized: + self._logger.info( + f"EC2 instance with ID {instance.id} will not be hibernated because it must be stopped for resize" + ) + instance_ids_to_stop.append(instance.id) + else: + instance_ids_to_hibernate.append(instance.id) else: - instance_ids_to_stop.append(instance["id"]) + instance_ids_to_stop.append(instance.id) - hibernate_responses = bisect_retry( + hibernate_responses: Final = bisect_retry( instance_ids_to_hibernate, lambda ids: self._ec2.stop_instances(InstanceIds=ids, Hibernate=True), ) - stop_responses = bisect_retry( - instance_ids_to_stop, lambda ids: self._ec2.stop_instances(InstanceIds=ids) - ) - stopping_instance_ids: Final[list[str]] = [] - for response in chain(hibernate_responses, stop_responses): - if isinstance(response, FailureResponse): + for response in hibernate_responses.failure_responses: + if ( + isinstance(response.error, ClientError) + and response.error.response["Error"]["Code"] + == "UnsupportedHibernationConfiguration" + ): + self._logger.warning( + "EC2 instance with ID {} not configured for hibernation, attempting to stop", + response.failed_input, + ) + instance_ids_to_stop.append(response.failed_input) + else: self._logger.error( "Failed to stop 
EC2 instance with ID {}: {}", response.failed_input, str(response.error), ) - else: - stopping_instance_ids.extend( - instance["InstanceId"] for instance in response["StoppingInstances"] - ) + + stopping_instance_ids.extend( + chain.from_iterable( + (instance["InstanceId"] for instance in response["StoppingInstances"]) + for response in hibernate_responses.success_responses + ) + ) + + stop_responses: Final = bisect_retry( + instance_ids_to_stop, lambda ids: self._ec2.stop_instances(InstanceIds=ids) + ) + + for response in stop_responses.failure_responses: + self._logger.error( + "Failed to stop EC2 instance with ID {}: {}", + response.failed_input, + str(response.error), + ) + + stopping_instance_ids.extend( + chain.from_iterable( + (instance["InstanceId"] for instance in response["StoppingInstances"]) + for response in stop_responses.success_responses + ) + ) self._tag_instances( stopping_instance_ids, @@ -287,10 +270,13 @@ def stop_instances( tag_templates_to_remove=self._scheduling_context.started_tags, ) - yield from ((instance_id, "stopped") for instance_id in stopping_instance_ids) + yield from ( + (instance_id, ScheduleState.STOPPED) + for instance_id in stopping_instance_ids + ) def start_instances( - self, instances_to_start: list[Instance] + self, instances_to_start: list[EC2Instance] ) -> Iterator[tuple[str, ScheduleState]]: """ start the EC2 instances with IDs in `instances_to_start` @@ -298,22 +284,21 @@ def start_instances( tag instances that were successfully started with the start tag keys and values configured at a stack value, and remove stop tag keys from the same instances """ - instance_ids = [instance["id"] for instance in instances_to_start] + instance_ids = [instance.id for instance in instances_to_start] responses: Final = bisect_retry( instance_ids, lambda ids: self._ec2.start_instances(InstanceIds=ids) ) starting_instance_ids: Final[list[str]] = [] - for response in responses: - if isinstance(response, FailureResponse): - 
self._logger.error( - "Failed to start EC2 instance with ID {}: {}", - response.failed_input, - str(response.error), - ) - else: - starting_instance_ids.extend( - instance["InstanceId"] for instance in response["StartingInstances"] - ) + for failure in responses.failure_responses: + self._logger.error( + "Failed to start EC2 instance with ID {}: {}", + failure.failed_input, + str(failure.error), + ) + for response in responses.success_responses: + starting_instance_ids.extend( + instance["InstanceId"] for instance in response["StartingInstances"] + ) self._tag_instances( starting_instance_ids, @@ -321,7 +306,10 @@ def start_instances( tag_templates_to_remove=self._scheduling_context.stopped_tags, ) - yield from ((instance_id, "running") for instance_id in starting_instance_ids) + yield from ( + (instance_id, ScheduleState.RUNNING) + for instance_id in starting_instance_ids + ) def _tag_instances( self, @@ -379,3 +367,12 @@ def _tag_instances( ", ".join(instance_ids), str(err), ) + + +def is_member_of_asg(instance: InstanceTypeDef) -> bool: + return any( + True + for _ in filter( + lambda x: x["Key"] == "aws:autoscaling:groupName", instance["Tags"] + ) + ) diff --git a/source/app/instance_scheduler/service/ec2_instance.py b/source/app/instance_scheduler/service/ec2_instance.py new file mode 100644 index 00000000..b727b7e5 --- /dev/null +++ b/source/app/instance_scheduler/service/ec2_instance.py @@ -0,0 +1,41 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from instance_scheduler.service.abstract_instance import AbstractInstance + +if TYPE_CHECKING: + from mypy_boto3_ec2.literals import InstanceStateNameType, InstanceTypeType +else: + InstanceStateNameType = object + InstanceTypeType = object + + +@dataclass(kw_only=True) +class EC2Instance(AbstractInstance): + _current_state: InstanceStateNameType + _instance_type: InstanceTypeType + + @property + def display_str(self) -> str: + s = f"EC2:{self.id}" + if self.name: + s += " ({})".format(self.name) + return s + + @property + def is_schedulable(self) -> bool: + return self.current_state in ["running", "stopped"] + + @property + def is_running(self) -> bool: + return self.current_state == "running" + + @property + def is_stopped(self) -> bool: + return self.current_state == "stopped" + + @property + def is_resizable(self) -> bool: + return True diff --git a/source/app/instance_scheduler/service/rds.py b/source/app/instance_scheduler/service/rds.py index 6377a775..a5662841 100644 --- a/source/app/instance_scheduler/service/rds.py +++ b/source/app/instance_scheduler/service/rds.py @@ -1,43 +1,40 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 import re -from collections.abc import Callable, Iterator, Sequence -from typing import TYPE_CHECKING, Any, Final, Literal, Optional +from collections.abc import Iterator, Sequence +from functools import cached_property +from itertools import chain +from typing import TYPE_CHECKING, Any, Final, Optional, TypedDict +from zoneinfo import ZoneInfo -from instance_scheduler import ScheduleState from instance_scheduler.boto_retry import get_client_with_standard_retry -from instance_scheduler.configuration.instance_schedule import ( - Instance, - InstanceSchedule, -) +from instance_scheduler.configuration.instance_schedule import InstanceSchedule from instance_scheduler.configuration.running_period import RunningPeriod from instance_scheduler.configuration.running_period_dict_element import ( RunningPeriodDictElement, ) -from instance_scheduler.configuration.scheduler_config_builder import ( - SchedulerConfigBuilder, +from instance_scheduler.configuration.scheduling_context import ( + SchedulingContext, + TagTemplate, ) -from instance_scheduler.configuration.scheduling_context import TagTemplate -from instance_scheduler.configuration.setbuilders.weekday_setbuilder import ( - WeekdaySetBuilder, +from instance_scheduler.configuration.time_utils import parse_time_str +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.cron.parser import parse_weekdays_expr +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, ) -from instance_scheduler.service import Service, ServiceArgs +from instance_scheduler.schedulers.states import ScheduleState +from instance_scheduler.service import Service +from instance_scheduler.service.rds_instance import RdsInstance +from instance_scheduler.util.logger import Logger +from instance_scheduler.util.session_manager import AssumedRole if TYPE_CHECKING: from mypy_boto3_rds.client import 
RDSClient - from mypy_boto3_rds.type_defs import ( - DBClusterTypeDef, - DBInstanceTypeDef, - DescribeDBClustersMessageRequestTypeDef, - DescribeDBInstancesMessageRequestTypeDef, - TagTypeDef, - ) + from mypy_boto3_rds.type_defs import DBClusterTypeDef, DBInstanceTypeDef, TagTypeDef from mypy_boto3_resourcegroupstaggingapi.client import ( ResourceGroupsTaggingAPIClient, ) - from mypy_boto3_resourcegroupstaggingapi.type_defs import ( - GetResourcesInputRequestTypeDef, - ) else: RDSClient = object DBClusterTypeDef = object @@ -50,101 +47,93 @@ RESTRICTED_RDS_TAG_VALUE_SET_CHARACTERS = r"[^a-zA-Z0-9\s_\.:+/=\\@-]" -ERR_STARTING_INSTANCE = "Error starting rds {} {} ({})" -ERR_STOPPING_INSTANCE = "Error stopping rds {} {}, ({})" -ERR_DELETING_SNAPSHOT = "Error deleting snapshot {}" +MAINTENANCE_SCHEDULE_NAME = "RDS preferred Maintenance Window Schedule" +MAINTENANCE_PERIOD_NAME = "RDS preferred Maintenance Window Period" +RDS_CLUSTER_ENGINES: Final = frozenset( + {"aurora-mysql", "aurora-postgresql", "neptune", "docdb"} +) -INF_ADD_TAGS = "Adding {} tags {} to instance {}" -INF_DELETE_SNAPSHOT = "Deleted previous snapshot {}" -INF_FETCHED = "Number of fetched rds {} is {}, number of schedulable resources is {}" -INF_FETCHING_RESOURCES = "Fetching rds {} for account {} in region {}" -INF_REMOVE_KEYS = "Removing {} key(s) {} from instance {}" -INF_STOPPED_RESOURCE = 'Stopped rds {} "{}"' +ResourceArn = str -DEBUG_READ_REPLICA = ( - 'Can not schedule rds instance "{}" because it is a read replica of instance {}' -) -DEBUG_READ_REPLICA_SOURCE = 'Can not schedule rds instance "{}" because it is the source for read copy instance(s) {}' -DEBUG_SKIPPING_INSTANCE = ( - "Skipping rds {} {} because it is not in a start or stop-able state ({})" -) -DEBUG_WITHOUT_SCHEDULE = "Skipping rds {} {} without schedule" -DEBUG_SELECTED = "Selected rds instance {} in state ({}) for schedule {}" -DEBUG_NO_SCHEDULE_TAG = "Instance {} has no schedule tag named {}" - -WARN_TAGGING_STARTED = 
"Error setting start or stop tags to started instance {}, ({})" -WARN_TAGGING_STOPPED = "Error setting start or stop tags to stopped instance {}, ({})" -WARN_RDS_TAG_VALUE = ( - 'Tag value "{}" for tag "{}" changed to "{}" because it did contain characters that are not allowed ' - "in RDS tag values. The value can only contain only the set of Unicode letters, digits, " - "white-space, '_', '.', '/', '=', '+', '-'" -) -MAINTENANCE_SCHEDULE_NAME = "RDS preferred Maintenance Window Schedule" -MAINTENANCE_PERIOD_NAME = "RDS preferred Maintenance Window Period" +class RdsTagDescription(TypedDict): + db: dict[ResourceArn, dict[str, str]] + cluster: dict[ResourceArn, dict[str, str]] -class RdsService(Service[Instance]): +class RdsService(Service[RdsInstance]): RDS_STATE_AVAILABLE = "available" RDS_STATE_STOPPED = "stopped" RDS_SCHEDULABLE_STATES = {RDS_STATE_AVAILABLE, RDS_STATE_STOPPED} - def __init__(self, args: ServiceArgs) -> None: - Service.__init__(self, args) - - self._session: Final = args["session"] - self._region: Final = self._session.region_name - self._account: Final = args["account_id"] - self._logger: Final = args["logger"] - self._scheduling_context: Final = args["scheduling_context"] - self._scheduler_tag_key: Final = self._scheduling_context.tag_name - self._stack_name: Final = args["stack_name"] - - self._instance_tags: Optional[dict[str, dict[str, str]]] = None + def __init__( + self, + assumed_scheduling_role: AssumedRole, + logger: Logger, + scheduling_context: SchedulingContext, + env: SchedulingRequestEnvironment, + ) -> None: + self._session: Final = assumed_scheduling_role.session + self._region: Final = assumed_scheduling_role.region + self._logger: Final = logger + self._scheduling_context: Final = scheduling_context + self._stack_name: Final = env.stack_name + self._env: Final = env + + self._instance_tags: Optional[dict[str, dict[str, dict[str, str]]]] = None + + self._enabled_services = [] + if self._env.enable_rds_service: + 
self._enabled_services.append("rds:db") # NOSONAR + + if ( + self._env.enable_rds_clusters + or self._env.enable_docdb_service + or self._env.enable_neptune_service + ): + self._enabled_services.append("rds:cluster") # NOSONAR @property def service_name(self) -> str: return "rds" - @property - def allow_resize(self) -> bool: - return False - - @property - def rds_resource_tags(self) -> dict[str, dict[str, str]]: - if self._instance_tags is None: - tag_client: ResourceGroupsTaggingAPIClient = get_client_with_standard_retry( - "resourcegroupstaggingapi", session=self._session, region=self._region - ) - - if self._scheduler_tag_key is None: - raise ValueError("RDS scheduler not initialized properly") - - args: GetResourcesInputRequestTypeDef = { - "TagFilters": [{"Key": self._scheduler_tag_key}], - "ResourcesPerPage": 50, - "ResourceTypeFilters": ["rds:db", "rds:cluster"], - } - - self._instance_tags = {} - - while True: - resp = tag_client.get_resources(**args) + @cached_property + def rds_resource_tags(self) -> RdsTagDescription: + tag_client: ResourceGroupsTaggingAPIClient = get_client_with_standard_retry( + "resourcegroupstaggingapi", session=self._session, region=self._region + ) - for resource in resp.get("ResourceTagMappingList", []): - self._instance_tags[resource["ResourceARN"]] = { + instance_tags: RdsTagDescription = {"db": {}, "cluster": {}} + + paginator: Final = tag_client.get_paginator("get_resources") + if "rds:db" in self._enabled_services: # NOSONAR + for page in paginator.paginate( + TagFilters=[{"Key": self._env.schedule_tag_key}], + ResourceTypeFilters=["rds:db"], # NOSONAR + ): + for resource in page["ResourceTagMappingList"]: + arn = resource["ResourceARN"] + instance_tags["db"][arn] = { tag["Key"]: tag["Value"] for tag in resource.get("Tags", {}) - if tag["Key"] in ["Name", self._scheduler_tag_key] + if tag["Key"] in {"Name", self._env.schedule_tag_key} } - if resp.get("PaginationToken", "") != "": - args["PaginationToken"] = 
resp["PaginationToken"] - else: - break + if "rds:cluster" in self._enabled_services: # NOSONAR + for page in paginator.paginate( + TagFilters=[{"Key": self._env.schedule_tag_key}], + ResourceTypeFilters=["rds:cluster"], # NOSONAR + ): + for resource in page["ResourceTagMappingList"]: + arn = resource["ResourceARN"] + instance_tags["cluster"][arn] = { + tag["Key"]: tag["Value"] + for tag in resource.get("Tags", {}) + if tag["Key"] in {"Name", self._env.schedule_tag_key} + } - return self._instance_tags + return instance_tags @staticmethod def build_schedule_from_maintenance_window(period_str: str) -> InstanceSchedule: @@ -159,14 +148,11 @@ def build_schedule_from_maintenance_window(period_str: str) -> InstanceSchedule: start_day_string, start_hhmm_string = start_string.split(":", 1) stop_day_string, stop_hhmm_string = stop_string.split(":", 1) - # weekday set builder - weekdays_builder = WeekdaySetBuilder() + start_weekday_expr = parse_weekdays_expr({start_day_string}) + start_time = parse_time_str(start_hhmm_string) + end_time = parse_time_str(stop_hhmm_string) - start_weekday = weekdays_builder.build(start_day_string) - start_time = SchedulerConfigBuilder.get_time_from_string(start_hhmm_string) - end_time = SchedulerConfigBuilder.get_time_from_string(stop_hhmm_string) - - # windows with now day overlap, can do with one period for schedule + # windows that do not overlap days only require one period for schedule if start_day_string == stop_day_string: periods: list[RunningPeriodDictElement] = [ { @@ -174,22 +160,26 @@ def build_schedule_from_maintenance_window(period_str: str) -> InstanceSchedule: name=MAINTENANCE_PERIOD_NAME, begintime=start_time, endtime=end_time, - weekdays=start_weekday, + cron_recurrence=CronRecurrenceExpression( + weekdays=start_weekday_expr + ), ) } ] else: - # window with day overlap, need two periods for schedule - end_time_day1 = SchedulerConfigBuilder.get_time_from_string("23:59") - begin_time_day2 = 
SchedulerConfigBuilder.get_time_from_string("00:00") - stop_weekday = weekdays_builder.build(stop_day_string) + # windows that overlap days require two periods for schedule + end_time_day1 = parse_time_str("23:59") + begin_time_day2 = parse_time_str("00:00") + stop_weekday_expr = parse_weekdays_expr({stop_day_string}) periods = [ { "period": RunningPeriod( name=MAINTENANCE_PERIOD_NAME + "-{}".format(start_day_string), begintime=start_time, endtime=end_time_day1, - weekdays=start_weekday, + cron_recurrence=CronRecurrenceExpression( + weekdays=start_weekday_expr + ), ), "instancetype": None, }, @@ -198,7 +188,9 @@ def build_schedule_from_maintenance_window(period_str: str) -> InstanceSchedule: name=MAINTENANCE_PERIOD_NAME + "-{}".format(stop_day_string), begintime=begin_time_day2, endtime=end_time, - weekdays=stop_weekday, + cron_recurrence=CronRecurrenceExpression( + weekdays=stop_weekday_expr, + ), ), "instancetype": None, }, @@ -208,151 +200,181 @@ def build_schedule_from_maintenance_window(period_str: str) -> InstanceSchedule: schedule = InstanceSchedule( name=MAINTENANCE_SCHEDULE_NAME, periods=periods, - timezone="UTC", # todo: is this even correct? 
+ timezone=ZoneInfo("UTC"), # PreferredMaintenanceWindow field is in utc + # https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html enforced=True, ) return schedule - def get_schedulable_resources( - self, - fn_is_schedulable: Callable[[Any], bool], - fn_describe_name: Literal["describe_db_instances", "describe_db_clusters"], - ) -> list[Instance]: - client: RDSClient = get_client_with_standard_retry( - "rds", session=self._session, region=self._region - ) - - describe_arguments: ( - DescribeDBInstancesMessageRequestTypeDef - | DescribeDBClustersMessageRequestTypeDef - ) = {} - resource_name = fn_describe_name.split("_")[-1] - resource_name = resource_name[0].upper() + resource_name[1:] - resources = [] - number_of_resources = 0 - self._logger.info( - INF_FETCHING_RESOURCES, resource_name, self._account, self._region - ) + def instance_is_in_scope(self, rds_inst: DBInstanceTypeDef) -> bool: + """check whether the instance is within scope for scheduling""" + db_id = rds_inst["DBInstanceIdentifier"] - while True: + if self.rds_resource_tags["db"].get(rds_inst["DBInstanceArn"], None) is None: self._logger.debug( - "Making {} call with parameters {}", - fn_describe_name, - describe_arguments, + f"Rds instance {rds_inst} has no schedule tag named {self._env.schedule_tag_key}" ) - fn = getattr(client, fn_describe_name) - rds_resp = fn(**describe_arguments) - for resource in rds_resp["DB" + resource_name]: - number_of_resources += 1 - - if fn_is_schedulable(resource): - resource_data = self._select_resource_data( - rds_resource=resource, is_cluster=resource_name == "Clusters" - ) - - schedule_name = resource_data["schedule_name"] - if schedule_name not in [None, ""]: - self._logger.debug( - DEBUG_SELECTED, - resource_data["id"], - resource_data["state_name"], - schedule_name, - ) - resources.append(resource_data) - else: - self._logger.debug( - DEBUG_WITHOUT_SCHEDULE, - resource_name[:-1], - resource_data["id"], - ) - if "Marker" in rds_resp: - 
describe_arguments["Marker"] = rds_resp["Marker"] - else: - break - self._logger.info( - INF_FETCHED, resource_name, number_of_resources, len(resources) - ) - return resources + return False - def get_schedulable_rds_instances(self) -> list[Instance]: - def is_schedulable_instance(rds_inst: DBInstanceTypeDef) -> bool: - db_id = rds_inst["DBInstanceIdentifier"] + if rds_inst.get("ReadReplicaSourceDBInstanceIdentifier", None): + self._logger.debug( + f'Cannot schedule rds instance "{db_id}" because it is a read replica of instance {rds_inst["ReadReplicaSourceDBInstanceIdentifier"]}' + ) + return False - state = rds_inst["DBInstanceStatus"] + if len(rds_inst.get("ReadReplicaDBInstanceIdentifiers", [])) > 0: + self._logger.debug( + f'Cannot schedule rds instance "{db_id}" because it is the source for read copy instance(s) {",".join(rds_inst["ReadReplicaDBInstanceIdentifiers"])}' + ) + return False - if state not in RdsService.RDS_SCHEDULABLE_STATES: - self._logger.debug(DEBUG_SKIPPING_INSTANCE, "instance", db_id, state) - return False + if rds_inst["Engine"] in RDS_CLUSTER_ENGINES: + self._logger.debug( + f"Skipping rds instance {db_id} because its engine ({rds_inst['Engine']}) indicates it is a member of a cluster" + ) + return False - if rds_inst.get("ReadReplicaSourceDBInstanceIdentifier", None) is not None: - self._logger.debug( - DEBUG_READ_REPLICA, - db_id, - rds_inst["ReadReplicaSourceDBInstanceIdentifier"], - ) - return False + return True - if len(rds_inst.get("ReadReplicaDBInstanceIdentifiers", [])) > 0: - self._logger.debug( - DEBUG_READ_REPLICA_SOURCE, - db_id, - ",".join(rds_inst["ReadReplicaDBInstanceIdentifiers"]), - ) - return False + def cluster_is_in_scope(self, rds_cluster: DBClusterTypeDef) -> bool: + """check whether the cluster is within scope for scheduling""" + db_id = rds_cluster["DBClusterIdentifier"] + engine = rds_cluster["Engine"] - if rds_inst["Engine"] in {"aurora", "aurora-mysql", "aurora-postgresql"}: - return False + if ( + 
self.rds_resource_tags["cluster"].get(rds_cluster["DBClusterArn"], None) + is None + ): + self._logger.debug( + "Rds cluster {} has no schedule tag named {}", + rds_cluster, + self._env.schedule_tag_key, + ) + return False + + match engine: + case "neptune": + if not self._env.enable_neptune_service: + self._logger.debug( + "Skipping cluster {} - neptune scheduling is not enabled", + db_id, + ) + return False + case "docdb": + if not self._env.enable_docdb_service: + self._logger.debug( + "Skipping cluster {} - docdb scheduling is not enabled", + db_id, + ) + return False + case _: + if not self._env.enable_rds_clusters: + self._logger.debug( + "Skipping cluster {} - rds cluster scheduling is not enabled", + db_id, + ) + return False - if self.rds_resource_tags.get(rds_inst["DBInstanceArn"]) is None: - self._logger.debug( - DEBUG_NO_SCHEDULE_TAG, rds_inst, self._scheduler_tag_key - ) - return False + return True - return True + def get_in_scope_rds_instances(self) -> Iterator[RdsInstance]: + tagged_instances: dict[ResourceArn, dict[str, str]] = self.rds_resource_tags[ + "db" + ] + if not tagged_instances: + return - return self.get_schedulable_resources( - fn_is_schedulable=is_schedulable_instance, - fn_describe_name="describe_db_instances", + client: RDSClient = get_client_with_standard_retry( + "rds", session=self._session, region=self._region ) + instance_arns = list(tagged_instances.keys()) - def get_schedulable_rds_clusters(self) -> list[Instance]: - def is_schedulable(cluster_inst: DBClusterTypeDef) -> bool: - db_id = cluster_inst["DBClusterIdentifier"] - - state = cluster_inst["Status"] - - if state not in RdsService.RDS_SCHEDULABLE_STATES: - self._logger.debug(DEBUG_SKIPPING_INSTANCE, "cluster", db_id, state) - return False - - if self.rds_resource_tags.get(cluster_inst["DBClusterArn"]) is None: - self._logger.debug( - DEBUG_NO_SCHEDULE_TAG, cluster_inst, self._scheduler_tag_key - ) - return False + paginator = client.get_paginator("describe_db_instances") 
+ for page in paginator.paginate( + Filters=[ + { + "Name": "db-instance-id", + "Values": instance_arns, + }, + ], + PaginationConfig={"PageSize": 50}, + ): + for instance in page.get("DBInstances", []): + if self.instance_is_in_scope(instance): + resource_data = self._select_resource_data( + rds_resource=instance, + is_cluster=False, + ) + schedule_name = resource_data.schedule_name + if schedule_name: + self._logger.debug( + f"Selected rds instance {resource_data.id} in state ({resource_data.current_state}) for schedule {schedule_name}", + ) + yield resource_data + else: + self._logger.debug( + f"Skipping rds instance {resource_data.id} without schedule" + ) - return True + def get_in_scope_rds_clusters(self) -> Iterator[RdsInstance]: + tagged_clusters: dict[ResourceArn, dict[str, str]] = self.rds_resource_tags[ + "cluster" + ] + if not tagged_clusters: + return - return self.get_schedulable_resources( - fn_is_schedulable=is_schedulable, - fn_describe_name="describe_db_clusters", + client: RDSClient = get_client_with_standard_retry( + "rds", session=self._session, region=self._region ) - def get_schedulable_instances(self) -> list[Instance]: - instances = self.get_schedulable_rds_instances() - if self._scheduling_context.schedule_clusters: - instances += self.get_schedulable_rds_clusters() - return instances + # get all arns from instance_resources + cluster_arns = list(tagged_clusters.keys()) + paginator = client.get_paginator("describe_db_clusters") + for page in paginator.paginate( + Filters=[ + { + "Name": "db-cluster-id", + "Values": cluster_arns, + }, + ], + PaginationConfig={"PageSize": 50}, + ): + for cluster in page.get("DBClusters", []): + if self.cluster_is_in_scope(cluster): + resource_data = self._select_resource_data( + rds_resource=cluster, + is_cluster=True, + ) + schedule_name = resource_data.schedule_name + if schedule_name: + self._logger.debug( + f"Selected rds cluster {resource_data.id} in state ({resource_data.current_state}) for schedule 
{schedule_name}" + ) + yield resource_data + else: + self._logger.debug( + f"Skipping rds cluster {resource_data.id} without a tagged schedule" + ) + + def describe_tagged_instances(self) -> Iterator[RdsInstance]: + rds_instances = self.get_in_scope_rds_instances() + rds_clusters = self.get_in_scope_rds_clusters() + return chain(rds_instances, rds_clusters) - def _select_resource_data(self, rds_resource: Any, is_cluster: bool) -> Instance: + def _select_resource_data(self, rds_resource: Any, is_cluster: bool) -> RdsInstance: + # type of rds_resource is actually DBInstanceTypeDef | DBClusterTypeDef arn_for_tags = ( rds_resource["DBInstanceArn"] if not is_cluster else rds_resource["DBClusterArn"] ) - tags = self.rds_resource_tags.get(arn_for_tags, {}) + if is_cluster: + tags: dict[str, str] = self.rds_resource_tags["cluster"].get( + arn_for_tags, {} + ) + else: + tags = self.rds_resource_tags["db"].get(arn_for_tags, {}) state = ( rds_resource["DBInstanceStatus"] @@ -360,40 +382,31 @@ def _select_resource_data(self, rds_resource: Any, is_cluster: bool) -> Instance else rds_resource["Status"] ) - is_running = state == self.RDS_STATE_AVAILABLE - - if self._scheduler_tag_key is None: - raise ValueError("RDS scheduler not initialized properly") - - instance_data = Instance( - id=( + instance_data = RdsInstance( + _id=( rds_resource["DBInstanceIdentifier"] if not is_cluster else rds_resource["DBClusterIdentifier"] ), - arn=( + _arn=( rds_resource["DBInstanceArn"] if not is_cluster else rds_resource["DBClusterArn"] ), - allow_resize=self.allow_resize, - hibernate=False, - state=state, - state_name=state, - is_running=is_running, - is_terminated=False, - current_state="running" if is_running else "stopped", - instancetype=( + _current_state=state, + _instance_type=( rds_resource["DBInstanceClass"] if not is_cluster else "cluster" ), - engine_type=rds_resource["Engine"], - maintenance_window=RdsService.build_schedule_from_maintenance_window( - 
rds_resource["PreferredMaintenanceWindow"] - ), - tags=tags, - name=tags.get("Name", ""), - schedule_name=tags.get(self._scheduler_tag_key, None), - is_cluster=is_cluster, + _engine_type=rds_resource["Engine"], + _maintenance_windows=[ + RdsService.build_schedule_from_maintenance_window( + rds_resource["PreferredMaintenanceWindow"] + ) + ], + _tags=tags, + _name=tags.get("Name", ""), + _schedule_name=tags.get(self._env.schedule_tag_key, ""), + _is_cluster=is_cluster, ) return instance_data @@ -409,11 +422,19 @@ def _validate_rds_tag_values( value = re.sub(RESTRICTED_RDS_TAG_VALUE_SET_CHARACTERS, " ", original_value) value = value.replace("\n", " ") if value != original_value: - self._logger.warning(WARN_RDS_TAG_VALUE, original_value, tag, value) + self._logger.warning( + 'Tag value "{}" for tag "{}" changed to "{}" because it did contain characters that are not ' + "allowed " + "in RDS tag values. The value can only contain only the set of Unicode letters, digits, " + "white-space, '_', '.', '/', '=', '+', '-'", + original_value, + tag, + value, + ) result.append({"Key": tag["Key"], "Value": value}) return result - def _stop_instance(self, client: RDSClient, inst: Instance) -> None: + def _stop_instance(self, client: RDSClient, inst: RdsInstance) -> None: def does_snapshot_exist(name: str) -> bool: try: resp = client.describe_db_snapshots( @@ -427,34 +448,30 @@ def does_snapshot_exist(name: str) -> bool: else: raise ex - args = {"DBInstanceIdentifier": inst["id"]} + args = {"DBInstanceIdentifier": inst.id} - if self._scheduling_context.create_rds_snapshot: - snapshot_name = "{}-stopped-{}".format( - self._stack_name, inst["id"] - ).replace(" ", "") + if self._env.enable_rds_snapshots: + snapshot_name = "{}-stopped-{}".format(self._stack_name, inst.id).replace( + " ", "" + ) args["DBSnapshotIdentifier"] = snapshot_name try: if does_snapshot_exist(snapshot_name): client.delete_db_snapshot(DBSnapshotIdentifier=snapshot_name) - self._logger.info(INF_DELETE_SNAPSHOT, 
snapshot_name) + self._logger.info("Deleted previous snapshot {}", snapshot_name) except Exception: - self._logger.error(ERR_DELETING_SNAPSHOT, snapshot_name) + self._logger.error("Error deleting snapshot {}", snapshot_name) - try: - client.stop_db_instance(**args) - self._logger.info(INF_STOPPED_RESOURCE, "instance", inst["id"]) - except Exception as ex: - self._logger.error( - ERR_STOPPING_INSTANCE, "instance", inst["instance_str"], str(ex) - ) + client.stop_db_instance(**args) # exception caught upstream - def _tag_stopped_resource(self, client: RDSClient, rds_resource: Instance) -> None: + def _tag_stopped_resource( + self, client: RDSClient, rds_resource: RdsInstance + ) -> None: stop_tags = self._validate_rds_tag_values(self._scheduling_context.stopped_tags) if stop_tags is None: stop_tags = [] - stop_tags_key_names = [t["Key"] for t in stop_tags] + stop_tags_key_names = {t["Key"] for t in stop_tags} start_tags_keys = [ t["Key"] @@ -465,31 +482,38 @@ def _tag_stopped_resource(self, client: RDSClient, rds_resource: Instance) -> No try: if len(start_tags_keys): self._logger.info( - INF_REMOVE_KEYS, - "start", + "Removing start key(s) {} from instance {}", ",".join(['"{}"'.format(k) for k in start_tags_keys]), - rds_resource["arn"], + rds_resource.arn, ) client.remove_tags_from_resource( - ResourceName=rds_resource["arn"], TagKeys=start_tags_keys + ResourceName=rds_resource.arn, TagKeys=start_tags_keys ) if len(stop_tags) > 0: self._logger.info( - INF_ADD_TAGS, "stop", str(stop_tags), rds_resource["arn"] + "Adding stop tags {} to instance {}", + str(stop_tags), + rds_resource.arn, ) client.add_tags_to_resource( - ResourceName=rds_resource["arn"], Tags=stop_tags + ResourceName=rds_resource.arn, Tags=stop_tags ) except Exception as ex: - self._logger.warning(WARN_TAGGING_STOPPED, rds_resource["id"], str(ex)) + self._logger.warning( + "Error setting start or stop tags to stopped instance {}, ({})", + rds_resource.id, + str(ex), + ) - def 
_tag_started_instances(self, client: RDSClient, rds_resource: Instance) -> None: + def _tag_started_instances( + self, client: RDSClient, rds_resource: RdsInstance + ) -> None: start_tags = self._validate_rds_tag_values( self._scheduling_context.started_tags ) if start_tags is None: start_tags = [] - start_tags_key_names = [t["Key"] for t in start_tags] + start_tags_key_names = {t["Key"] for t in start_tags} stop_tags_keys = [ t["Key"] @@ -499,73 +523,76 @@ def _tag_started_instances(self, client: RDSClient, rds_resource: Instance) -> N try: if len(stop_tags_keys): self._logger.info( - INF_REMOVE_KEYS, - "stop", + "Removing stop key(s) {} from instance {}", ",".join(['"{}"'.format(k) for k in stop_tags_keys]), - rds_resource["arn"], + rds_resource.arn, ) client.remove_tags_from_resource( - ResourceName=rds_resource["arn"], TagKeys=stop_tags_keys + ResourceName=rds_resource.arn, TagKeys=stop_tags_keys ) if start_tags is not None and len(start_tags) > 0: self._logger.info( - INF_ADD_TAGS, "start", str(start_tags), rds_resource["arn"] + "Adding start tags {} to instance {}", + str(start_tags), + rds_resource.arn, ) client.add_tags_to_resource( - ResourceName=rds_resource["arn"], Tags=start_tags + ResourceName=rds_resource.arn, Tags=start_tags ) except Exception as ex: - self._logger.warning(WARN_TAGGING_STARTED, rds_resource["id"], str(ex)) + self._logger.warning( + "Error setting start or stop tags to started instance {}, ({})", + rds_resource.id, + str(ex), + ) def stop_instances( - self, instances_to_stop: list[Instance] + self, instances_to_stop: list[RdsInstance] ) -> Iterator[tuple[str, ScheduleState]]: - client = get_client_with_standard_retry( + client: RDSClient = get_client_with_standard_retry( "rds", session=self._session, region=self._region ) - - for rds_resource in instances_to_stop: + for instance in instances_to_stop: try: - if rds_resource["is_cluster"]: - client.stop_db_cluster(DBClusterIdentifier=rds_resource["id"]) - self._logger.info( - 
INF_STOPPED_RESOURCE, "cluster", rds_resource["id"] - ) + if instance.is_cluster: + client.stop_db_cluster(DBClusterIdentifier=instance.id) + self._logger.info('Stopped rds cluster "{}"', instance.id) else: - self._stop_instance(client, rds_resource) + self._stop_instance(client, instance) + self._logger.info('Stopped rds instance "{}"', instance.id) - self._tag_stopped_resource(client, rds_resource) + self._tag_stopped_resource(client, instance) - yield rds_resource["id"], "stopped" + yield instance.id, ScheduleState.STOPPED except Exception as ex: self._logger.error( - ERR_STOPPING_INSTANCE, - "cluster" if rds_resource["is_cluster"] else "instance", - rds_resource["instance_str"], + "Error stopping rds {} {}, ({})", + "cluster" if instance.is_cluster else "instance", + instance.display_str, str(ex), ) def start_instances( - self, instances_to_start: list[Instance] + self, instances_to_start: list[RdsInstance] ) -> Iterator[tuple[str, ScheduleState]]: client: RDSClient = get_client_with_standard_retry( "rds", session=self._session, region=self._region ) - for rds_resource in instances_to_start: + for instance in instances_to_start: try: - if rds_resource["is_cluster"]: - client.start_db_cluster(DBClusterIdentifier=rds_resource["id"]) + if instance.is_cluster: + client.start_db_cluster(DBClusterIdentifier=instance.id) else: - client.start_db_instance(DBInstanceIdentifier=rds_resource["id"]) + client.start_db_instance(DBInstanceIdentifier=instance.id) - self._tag_started_instances(client, rds_resource) + self._tag_started_instances(client, instance) - yield rds_resource["id"], "running" + yield instance.id, ScheduleState.RUNNING except Exception as ex: self._logger.error( - ERR_STARTING_INSTANCE, - "cluster" if rds_resource["is_cluster"] else "instance", - rds_resource["instance_str"], + "Error starting rds {} {} ({})", + "cluster" if instance.is_cluster else "instance", + instance.display_str, str(ex), ) diff --git 
a/source/app/instance_scheduler/service/rds_instance.py b/source/app/instance_scheduler/service/rds_instance.py new file mode 100644 index 00000000..dfc89ac8 --- /dev/null +++ b/source/app/instance_scheduler/service/rds_instance.py @@ -0,0 +1,43 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass + +from instance_scheduler.service.abstract_instance import AbstractInstance + + +@dataclass(kw_only=True) +class RdsInstance(AbstractInstance): + _is_cluster: bool + _arn: str + _engine_type: str + + @property + def display_str(self) -> str: + s = f"RDS:{self._engine_type}:{self.id}" + if self.name: + s += " ({})".format(self.name) + return s + + @property + def arn(self) -> str: + return self._arn + + @property + def is_cluster(self) -> bool: + return self._is_cluster + + @property + def is_schedulable(self) -> bool: + return self._current_state in ["available", "stopped"] + + @property + def is_running(self) -> bool: + return self._current_state == "available" + + @property + def is_stopped(self) -> bool: + return self._current_state == "stopped" + + @property + def is_resizable(self) -> bool: + return False diff --git a/source/app/instance_scheduler/util/__init__.py b/source/app/instance_scheduler/util/__init__.py index 02f5ade9..53175016 100644 --- a/source/app/instance_scheduler/util/__init__.py +++ b/source/app/instance_scheduler/util/__init__.py @@ -1,11 +1,11 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 import json as _json +from os import environ from typing import Any as _Any from botocore.config import Config as _Config -from instance_scheduler.util.app_env import get_app_env as _get_app_env from instance_scheduler.util.custom_encoder import CustomEncoder as _CustomEncoder @@ -23,5 +23,7 @@ def get_boto_config() -> _Config: """Returns a boto3 config with standard retries and `user_agent_extra`""" return _Config( retries={"max_attempts": 5, "mode": "standard"}, - user_agent_extra=_get_app_env().user_agent_extra, + user_agent_extra=environ[ + "USER_AGENT_EXTRA" + ], # todo: don't access environ directly here ) diff --git a/source/app/instance_scheduler/util/app_env.py b/source/app/instance_scheduler/util/app_env.py index e570a304..15720819 100644 --- a/source/app/instance_scheduler/util/app_env.py +++ b/source/app/instance_scheduler/util/app_env.py @@ -23,10 +23,11 @@ class AppEnv: start_ec2_batch_size: int schedule_tag_key: str default_timezone: ZoneInfo - enable_cloudwatch_metrics: bool enable_ec2_service: bool enable_rds_service: bool enable_rds_clusters: bool + enable_neptune_service: bool + enable_docdb_service: bool enable_rds_snapshots: bool schedule_regions: list[str] app_namespace: str @@ -44,7 +45,12 @@ def scheduled_services(self) -> list[str]: result = [] if self.enable_ec2_service: result.append("ec2") - if self.enable_rds_service: + if ( + self.enable_rds_service + or self.enable_rds_clusters + or self.enable_neptune_service + or self.enable_docdb_service + ): result.append("rds") return result @@ -85,10 +91,10 @@ def _from_environment() -> AppEnv: log_group=environ["LOG_GROUP"], topic_arn=environ["ISSUES_TOPIC_ARN"], stack_name=environ["STACK_NAME"], - send_anonymous_metrics=_to_bool(environ["SEND_METRICS"]), + send_anonymous_metrics=env_to_bool(environ["SEND_METRICS"]), solution_id=environ["SOLUTION_ID"], solution_version=environ["SOLUTION_VERSION"], - enable_debug_logging=_to_bool(environ["TRACE"]), + 
enable_debug_logging=env_to_bool(environ["TRACE"]), user_agent_extra=environ["USER_AGENT_EXTRA"], anonymous_metrics_url=environ["METRICS_URL"], stack_id=environ["STACK_ID"], @@ -96,23 +102,24 @@ def _from_environment() -> AppEnv: start_ec2_batch_size=int(environ["START_EC2_BATCH_SIZE"]), schedule_tag_key=environ["SCHEDULE_TAG_KEY"], default_timezone=ZoneInfo(environ["DEFAULT_TIMEZONE"]), - enable_cloudwatch_metrics=_to_bool(environ["ENABLE_CLOUDWATCH_METRICS"]), - enable_ec2_service=_to_bool(environ["ENABLE_EC2_SERVICE"]), - enable_rds_service=_to_bool(environ["ENABLE_RDS_SERVICE"]), - enable_rds_clusters=_to_bool(environ["ENABLE_RDS_CLUSTERS"]), - enable_rds_snapshots=_to_bool(environ["ENABLE_RDS_SNAPSHOTS"]), - schedule_regions=_to_list(environ["SCHEDULE_REGIONS"]), + enable_ec2_service=env_to_bool(environ["ENABLE_EC2_SERVICE"]), + enable_rds_service=env_to_bool(environ["ENABLE_RDS_SERVICE"]), + enable_rds_clusters=env_to_bool(environ["ENABLE_RDS_CLUSTERS"]), + enable_neptune_service=env_to_bool(environ["ENABLE_NEPTUNE_SERVICE"]), + enable_docdb_service=env_to_bool(environ["ENABLE_DOCDB_SERVICE"]), + enable_rds_snapshots=env_to_bool(environ["ENABLE_RDS_SNAPSHOTS"]), + schedule_regions=env_to_list(environ["SCHEDULE_REGIONS"]), app_namespace=environ["APP_NAMESPACE"], scheduler_role_name=environ["SCHEDULER_ROLE_NAME"], - enable_schedule_hub_account=_to_bool( + enable_schedule_hub_account=env_to_bool( environ["ENABLE_SCHEDULE_HUB_ACCOUNT"] ), - enable_ec2_ssm_maintenance_windows=_to_bool( + enable_ec2_ssm_maintenance_windows=env_to_bool( environ["ENABLE_EC2_SSM_MAINTENANCE_WINDOWS"] ), - start_tags=_to_list(environ["START_TAGS"]), - stop_tags=_to_list(environ["STOP_TAGS"]), - enable_aws_organizations=_to_bool(environ["ENABLE_AWS_ORGANIZATIONS"]), + start_tags=env_to_list(environ["START_TAGS"]), + stop_tags=env_to_list(environ["STOP_TAGS"]), + enable_aws_organizations=env_to_bool(environ["ENABLE_AWS_ORGANIZATIONS"]), 
maintenance_window_table_name=environ["MAINTENANCE_WINDOW_TABLE"], config_table_name=environ["CONFIG_TABLE"], state_table_name=environ["STATE_TABLE"], @@ -125,11 +132,11 @@ def _from_environment() -> AppEnv: ) from err -def _to_bool(value: str) -> bool: +def env_to_bool(value: str) -> bool: return value.strip().lower() in {"true", "yes"} -def _to_list(value: str) -> list[str]: +def env_to_list(value: str) -> list[str]: items = [] for item in value.split(","): stripped = item.strip() diff --git a/source/app/instance_scheduler/util/batch.py b/source/app/instance_scheduler/util/batch.py index 0aacb74b..a3fe62a6 100644 --- a/source/app/instance_scheduler/util/batch.py +++ b/source/app/instance_scheduler/util/batch.py @@ -1,8 +1,8 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 from collections.abc import Callable -from dataclasses import dataclass -from typing import Final, Generic, TypeVar +from dataclasses import dataclass, field +from typing import Final, Generic, Self, TypeVar T = TypeVar("T") @@ -16,9 +16,23 @@ class FailureResponse(Generic[T]): U = TypeVar("U") +@dataclass +class BisectRetryResponse(Generic[T, U]): + success_responses: list[U] = field(default_factory=list) + intermediate_responses: list[FailureResponse[list[T]]] = field(default_factory=list) + failure_responses: list[FailureResponse[T]] = field(default_factory=list) + + def merge(self, *others: "BisectRetryResponse[T, U]") -> Self: + for other in others: + self.success_responses.extend(other.success_responses) + self.intermediate_responses.extend(other.intermediate_responses) + self.failure_responses.extend(other.failure_responses) + return self + + def bisect_retry( inputs: list[T], action: Callable[[list[T]], U] -) -> list[U | FailureResponse[T]]: +) -> BisectRetryResponse[T, U]: """ Retry an action taking a list of inputs by successively splitting the inputs in half @@ -40,16 +54,22 @@ def bisect_retry( a tuple of the single input 
item that resulted in an error and the error that was raised. """ - if not inputs: - return [] + length: Final = len(inputs) + if length == 0: + return BisectRetryResponse() try: - return [action(inputs)] + return BisectRetryResponse(success_responses=[action(inputs)]) except Exception as err: - length: Final = len(inputs) if length == 1: - return [FailureResponse(failed_input=inputs[0], error=err)] + return BisectRetryResponse( + failure_responses=[FailureResponse(failed_input=inputs[0], error=err)] + ) + else: + result: BisectRetryResponse[T, U] = BisectRetryResponse( + intermediate_responses=[FailureResponse(failed_input=inputs, error=err)] + ) - midpoint: Final = length // 2 - left: Final = bisect_retry(inputs[0:midpoint], action) - right: Final = bisect_retry(inputs[midpoint:], action) - return left + right + midpoint: Final = length // 2 + left: Final = bisect_retry(inputs[0:midpoint], action) + right: Final = bisect_retry(inputs[midpoint:], action) + return result.merge(left, right) diff --git a/source/app/instance_scheduler/util/custom_resource.py b/source/app/instance_scheduler/util/custom_resource.py index 69e07878..160e3a10 100644 --- a/source/app/instance_scheduler/util/custom_resource.py +++ b/source/app/instance_scheduler/util/custom_resource.py @@ -5,10 +5,19 @@ import uuid from abc import ABC, abstractmethod from collections.abc import Mapping -from typing import TYPE_CHECKING, Any, Generic, Literal, TypeGuard, TypeVar - -import requests -from typing_extensions import NotRequired, TypedDict +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Literal, + NotRequired, + Optional, + TypedDict, + TypeGuard, + TypeVar, +) + +from urllib3 import HTTPResponse, PoolManager from instance_scheduler.handler.base import Handler @@ -33,6 +42,18 @@ class CustomResourceRequest(TypedDict, Generic[ResourcePropertiesType]): OldResourceProperties: NotRequired[ResourcePropertiesType] +class CustomResourceResponse(TypedDict): + Status: str + Reason: 
NotRequired[str] + PhysicalResourceId: str + NoEcho: NotRequired[bool] + Data: NotRequired[dict[str, str]] + # do not edit + StackId: str + RequestId: str + LogicalResourceId: str + + class CustomResource( Generic[ResourcePropertiesType], Handler[CustomResourceRequest[ResourcePropertiesType]], @@ -42,23 +63,69 @@ class CustomResource( EVENT_TYPE_UPDATE = "Update" EVENT_TYPE_DELETE = "Delete" - def __init__(self, event: Mapping[str, Any], context: LambdaContext) -> None: - self.event = event - self.context = context - # physical resource is empty for create request, for other requests is it the returned physical id from the create request - self.physical_resource_id = event.get("PhysicalResourceId") - self.response = { - "Data": {}, - "Reason": "", + def OkResponse( + self, + data: Optional[dict[str, str]] = None, + reason: Optional[str] = None, + no_echo: bool = False, + physical_resource_id: Optional[str] = None, + ) -> CustomResourceResponse: + response: CustomResourceResponse = { + "Status": "SUCCESS", + "PhysicalResourceId": self.resolve_physical_resource_id( + override=physical_resource_id + ), + "StackId": self.stack_id, + "RequestId": self.request_id, + "LogicalResourceId": self.logical_resource_id, + } + if data: + response["Data"] = data + if no_echo: + response["NoEcho"] = True + if reason: + response["Reason"] = reason + + return response + + def ErrorResponse( + self, + reason: str, + physical_resource_id: Optional[str] = None, + ) -> CustomResourceResponse: + """ + :param reason: the reason for the error + :param physical_resource_id: custom resource physical id -- note. If using custom ids here, + It is critical that they be consistent between Ok and Error responses, + otherwise CloudFormation may generate additional delete calls on failed updates. 
+ :return: + """ + response: CustomResourceResponse = { + "Status": "FAILED", + "Reason": reason, + "PhysicalResourceId": self.resolve_physical_resource_id( + override=physical_resource_id + ), "StackId": self.stack_id, "RequestId": self.request_id, "LogicalResourceId": self.logical_resource_id, } - # Returned attributes of custom resource - @property - def response_data(self) -> Any: - return self.response["Data"] + return response + + def resolve_physical_resource_id(self, override: Optional[str] = None) -> str: + # order of precendence: + # id passed to this function > id included in event > generate new id + if override: + return override + else: + return self.physical_resource_id or self.new_physical_resource_id() + + def __init__(self, event: Mapping[str, Any], context: LambdaContext) -> None: + self.event = event + self.context = context + # physical resource is empty for create request, for other requests is it the returned physical id from the create request + self.physical_resource_id: Optional[str] = event.get("PhysicalResourceId") # Test if event is a request custom resource request from cloudformation @staticmethod @@ -70,6 +137,7 @@ def is_handling_request( # Returns Logical Resource Id in cloudformation stack @property def logical_resource_id(self) -> Any: + # todo type this as "str" -- requires typing the event return self.event.get("LogicalResourceId") # Returns the id of the cloudformation request @@ -131,31 +199,23 @@ def new_physical_resource_id(self) -> str: return new_id.lower() # Handles Create request, overwrite in inherited class to implement create actions - # Return True on success, False if on failure @abstractmethod - def _create_request(self) -> bool: - self.response["Reason"] = "No handler for Create request" - return True + def _create_request(self) -> CustomResourceResponse: + return self.OkResponse(reason="No handler for Create request") # Handles Update request, overwrite in inherited class to implement update actions - # Return 
True on success, False if on failure @abstractmethod - def _update_request(self) -> bool: - self.response["Reason"] = "No handler for Update request" - return True + def _update_request(self) -> CustomResourceResponse: + return self.OkResponse(reason="No handler for Update request") # Handles Delete request, overwrite in inherited class to implement delete actions - # Return True on success, False if on failure @abstractmethod - def _delete_request(self) -> bool: - self.response["Reason"] = "No handler for Delete request" - return True + def _delete_request(self) -> CustomResourceResponse: + return self.OkResponse(reason="No handler for Delete request") def fn_timeout(self) -> None: print("Execution is about to time out, sending failure message") - self.response["Status"] = "FAILED" - self.response["Reason"] = "Timeout" - self._send_response() + self._send_response(self.ErrorResponse(reason="Timeout")) # Handles cloudformation request def handle_request(self) -> Any: @@ -169,52 +229,44 @@ def handle_request(self) -> Any: timer = threading.Timer(timeleft, self.fn_timeout) timer.start() + response: CustomResourceResponse + try: # Call handler for request type if self.request_type == CustomResource.EVENT_TYPE_CREATE: - result = self._create_request() + response = self._create_request() elif self.request_type == CustomResource.EVENT_TYPE_UPDATE: - result = self._update_request() + response = self._update_request() elif self.request_type == CustomResource.EVENT_TYPE_DELETE: - result = self._delete_request() + response = self._delete_request() else: raise ValueError( '"{}" is not a valid request type'.format(self.request_type) ) - - # Set status based on return value of handler - self.response["Status"] = "SUCCESS" if result else "FAILED" - - # set physical resource id or create new one - self.response["PhysicalResourceId"] = ( - self.physical_resource_id or self.new_physical_resource_id() - ) - except Exception as ex: - self.response["Status"] = "FAILED" - 
self.response["Reason"] = str(ex) + response = self.ErrorResponse(reason=str(ex)) timer.cancel() - return self._send_response() + return self._send_response(response) # Send the response to cloudformation - def _send_response(self) -> bool: + def _send_response(self, custom_resource_response: CustomResourceResponse) -> bool: # Build the PUT request and the response data - resp = json.dumps(self.response) + # todo: need to trim response to 4KB (check ASR code for example) + resp = json.dumps(custom_resource_response) headers = {"content-type": "", "content-length": str(len(resp))} # PUT request to cloudformation try: - response = requests.put( + http = PoolManager() + http_response: HTTPResponse = http.request( # type: ignore[no-untyped-call] + "PUT", self.response_url, - data=json.dumps(self.response), headers=headers, - timeout=300, + body=resp, ) - response.raise_for_status() - print("Status code: {}".format(response.status_code)) - print("Status message: {}".format(response.text)) + print("Status code: {}".format(http_response.status)) return True except Exception as exc: print( diff --git a/source/app/instance_scheduler/util/display_helper.py b/source/app/instance_scheduler/util/display_helper.py index 9e579893..2a4f2ca7 100644 --- a/source/app/instance_scheduler/util/display_helper.py +++ b/source/app/instance_scheduler/util/display_helper.py @@ -30,7 +30,9 @@ def time_as_str(t: Any) -> str: return "{:0>2d}:{:0>2d}:{:0>2d}".format(t.hour, t.minute, t.second) @staticmethod - def set_as_str(the_set: Any, display_names: Any = None, offset: int = 0) -> str: + def set_as_str( # NOSONAR -- (cog-complexity) low-prio log formatter + the_set: Any, display_names: Any = None, offset: int = 0 + ) -> str: """ Displays a set as a readable string. Adjacent elements are combined in x-y ranges. A list of strings can be passed to the set to map the values to text. 
diff --git a/source/app/instance_scheduler/util/logger.py b/source/app/instance_scheduler/util/logger.py index cb1c2cf9..1867bb7c 100644 --- a/source/app/instance_scheduler/util/logger.py +++ b/source/app/instance_scheduler/util/logger.py @@ -27,11 +27,13 @@ def __init__( log_stream: str, topic_arn: str, debug: bool = False, + raise_exceptions: bool = False, ) -> None: self._log_group = log_group self._log_stream = log_stream self._topic_arn = topic_arn self._debug = debug + self._raise_exceptions = raise_exceptions self._buffer_size = 60 if self._debug else 30 self._buffer: list[tuple[int, str]] = [] self._cached_size = 0 @@ -50,21 +52,26 @@ def __exit__( self.flush() def _emit(self, level: str, msg: str, *args: Any) -> str: - s = msg if len(args) == 0 else msg.format(*args) - t = time.time() - s = "{:7s} : {}".format(level, s) + try: + s = msg if len(args) == 0 else msg.format(*args) + t = time.time() + s = "{:7s} : {}".format(level, s) - if self._cached_size + (len(s) + LOG_ENTRY_ADDITIONAL) > LOG_MAX_BATCH_SIZE: - self.flush() + if self._cached_size + (len(s) + LOG_ENTRY_ADDITIONAL) > LOG_MAX_BATCH_SIZE: + self.flush() - self._cached_size += len(s) + LOG_ENTRY_ADDITIONAL + self._cached_size += len(s) + LOG_ENTRY_ADDITIONAL - self._buffer.append((int(t * 1000), s)) + self._buffer.append((int(t * 1000), s)) - if len(self._buffer) >= self._buffer_size: - self.flush() + if len(self._buffer) >= self._buffer_size: + self.flush() - return s + return s + except Exception: + if self._raise_exceptions: + raise + return "Exception raised while emitting logs" @property def sns(self) -> SNSClient: @@ -79,10 +86,14 @@ def publish_to_sns(self, level: str, msg: str) -> None: :param level: :return: """ - message = "Loggroup: {}\nLogstream {}\n{} : {}".format( - self._log_group, self._log_stream, level, msg - ) - self.sns.publish(TopicArn=self._topic_arn, Message=message) + try: + message = "Loggroup: {}\nLogstream {}\n{} : {}".format( + self._log_group, self._log_stream, level, 
msg + ) + self.sns.publish(TopicArn=self._topic_arn, Message=message) + except Exception: + if self._raise_exceptions: + raise def info(self, msg: str, *args: Any) -> None: """ @@ -134,27 +145,52 @@ def flush(self) -> None: Writes all buffered messages to CloudWatch Stream :return: """ + try: + if len(self._buffer) == 0: + return - if len(self._buffer) == 0: - return - - put_event_args: PutLogEventsRequestRequestTypeDef = { - "logGroupName": self._log_group, - "logStreamName": self._log_stream, - "logEvents": [{"timestamp": r[0], "message": r[1]} for r in self._buffer], - } + put_event_args: PutLogEventsRequestRequestTypeDef = { + "logGroupName": self._log_group, + "logStreamName": self._log_stream, + "logEvents": [ + {"timestamp": r[0], "message": r[1]} for r in self._buffer + ], + } - retries = 5 - while retries > 0: try: - self.client.put_log_events(**put_event_args) - self._buffer = [] - self._cached_size = 0 - return - except self.client.exceptions.ResourceNotFoundException: - retries -= 1 self.client.create_log_stream( logGroupName=self._log_group, logStreamName=self._log_stream ) - except Exception: - return + except self.client.exceptions.ResourceAlreadyExistsException: + pass + + self.client.put_log_events(**put_event_args) + self._buffer = [] + self._cached_size = 0 + except Exception: + if self._raise_exceptions: + raise + + +class NoOpLogger(Logger): + + def __init__(self) -> None: + Logger.__init__(self, log_group="", log_stream="", topic_arn="") + + def publish_to_sns(self, level: str, msg: str) -> None: + """no-op""" + + def info(self, msg: str, *args: Any) -> None: + """no-op""" + + def error(self, msg: str, *args: Any) -> None: + """no-op""" + + def warning(self, msg: str, *args: Any) -> None: + """no-op""" + + def debug(self, msg: str, *args: Any) -> None: + """no-op""" + + def flush(self) -> None: + """no-op""" diff --git a/source/app/instance_scheduler/util/scheduler_metrics.py b/source/app/instance_scheduler/util/scheduler_metrics.py deleted 
file mode 100644 index 0177bbfb..00000000 --- a/source/app/instance_scheduler/util/scheduler_metrics.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -from datetime import datetime -from typing import TYPE_CHECKING, Any, Optional - -from instance_scheduler.boto_retry import get_client_with_standard_retry -from instance_scheduler.configuration.instance_schedule import ( - Instance, - InstanceSchedule, -) -from instance_scheduler.util.app_env import get_app_env - -if TYPE_CHECKING: - from mypy_boto3_cloudwatch.client import CloudWatchClient - from mypy_boto3_cloudwatch.type_defs import MetricDatumTypeDef -else: - CloudWatchClient = object - MetricDatumTypeDef = object - - -class SchedulerMetrics: - """ - Implements wrapper to write metrics data - """ - - NAMESPACE = "InstanceScheduler" - RUNNING_INSTANCES = "RunningInstances" - MANAGED_INSTANCES = "ManagedInstances" - DIMENSION_SCHEDULE = "Schedule" - DIMENSION_SERVICE = "Service" - ERRORS = "Errors" - WARNINGS = "Warnings" - - def __init__(self, dt: datetime) -> None: - """ - Initializes instance of metrics wrapper - :param dt: date and time of the metrics data (typically the scheduling moment) - """ - self._dt = dt - self._metrics_managed: dict[str, dict[str, int]] = {} - self._metrics_running: dict[str, dict[str, int]] = {} - self._stack = get_app_env().stack_name - self._namespace = "{}:{}".format(self._stack, SchedulerMetrics.NAMESPACE) - - self._metrics_client: Optional[CloudWatchClient] = None - - @property - def metrics_client(self) -> CloudWatchClient: - if self._metrics_client is None: - self._metrics_client = get_client_with_standard_retry("cloudwatch") - return self._metrics_client - - def add_schedule_metrics( - self, service: str, schedule: InstanceSchedule, instance: Instance - ) -> None: - """ - Adds metrics data - :param service: name of the service - :param schedule: name of the schedule - :param instance: 
scheduled instance - :return: - """ - if schedule.use_metrics: - if service not in self._metrics_managed: - self._metrics_managed[service] = {} - self._metrics_running[service] = {} - self._metrics_managed[service][schedule.name] = ( - self._metrics_managed[service].get(schedule.name, 0) + 1 - ) - self._metrics_running[service][schedule.name] = ( - self._metrics_running[service].get(schedule.name, 0) + 1 - if instance["is_running"] - else 0 - ) - - def put_schedule_metrics(self) -> None: - """ - Writes the stores metrics data to cloudwatch metrics - :return: - """ - - def build_metric( - service_name: str, - schedule_name: str, - metric_name: str, - data: dict[str, Any], - ) -> MetricDatumTypeDef: - return { - "MetricName": metric_name, - "Dimensions": [ - {"Name": SchedulerMetrics.DIMENSION_SERVICE, "Value": service_name}, - { - "Name": SchedulerMetrics.DIMENSION_SCHEDULE, - "Value": schedule_name, - }, - ], - "Timestamp": self._dt, - "Value": data[schedule_name], - "Unit": "Count", - } - - if len(self._metrics_managed) > 0: - metric_data = [] - for service in list(self._metrics_managed): - for name in list(self._metrics_managed[service]): - metric_data.append( - build_metric( - service, - name, - SchedulerMetrics.MANAGED_INSTANCES, - self._metrics_managed[service], - ) - ) - metric_data.append( - build_metric( - service, - name, - SchedulerMetrics.RUNNING_INSTANCES, - self._metrics_running[service], - ) - ) - - self.metrics_client.put_metric_data( - Namespace=self._namespace, MetricData=metric_data - ) diff --git a/source/app/instance_scheduler/util/scheduling_target.py b/source/app/instance_scheduler/util/scheduling_target.py new file mode 100644 index 00000000..3bb29fe8 --- /dev/null +++ b/source/app/instance_scheduler/util/scheduling_target.py @@ -0,0 +1,82 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from collections.abc import Iterator +from dataclasses import dataclass +from typing import TYPE_CHECKING, Union + +import boto3 +from aws_lambda_powertools import Logger as PowerToolsLogger + +from instance_scheduler.configuration.ssm import replace_ssm_references_with_account_ids +from instance_scheduler.handler.environments.asg_orch_env import AsgOrchEnv +from instance_scheduler.handler.environments.orchestrator_environment import ( + OrchestratorEnvironment, +) +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.util.logger import Logger + +if TYPE_CHECKING: + from aws_lambda_powertools.utilities.typing import LambdaContext +else: + LambdaContext = object + + +@dataclass(frozen=True) +class SchedulingTarget: + account: str + service: str + region: str + + def __str__(self) -> str: + return f"{self.account}-{self.region}-{self.service}" + + +def get_account_ids( + ddb_config_item: DdbConfigItem, + env: Union[OrchestratorEnvironment, AsgOrchEnv], + logger: Union[Logger, PowerToolsLogger], + context: LambdaContext, +) -> Iterator[str]: + """ + Iterates account and cross-account-roles of the accounts to operate on + :return: + """ + processed_accounts = [] + hub_account_id = context.invoked_function_arn.split(":")[4] + + if env.enable_schedule_hub_account: + processed_accounts.append(hub_account_id) + yield hub_account_id + + for remote_account in replace_ssm_references_with_account_ids( + ddb_config_item.remote_account_ids + ): + if not remote_account: + continue + + if remote_account in processed_accounts: + logger.warning("Remote account {} is already processed", remote_account) + continue + + yield remote_account + + +def list_all_targets( + ddb_config_item: DdbConfigItem, + env: OrchestratorEnvironment, + logger: Union[Logger, PowerToolsLogger], + context: LambdaContext, +) -> Iterator[SchedulingTarget]: + """ + Iterates account and cross-account-roles of the accounts to operate 
on + :return: + """ + services = env.scheduled_services() + regions = ( + env.schedule_regions if env.schedule_regions else [boto3.Session().region_name] + ) + + for service in services: + for region in regions: + for account in get_account_ids(ddb_config_item, env, logger, context): + yield SchedulingTarget(account=account, service=service, region=region) diff --git a/source/app/instance_scheduler/util/session_manager.py b/source/app/instance_scheduler/util/session_manager.py new file mode 100644 index 00000000..3180c7fd --- /dev/null +++ b/source/app/instance_scheduler/util/session_manager.py @@ -0,0 +1,95 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Final + +import boto3 +from boto3 import Session +from botocore.exceptions import ClientError + +from instance_scheduler.util import get_boto_config + +if TYPE_CHECKING: + from mypy_boto3_sts.client import STSClient +else: + STSClient = object + + +def _sts() -> STSClient: + session: Final = Session() + if session.get_partition_for_region(session.region_name) == "aws-cn": + sts_regional_endpoint = str.format( + "https://sts.{}.amazonaws.com.cn", session.region_name + ) + else: + sts_regional_endpoint = str.format( + "https://sts.{}.amazonaws.com", session.region_name + ) + + sts_client: STSClient = session.client( + "sts", + region_name=session.region_name, + endpoint_url=sts_regional_endpoint, + config=get_boto_config(), + ) + + return sts_client + + +def get_role_arn(*, account_id: str, role_name: str) -> str: + session = boto3.Session() + partition = session.get_partition_for_region(session.region_name) + + return ":".join( + [ + "arn", + partition, + "iam", + "", + account_id, + f"role/{role_name}", + ] + ) + + +@dataclass(frozen=True) +class AssumedRole: + session: Session + role_name: str + account: str + region: str + + def client(self, service_name: str) -> Any: + 
"""simple wrapper for session.client() that includes the default config from get_boto_config""" + return self.session.client(service_name, config=get_boto_config()) + + +def assume_role(*, account: str, region: str, role_name: str) -> AssumedRole: + spoke_account_role_arn: Final = get_role_arn( + account_id=account, role_name=role_name + ) + + try: + session_name: Final = f"{role_name}-{account}-{region}" + + token: Final = _sts().assume_role( + RoleArn=spoke_account_role_arn, RoleSessionName=session_name + ) + + credentials: Final = token["Credentials"] + + session = Session( + aws_access_key_id=credentials["AccessKeyId"], + aws_secret_access_key=credentials["SecretAccessKey"], + aws_session_token=credentials["SessionToken"], + region_name=region, + ) + + return AssumedRole( + session=session, + account=account, + region=region, + role_name=role_name, + ) + except ClientError as ex: + raise ex diff --git a/source/app/instance_scheduler/util/sns_handler.py b/source/app/instance_scheduler/util/sns_handler.py new file mode 100644 index 00000000..2b6a300a --- /dev/null +++ b/source/app/instance_scheduler/util/sns_handler.py @@ -0,0 +1,39 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from logging import WARNING, Handler, LogRecord +from typing import TYPE_CHECKING, Final + +from instance_scheduler import boto_retry + +if TYPE_CHECKING: + from mypy_boto3_sns.client import SNSClient +else: + SNSClient = object + + +class SnsHandler(Handler): + def __init__( + self, + *, + topic_arn: str, + log_group_name: str, + log_stream_name: str, + raise_exceptions: bool = False + ) -> None: + super().__init__(level=WARNING) + + self._sns: Final[SNSClient] = boto_retry.get_client_with_standard_retry("sns") + self._topic_arn: Final = topic_arn + self._log_group: Final = log_group_name + self._log_stream: Final = log_stream_name + self._raise_exceptions: Final = raise_exceptions + + def emit(self, record: LogRecord) -> None: + try: + message: Final = "Loggroup: {}\nLogstream {}\n{} : {}".format( + self._log_group, self._log_stream, record.levelname, record.getMessage() + ) + self._sns.publish(TopicArn=self._topic_arn, Message=message) + except Exception: + if self._raise_exceptions: + raise diff --git a/source/app/instance_scheduler/util/validation.py b/source/app/instance_scheduler/util/validation.py new file mode 100644 index 00000000..b059dbea --- /dev/null +++ b/source/app/instance_scheduler/util/validation.py @@ -0,0 +1,177 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from typing import Any, Callable, Mapping, TypeGuard + + +class ValidationException(Exception): + pass + + +def validate_string( # NOSONAR -- (duplicate-returns) function is expected to return true or throw an error per the TypeGuard spec + untyped_dict: Mapping[str, Any], key: str, required: bool = True +) -> TypeGuard[Mapping[str, Any]]: + """ + :param untyped_dict: a mapping of strings to unknown values + :param key: the key to check + :param required: if true, an error will be thrown if the value is missing, if false, no error will be thrown + :return: true if the value stored at {key} is a str. a ValidationException will be raised otherwise + """ + value = untyped_dict.get(key, None) + if value is None: + if required: + raise ValidationException(f"required key {key} is missing") + else: + return True + if type(value) is not str: + raise ValidationException(f"{key} must be a string, found {type(value)}") + return True + + +def validate_string_list( # NOSONAR -- (duplicate-returns) function is expected to return true or throw an error per the TypeGuard spec + untyped_dict: Mapping[str, Any], key: str, required: bool = True +) -> TypeGuard[Mapping[str, Any]]: + """ + :param untyped_dict: a mapping of strings to unknown values + :param key: the key to check + :param required: if true, an error will be thrown if the value is missing, if false, no error will be thrown + :return: true if the value stored at {key} is a list[str]. 
a ValidationException will be raised otherwise + """ + value = untyped_dict.get(key, None) + if value is None: + if required: + raise ValidationException(f"required key {key} is missing") + else: + return True + if type(value) is not list: + raise ValidationException(f"{key} must be a list, found {type(value)}") + for item in value: + if type(item) is not str: + raise ValidationException( + f"All elements of {key} must be strings, found {type(item)}" + ) + return True + + +def validate_boolean( # NOSONAR -- (duplicate-returns) function is expected to return true or throw an error per the TypeGuard spec + untyped_dict: Mapping[str, Any], key: str, required: bool = True +) -> TypeGuard[Mapping[str, Any]]: + """ + :param untyped_dict: a mapping of strings to unknown values + :param key: the key to check + :param required: if true, an error will be thrown if the value is missing, if false, no error will be thrown + :return: true if the value stored at {key} is a bool. a ValidationException will be raised otherwise + """ + value = untyped_dict.get(key, None) + if value is None: + if required: + raise ValidationException(f"required key {key} is missing") + else: + return True + if type(value) is not bool: + raise ValidationException(f"{key} must be a boolean, found {type(value)}") + return True + + +def validate_string_set( # NOSONAR -- (duplicate-returns) function is expected to return true or throw an error per the TypeGuard spec + untyped_dict: Mapping[str, Any], key: str, required: bool = True +) -> TypeGuard[Mapping[str, Any]]: + """ + :param untyped_dict: a mapping of strings to unknown values + :param key: the key to check + :param required: if true, an error will be thrown if the value is missing, if false, no error will be thrown + :return: true if the value stored at {key} is a set[str]. 
a ValidationException will be raised otherwise + """ + value = untyped_dict.get(key, None) + if value is None: + if required: + raise ValidationException(f"required key {key} is missing") + else: + return True + if type(value) is not set: + raise ValidationException(f"{key} must be a set, found {type(value)}") + for element in value: + if type(element) is not str: + raise ValidationException( + f"all elements in {key} must be strings, found {element} which is {type(value)}" + ) + return True + + +def validate_sub_dict( + untyped_dict: Mapping[str, Any], + key: str, + validator: Callable[[Mapping[str, Any]], bool], + required: bool = True, +) -> TypeGuard[Mapping[str, Any]]: + """ + validate the shape of a dictionary (the sub-dict) within another dictionary + :param untyped_dict: a mapping of strings to unknown values + :param key: the key to check + :param required: if true, an error will be thrown if the value is missing, if false, no error will be thrown + :param validator: sub validator that will be called to validate the sub_dict + :return: true if the value stored at {key} is a str. 
a ValidationException will be raised otherwise + """ + value = untyped_dict.get(key, None) + if value is None: + if required: + raise ValidationException(f"required key {key} is missing") + else: + return True + if type(value) is not dict: + raise ValidationException(f"{key} must be a dict, found {type(value)}") + try: + return validator(value) + except ValidationException as ve: + raise ValidationException(f"{key} failed validation: {ve}") + + +def validate_string_item( # NOSONAR -- (duplicate-returns) function is expected to return true or throw an error per the TypeGuard spec + untyped_dict: Mapping[str, Any], key: str, required: bool = True +) -> TypeGuard[Mapping[str, Any]]: + """ + :param untyped_dict: a mapping of strings to unknown values + :param key: the key to check + :param required: if true, an error will be thrown if the value is missing, if false, no error will be thrown + :return: true if the value stored at {key} is a set[str]. a ValidationException will be raised otherwise + """ + value = untyped_dict.get(key, None) + if value is None: + if required: + raise ValidationException(f"required key {key} is missing") + else: + return True + if type(value) is not dict: + raise ValidationException(f"{key} must be a dict, found {type(value)}") + if "S" not in value: + raise ValidationException(f'{key} must have attribute "S", found {value}') + if type(value["S"]) is not str: + raise ValidationException( + f'{key}["S"] must be a string, found {type(value["S"])}' + ) + return True + + +def validate_number_item( # NOSONAR -- (duplicate-returns) function is expected to return true or throw an error per the TypeGuard spec + untyped_dict: Mapping[str, Any], key: str, required: bool = True +) -> TypeGuard[Mapping[str, Any]]: + """ + :param untyped_dict: a mapping of strings to unknown values + :param key: the key to check + :param required: if true, an error will be thrown if the value is missing, if false, no error will be thrown + :return: true if the value 
stored at {key} is a set[str]. a ValidationException will be raised otherwise + """ + value = untyped_dict.get(key, None) + if value is None: + if required: + raise ValidationException(f"required key {key} is missing") + else: + return True + if type(value) is not dict: + raise ValidationException(f"{key} must be a dict, found {type(value)}") + if "N" not in value: + raise ValidationException(f'{key} must have attribute "N", found {value}') + if type(value["N"]) is not str: + raise ValidationException( + f'{key}["N"] must be a string, found {type(value["N"])}' + ) + return True diff --git a/source/app/mypy.ini b/source/app/mypy.ini index 29943387..aa3625ea 100644 --- a/source/app/mypy.ini +++ b/source/app/mypy.ini @@ -1,47 +1,5 @@ [mypy] strict = True - -[mypy-instance_scheduler.configuration] -disallow_untyped_calls = False - -[mypy-instance_scheduler.configuration.config_admin] -disallow_untyped_calls = False - -[mypy-instance_scheduler.configuration.config_dynamodb_adapter] -disallow_untyped_calls = False -disallow_untyped_defs = False - - -[mypy-instance_scheduler.configuration.scheduler_config_builder] -disallow_untyped_calls = False -disallow_untyped_defs = False - - -[mypy-instance_scheduler.schedulers.instance_states] -disallow_untyped_calls = False -disallow_untyped_defs = False - -[mypy-instance_scheduler.schedulers.instance_scheduler] -disallow_untyped_calls = False -disallow_untyped_defs = False - - -[mypy-instance_scheduler.util.dynamodb_utils] -disallow_untyped_defs = False - - -[mypy-instance_scheduler.handler.spoke_registration] -disallow_untyped_calls = False - - -[mypy-tests.handler.test_eventbus_request_handler] -disallow_untyped_calls = False - - -[mypy-tests.integration.conftest] -disallow_untyped_calls = False - - [mypy-moto] ignore_missing_imports = True diff --git a/source/app/poetry.lock b/source/app/poetry.lock index 40b9e4c8..92ef125d 100644 --- a/source/app/poetry.lock +++ b/source/app/poetry.lock @@ -2,13 +2,13 @@ [[package]] name = 
"annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [[package]] @@ -32,13 +32,13 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p [[package]] name = "aws-lambda-powertools" -version = "2.37.0" +version = "2.38.1" description = "Powertools for AWS Lambda (Python) is a developer toolkit to implement Serverless best practices and increase developer velocity." 
optional = false python-versions = "<4.0.0,>=3.8" files = [ - {file = "aws_lambda_powertools-2.37.0-py3-none-any.whl", hash = "sha256:6845995d7b0debc5b85c6d97b4336d9c536ade723d45cdc5a081477e4daa65e2"}, - {file = "aws_lambda_powertools-2.37.0.tar.gz", hash = "sha256:a49dacba249e6db860d59314e4620648c1691727cf43e465e8f0907f699dc5e8"}, + {file = "aws_lambda_powertools-2.38.1-py3-none-any.whl", hash = "sha256:4235f517a8429a0e4dd2f76ac3f2d0a77b4a8061fd75ca9da013e7a1b4d17699"}, + {file = "aws_lambda_powertools-2.38.1.tar.gz", hash = "sha256:3e25a51c0dc022b4ab733582ab4f39764831b31369a56424ac9f07139b5e96b3"}, ] [package.dependencies] @@ -56,13 +56,13 @@ validation = ["fastjsonschema (>=2.14.5,<3.0.0)"] [[package]] name = "aws-sam-translator" -version = "1.88.0" +version = "1.89.0" description = "AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates" optional = false python-versions = "!=4.0,<=4.0,>=3.8" files = [ - {file = "aws_sam_translator-1.88.0-py3-none-any.whl", hash = "sha256:aa93d498d8de3fb3d485c316155b1628144b823bbc176099a20de06df666fcac"}, - {file = "aws_sam_translator-1.88.0.tar.gz", hash = "sha256:e77c65f3488566122277accd44a0f1ec018e37403e0d5fe25120d96e537e91a7"}, + {file = "aws_sam_translator-1.89.0-py3-none-any.whl", hash = "sha256:843be1b5ca7634f700ad0c844a7e0dc42858f35da502e91691473eadd1731ded"}, + {file = "aws_sam_translator-1.89.0.tar.gz", hash = "sha256:fff1005d0b1f3cb511d0ac7e85f54af06afc9d9e433df013a2338d7a0168d174"}, ] [package.dependencies] @@ -111,8 +111,6 @@ mypy-extensions = ">=0.4.3" packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} [package.extras] colorama = ["colorama (>=0.4.3)"] @@ -122,17 +120,17 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.34.99" +version = "1.34.114" description = "The AWS SDK for Python" 
optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.34.99-py3-none-any.whl", hash = "sha256:b54084d000483b578757df03ce39a819fbba47071c9aa98611beb8806bcecd45"}, - {file = "boto3-1.34.99.tar.gz", hash = "sha256:6f600b3fe0bda53476395c902d9af5a47294c93ec52a9cdc2b926a9dc705ce79"}, + {file = "boto3-1.34.114-py3-none-any.whl", hash = "sha256:4460958d2b0c53bd2195b23ed5d45db2350e514486fe8caeb38b285b30742280"}, + {file = "boto3-1.34.114.tar.gz", hash = "sha256:eeb11bca9b19d12baf93436fb8a16b8b824f1f7e8b9bcc722607e862c46b1b08"}, ] [package.dependencies] -botocore = ">=1.34.99,<1.35.0" +botocore = ">=1.34.114,<1.35.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -141,20 +139,22 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs-lite" -version = "1.34.99" -description = "Type annotations for boto3 1.34.99 generated with mypy-boto3-builder 7.24.0" +version = "1.34.114" +description = "Type annotations for boto3 1.34.114 generated with mypy-boto3-builder 7.24.0" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs_lite-1.34.99-py3-none-any.whl", hash = "sha256:6c94a9e4f00eb3197c9a7ad1a03a7681e5167513075aac5627869fae3928410f"}, - {file = "boto3_stubs_lite-1.34.99.tar.gz", hash = "sha256:8d78bdd6a3ddb844f7f6b9f554152b056aadf7210ba57e82f57bb35fc1c66669"}, + {file = "boto3_stubs_lite-1.34.114-py3-none-any.whl", hash = "sha256:a522a772ed5382d13c039b26080da116abca40e6c0217d4549972febbdaa09e9"}, + {file = "boto3_stubs_lite-1.34.114.tar.gz", hash = "sha256:76abce810ca50f1628e7ccb745dfd0af4b400f684313f615c3fe660a1da4a60f"}, ] [package.dependencies] botocore-stubs = "*" +mypy-boto3-autoscaling = {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"autoscaling\""} mypy-boto3-cloudwatch = {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"cloudwatch\""} mypy-boto3-dynamodb = {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"dynamodb\""} mypy-boto3-ec2 
= {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"ec2\""} +mypy-boto3-ecs = {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"ecs\""} mypy-boto3-lambda = {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"lambda\""} mypy-boto3-logs = {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"logs\""} mypy-boto3-rds = {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"rds\""} @@ -171,7 +171,7 @@ account = ["mypy-boto3-account (>=1.34.0,<1.35.0)"] acm = ["mypy-boto3-acm (>=1.34.0,<1.35.0)"] acm-pca = ["mypy-boto3-acm-pca (>=1.34.0,<1.35.0)"] alexaforbusiness = ["mypy-boto3-alexaforbusiness (>=1.34.0,<1.35.0)"] -all = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)", "mypy-boto3-account (>=1.34.0,<1.35.0)", "mypy-boto3-acm (>=1.34.0,<1.35.0)", "mypy-boto3-acm-pca (>=1.34.0,<1.35.0)", "mypy-boto3-alexaforbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-amp (>=1.34.0,<1.35.0)", "mypy-boto3-amplify (>=1.34.0,<1.35.0)", "mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)", "mypy-boto3-amplifyuibuilder (>=1.34.0,<1.35.0)", "mypy-boto3-apigateway (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewaymanagementapi (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewayv2 (>=1.34.0,<1.35.0)", "mypy-boto3-appconfig (>=1.34.0,<1.35.0)", "mypy-boto3-appconfigdata (>=1.34.0,<1.35.0)", "mypy-boto3-appfabric (>=1.34.0,<1.35.0)", "mypy-boto3-appflow (>=1.34.0,<1.35.0)", "mypy-boto3-appintegrations (>=1.34.0,<1.35.0)", "mypy-boto3-application-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-application-insights (>=1.34.0,<1.35.0)", "mypy-boto3-applicationcostprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-appmesh (>=1.34.0,<1.35.0)", "mypy-boto3-apprunner (>=1.34.0,<1.35.0)", "mypy-boto3-appstream (>=1.34.0,<1.35.0)", "mypy-boto3-appsync (>=1.34.0,<1.35.0)", "mypy-boto3-arc-zonal-shift (>=1.34.0,<1.35.0)", "mypy-boto3-artifact (>=1.34.0,<1.35.0)", "mypy-boto3-athena (>=1.34.0,<1.35.0)", "mypy-boto3-auditmanager (>=1.34.0,<1.35.0)", 
"mypy-boto3-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling-plans (>=1.34.0,<1.35.0)", "mypy-boto3-b2bi (>=1.34.0,<1.35.0)", "mypy-boto3-backup (>=1.34.0,<1.35.0)", "mypy-boto3-backup-gateway (>=1.34.0,<1.35.0)", "mypy-boto3-backupstorage (>=1.34.0,<1.35.0)", "mypy-boto3-batch (>=1.34.0,<1.35.0)", "mypy-boto3-bcm-data-exports (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-billingconductor (>=1.34.0,<1.35.0)", "mypy-boto3-braket (>=1.34.0,<1.35.0)", "mypy-boto3-budgets (>=1.34.0,<1.35.0)", "mypy-boto3-ce (>=1.34.0,<1.35.0)", "mypy-boto3-chatbot (>=1.34.0,<1.35.0)", "mypy-boto3-chime (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-identity (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-meetings (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-messaging (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-voice (>=1.34.0,<1.35.0)", "mypy-boto3-cleanrooms (>=1.34.0,<1.35.0)", "mypy-boto3-cleanroomsml (>=1.34.0,<1.35.0)", "mypy-boto3-cloud9 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudcontrol (>=1.34.0,<1.35.0)", "mypy-boto3-clouddirectory (>=1.34.0,<1.35.0)", "mypy-boto3-cloudformation (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsm (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsmv2 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearch (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearchdomain (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail-data (>=1.34.0,<1.35.0)", "mypy-boto3-cloudwatch (>=1.34.0,<1.35.0)", "mypy-boto3-codeartifact (>=1.34.0,<1.35.0)", "mypy-boto3-codebuild (>=1.34.0,<1.35.0)", "mypy-boto3-codecatalyst (>=1.34.0,<1.35.0)", "mypy-boto3-codecommit (>=1.34.0,<1.35.0)", "mypy-boto3-codeconnections (>=1.34.0,<1.35.0)", 
"mypy-boto3-codedeploy (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-reviewer (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-security (>=1.34.0,<1.35.0)", "mypy-boto3-codeguruprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-codepipeline (>=1.34.0,<1.35.0)", "mypy-boto3-codestar (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-connections (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-notifications (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-identity (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-idp (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-sync (>=1.34.0,<1.35.0)", "mypy-boto3-comprehend (>=1.34.0,<1.35.0)", "mypy-boto3-comprehendmedical (>=1.34.0,<1.35.0)", "mypy-boto3-compute-optimizer (>=1.34.0,<1.35.0)", "mypy-boto3-config (>=1.34.0,<1.35.0)", "mypy-boto3-connect (>=1.34.0,<1.35.0)", "mypy-boto3-connect-contact-lens (>=1.34.0,<1.35.0)", "mypy-boto3-connectcampaigns (>=1.34.0,<1.35.0)", "mypy-boto3-connectcases (>=1.34.0,<1.35.0)", "mypy-boto3-connectparticipant (>=1.34.0,<1.35.0)", "mypy-boto3-controlcatalog (>=1.34.0,<1.35.0)", "mypy-boto3-controltower (>=1.34.0,<1.35.0)", "mypy-boto3-cost-optimization-hub (>=1.34.0,<1.35.0)", "mypy-boto3-cur (>=1.34.0,<1.35.0)", "mypy-boto3-customer-profiles (>=1.34.0,<1.35.0)", "mypy-boto3-databrew (>=1.34.0,<1.35.0)", "mypy-boto3-dataexchange (>=1.34.0,<1.35.0)", "mypy-boto3-datapipeline (>=1.34.0,<1.35.0)", "mypy-boto3-datasync (>=1.34.0,<1.35.0)", "mypy-boto3-datazone (>=1.34.0,<1.35.0)", "mypy-boto3-dax (>=1.34.0,<1.35.0)", "mypy-boto3-deadline (>=1.34.0,<1.35.0)", "mypy-boto3-detective (>=1.34.0,<1.35.0)", "mypy-boto3-devicefarm (>=1.34.0,<1.35.0)", "mypy-boto3-devops-guru (>=1.34.0,<1.35.0)", "mypy-boto3-directconnect (>=1.34.0,<1.35.0)", "mypy-boto3-discovery (>=1.34.0,<1.35.0)", "mypy-boto3-dlm (>=1.34.0,<1.35.0)", "mypy-boto3-dms (>=1.34.0,<1.35.0)", "mypy-boto3-docdb (>=1.34.0,<1.35.0)", "mypy-boto3-docdb-elastic (>=1.34.0,<1.35.0)", "mypy-boto3-drs (>=1.34.0,<1.35.0)", "mypy-boto3-ds (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodb (>=1.34.0,<1.35.0)", 
"mypy-boto3-dynamodbstreams (>=1.34.0,<1.35.0)", "mypy-boto3-ebs (>=1.34.0,<1.35.0)", "mypy-boto3-ec2 (>=1.34.0,<1.35.0)", "mypy-boto3-ec2-instance-connect (>=1.34.0,<1.35.0)", "mypy-boto3-ecr (>=1.34.0,<1.35.0)", "mypy-boto3-ecr-public (>=1.34.0,<1.35.0)", "mypy-boto3-ecs (>=1.34.0,<1.35.0)", "mypy-boto3-efs (>=1.34.0,<1.35.0)", "mypy-boto3-eks (>=1.34.0,<1.35.0)", "mypy-boto3-eks-auth (>=1.34.0,<1.35.0)", "mypy-boto3-elastic-inference (>=1.34.0,<1.35.0)", "mypy-boto3-elasticache (>=1.34.0,<1.35.0)", "mypy-boto3-elasticbeanstalk (>=1.34.0,<1.35.0)", "mypy-boto3-elastictranscoder (>=1.34.0,<1.35.0)", "mypy-boto3-elb (>=1.34.0,<1.35.0)", "mypy-boto3-elbv2 (>=1.34.0,<1.35.0)", "mypy-boto3-emr (>=1.34.0,<1.35.0)", "mypy-boto3-emr-containers (>=1.34.0,<1.35.0)", "mypy-boto3-emr-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-entityresolution (>=1.34.0,<1.35.0)", "mypy-boto3-es (>=1.34.0,<1.35.0)", "mypy-boto3-events (>=1.34.0,<1.35.0)", "mypy-boto3-evidently (>=1.34.0,<1.35.0)", "mypy-boto3-finspace (>=1.34.0,<1.35.0)", "mypy-boto3-finspace-data (>=1.34.0,<1.35.0)", "mypy-boto3-firehose (>=1.34.0,<1.35.0)", "mypy-boto3-fis (>=1.34.0,<1.35.0)", "mypy-boto3-fms (>=1.34.0,<1.35.0)", "mypy-boto3-forecast (>=1.34.0,<1.35.0)", "mypy-boto3-forecastquery (>=1.34.0,<1.35.0)", "mypy-boto3-frauddetector (>=1.34.0,<1.35.0)", "mypy-boto3-freetier (>=1.34.0,<1.35.0)", "mypy-boto3-fsx (>=1.34.0,<1.35.0)", "mypy-boto3-gamelift (>=1.34.0,<1.35.0)", "mypy-boto3-glacier (>=1.34.0,<1.35.0)", "mypy-boto3-globalaccelerator (>=1.34.0,<1.35.0)", "mypy-boto3-glue (>=1.34.0,<1.35.0)", "mypy-boto3-grafana (>=1.34.0,<1.35.0)", "mypy-boto3-greengrass (>=1.34.0,<1.35.0)", "mypy-boto3-greengrassv2 (>=1.34.0,<1.35.0)", "mypy-boto3-groundstation (>=1.34.0,<1.35.0)", "mypy-boto3-guardduty (>=1.34.0,<1.35.0)", "mypy-boto3-health (>=1.34.0,<1.35.0)", "mypy-boto3-healthlake (>=1.34.0,<1.35.0)", "mypy-boto3-honeycode (>=1.34.0,<1.35.0)", "mypy-boto3-iam (>=1.34.0,<1.35.0)", "mypy-boto3-identitystore 
(>=1.34.0,<1.35.0)", "mypy-boto3-imagebuilder (>=1.34.0,<1.35.0)", "mypy-boto3-importexport (>=1.34.0,<1.35.0)", "mypy-boto3-inspector (>=1.34.0,<1.35.0)", "mypy-boto3-inspector-scan (>=1.34.0,<1.35.0)", "mypy-boto3-inspector2 (>=1.34.0,<1.35.0)", "mypy-boto3-internetmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-iot (>=1.34.0,<1.35.0)", "mypy-boto3-iot-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot-jobs-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-devices (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-projects (>=1.34.0,<1.35.0)", "mypy-boto3-iotanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-iotdeviceadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents-data (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleethub (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleetwise (>=1.34.0,<1.35.0)", "mypy-boto3-iotsecuretunneling (>=1.34.0,<1.35.0)", "mypy-boto3-iotsitewise (>=1.34.0,<1.35.0)", "mypy-boto3-iotthingsgraph (>=1.34.0,<1.35.0)", "mypy-boto3-iottwinmaker (>=1.34.0,<1.35.0)", "mypy-boto3-iotwireless (>=1.34.0,<1.35.0)", "mypy-boto3-ivs (>=1.34.0,<1.35.0)", "mypy-boto3-ivs-realtime (>=1.34.0,<1.35.0)", "mypy-boto3-ivschat (>=1.34.0,<1.35.0)", "mypy-boto3-kafka (>=1.34.0,<1.35.0)", "mypy-boto3-kafkaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-kendra (>=1.34.0,<1.35.0)", "mypy-boto3-kendra-ranking (>=1.34.0,<1.35.0)", "mypy-boto3-keyspaces (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-archived-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-signaling (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisvideo (>=1.34.0,<1.35.0)", "mypy-boto3-kms (>=1.34.0,<1.35.0)", "mypy-boto3-lakeformation (>=1.34.0,<1.35.0)", "mypy-boto3-lambda (>=1.34.0,<1.35.0)", "mypy-boto3-launch-wizard (>=1.34.0,<1.35.0)", "mypy-boto3-lex-models 
(>=1.34.0,<1.35.0)", "mypy-boto3-lex-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-models (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-lightsail (>=1.34.0,<1.35.0)", "mypy-boto3-location (>=1.34.0,<1.35.0)", "mypy-boto3-logs (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutequipment (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutmetrics (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)", "mypy-boto3-m2 (>=1.34.0,<1.35.0)", "mypy-boto3-machinelearning (>=1.34.0,<1.35.0)", "mypy-boto3-macie2 (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-catalog (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-deployment (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-entitlement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconvert (>=1.34.0,<1.35.0)", "mypy-boto3-medialive (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage-vod (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackagev2 (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore-data (>=1.34.0,<1.35.0)", "mypy-boto3-mediatailor (>=1.34.0,<1.35.0)", "mypy-boto3-medical-imaging (>=1.34.0,<1.35.0)", "mypy-boto3-memorydb (>=1.34.0,<1.35.0)", "mypy-boto3-meteringmarketplace (>=1.34.0,<1.35.0)", "mypy-boto3-mgh (>=1.34.0,<1.35.0)", "mypy-boto3-mgn (>=1.34.0,<1.35.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhub-config (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhuborchestrator (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhubstrategy (>=1.34.0,<1.35.0)", 
"mypy-boto3-mobile (>=1.34.0,<1.35.0)", "mypy-boto3-mq (>=1.34.0,<1.35.0)", "mypy-boto3-mturk (>=1.34.0,<1.35.0)", "mypy-boto3-mwaa (>=1.34.0,<1.35.0)", "mypy-boto3-neptune (>=1.34.0,<1.35.0)", "mypy-boto3-neptune-graph (>=1.34.0,<1.35.0)", "mypy-boto3-neptunedata (>=1.34.0,<1.35.0)", "mypy-boto3-network-firewall (>=1.34.0,<1.35.0)", "mypy-boto3-networkmanager (>=1.34.0,<1.35.0)", "mypy-boto3-networkmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-nimble (>=1.34.0,<1.35.0)", "mypy-boto3-oam (>=1.34.0,<1.35.0)", "mypy-boto3-omics (>=1.34.0,<1.35.0)", "mypy-boto3-opensearch (>=1.34.0,<1.35.0)", "mypy-boto3-opensearchserverless (>=1.34.0,<1.35.0)", "mypy-boto3-opsworks (>=1.34.0,<1.35.0)", "mypy-boto3-opsworkscm (>=1.34.0,<1.35.0)", "mypy-boto3-organizations (>=1.34.0,<1.35.0)", "mypy-boto3-osis (>=1.34.0,<1.35.0)", "mypy-boto3-outposts (>=1.34.0,<1.35.0)", "mypy-boto3-panorama (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography-data (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-ad (>=1.34.0,<1.35.0)", "mypy-boto3-personalize (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-events (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-pi (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-email (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.34.0,<1.35.0)", "mypy-boto3-pipes (>=1.34.0,<1.35.0)", "mypy-boto3-polly (>=1.34.0,<1.35.0)", "mypy-boto3-pricing (>=1.34.0,<1.35.0)", "mypy-boto3-privatenetworks (>=1.34.0,<1.35.0)", "mypy-boto3-proton (>=1.34.0,<1.35.0)", "mypy-boto3-qbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-qconnect (>=1.34.0,<1.35.0)", "mypy-boto3-qldb (>=1.34.0,<1.35.0)", "mypy-boto3-qldb-session (>=1.34.0,<1.35.0)", "mypy-boto3-quicksight (>=1.34.0,<1.35.0)", "mypy-boto3-ram (>=1.34.0,<1.35.0)", "mypy-boto3-rbin (>=1.34.0,<1.35.0)", "mypy-boto3-rds (>=1.34.0,<1.35.0)", 
"mypy-boto3-rds-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-rekognition (>=1.34.0,<1.35.0)", "mypy-boto3-repostspace (>=1.34.0,<1.35.0)", "mypy-boto3-resiliencehub (>=1.34.0,<1.35.0)", "mypy-boto3-resource-explorer-2 (>=1.34.0,<1.35.0)", "mypy-boto3-resource-groups (>=1.34.0,<1.35.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.34.0,<1.35.0)", "mypy-boto3-robomaker (>=1.34.0,<1.35.0)", "mypy-boto3-rolesanywhere (>=1.34.0,<1.35.0)", "mypy-boto3-route53 (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-cluster (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-control-config (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-readiness (>=1.34.0,<1.35.0)", "mypy-boto3-route53domains (>=1.34.0,<1.35.0)", "mypy-boto3-route53profiles (>=1.34.0,<1.35.0)", "mypy-boto3-route53resolver (>=1.34.0,<1.35.0)", "mypy-boto3-rum (>=1.34.0,<1.35.0)", "mypy-boto3-s3 (>=1.34.0,<1.35.0)", "mypy-boto3-s3control (>=1.34.0,<1.35.0)", "mypy-boto3-s3outposts (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-edge (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-geospatial (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-metrics (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-savingsplans (>=1.34.0,<1.35.0)", "mypy-boto3-scheduler (>=1.34.0,<1.35.0)", "mypy-boto3-schemas (>=1.34.0,<1.35.0)", "mypy-boto3-sdb (>=1.34.0,<1.35.0)", "mypy-boto3-secretsmanager (>=1.34.0,<1.35.0)", "mypy-boto3-securityhub (>=1.34.0,<1.35.0)", "mypy-boto3-securitylake (>=1.34.0,<1.35.0)", "mypy-boto3-serverlessrepo (>=1.34.0,<1.35.0)", "mypy-boto3-service-quotas (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog-appregistry (>=1.34.0,<1.35.0)", 
"mypy-boto3-servicediscovery (>=1.34.0,<1.35.0)", "mypy-boto3-ses (>=1.34.0,<1.35.0)", "mypy-boto3-sesv2 (>=1.34.0,<1.35.0)", "mypy-boto3-shield (>=1.34.0,<1.35.0)", "mypy-boto3-signer (>=1.34.0,<1.35.0)", "mypy-boto3-simspaceweaver (>=1.34.0,<1.35.0)", "mypy-boto3-sms (>=1.34.0,<1.35.0)", "mypy-boto3-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-snow-device-management (>=1.34.0,<1.35.0)", "mypy-boto3-snowball (>=1.34.0,<1.35.0)", "mypy-boto3-sns (>=1.34.0,<1.35.0)", "mypy-boto3-sqs (>=1.34.0,<1.35.0)", "mypy-boto3-ssm (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)", "mypy-boto3-sso (>=1.34.0,<1.35.0)", "mypy-boto3-sso-admin (>=1.34.0,<1.35.0)", "mypy-boto3-sso-oidc (>=1.34.0,<1.35.0)", "mypy-boto3-stepfunctions (>=1.34.0,<1.35.0)", "mypy-boto3-storagegateway (>=1.34.0,<1.35.0)", "mypy-boto3-sts (>=1.34.0,<1.35.0)", "mypy-boto3-supplychain (>=1.34.0,<1.35.0)", "mypy-boto3-support (>=1.34.0,<1.35.0)", "mypy-boto3-support-app (>=1.34.0,<1.35.0)", "mypy-boto3-swf (>=1.34.0,<1.35.0)", "mypy-boto3-synthetics (>=1.34.0,<1.35.0)", "mypy-boto3-textract (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-influxdb (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-query (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-write (>=1.34.0,<1.35.0)", "mypy-boto3-tnb (>=1.34.0,<1.35.0)", "mypy-boto3-transcribe (>=1.34.0,<1.35.0)", "mypy-boto3-transfer (>=1.34.0,<1.35.0)", "mypy-boto3-translate (>=1.34.0,<1.35.0)", "mypy-boto3-trustedadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-verifiedpermissions (>=1.34.0,<1.35.0)", "mypy-boto3-voice-id (>=1.34.0,<1.35.0)", "mypy-boto3-vpc-lattice (>=1.34.0,<1.35.0)", "mypy-boto3-waf (>=1.34.0,<1.35.0)", "mypy-boto3-waf-regional (>=1.34.0,<1.35.0)", "mypy-boto3-wafv2 (>=1.34.0,<1.35.0)", "mypy-boto3-wellarchitected (>=1.34.0,<1.35.0)", "mypy-boto3-wisdom (>=1.34.0,<1.35.0)", "mypy-boto3-workdocs (>=1.34.0,<1.35.0)", "mypy-boto3-worklink (>=1.34.0,<1.35.0)", 
"mypy-boto3-workmail (>=1.34.0,<1.35.0)", "mypy-boto3-workmailmessageflow (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-thin-client (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-web (>=1.34.0,<1.35.0)", "mypy-boto3-xray (>=1.34.0,<1.35.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)", "mypy-boto3-account (>=1.34.0,<1.35.0)", "mypy-boto3-acm (>=1.34.0,<1.35.0)", "mypy-boto3-acm-pca (>=1.34.0,<1.35.0)", "mypy-boto3-alexaforbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-amp (>=1.34.0,<1.35.0)", "mypy-boto3-amplify (>=1.34.0,<1.35.0)", "mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)", "mypy-boto3-amplifyuibuilder (>=1.34.0,<1.35.0)", "mypy-boto3-apigateway (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewaymanagementapi (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewayv2 (>=1.34.0,<1.35.0)", "mypy-boto3-appconfig (>=1.34.0,<1.35.0)", "mypy-boto3-appconfigdata (>=1.34.0,<1.35.0)", "mypy-boto3-appfabric (>=1.34.0,<1.35.0)", "mypy-boto3-appflow (>=1.34.0,<1.35.0)", "mypy-boto3-appintegrations (>=1.34.0,<1.35.0)", "mypy-boto3-application-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-application-insights (>=1.34.0,<1.35.0)", "mypy-boto3-applicationcostprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-appmesh (>=1.34.0,<1.35.0)", "mypy-boto3-apprunner (>=1.34.0,<1.35.0)", "mypy-boto3-appstream (>=1.34.0,<1.35.0)", "mypy-boto3-appsync (>=1.34.0,<1.35.0)", "mypy-boto3-arc-zonal-shift (>=1.34.0,<1.35.0)", "mypy-boto3-artifact (>=1.34.0,<1.35.0)", "mypy-boto3-athena (>=1.34.0,<1.35.0)", "mypy-boto3-auditmanager (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling-plans (>=1.34.0,<1.35.0)", "mypy-boto3-b2bi (>=1.34.0,<1.35.0)", "mypy-boto3-backup (>=1.34.0,<1.35.0)", "mypy-boto3-backup-gateway (>=1.34.0,<1.35.0)", "mypy-boto3-backupstorage (>=1.34.0,<1.35.0)", "mypy-boto3-batch (>=1.34.0,<1.35.0)", "mypy-boto3-bcm-data-exports (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent 
(>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-billingconductor (>=1.34.0,<1.35.0)", "mypy-boto3-braket (>=1.34.0,<1.35.0)", "mypy-boto3-budgets (>=1.34.0,<1.35.0)", "mypy-boto3-ce (>=1.34.0,<1.35.0)", "mypy-boto3-chatbot (>=1.34.0,<1.35.0)", "mypy-boto3-chime (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-identity (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-meetings (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-messaging (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-voice (>=1.34.0,<1.35.0)", "mypy-boto3-cleanrooms (>=1.34.0,<1.35.0)", "mypy-boto3-cleanroomsml (>=1.34.0,<1.35.0)", "mypy-boto3-cloud9 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudcontrol (>=1.34.0,<1.35.0)", "mypy-boto3-clouddirectory (>=1.34.0,<1.35.0)", "mypy-boto3-cloudformation (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsm (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsmv2 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearch (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearchdomain (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail-data (>=1.34.0,<1.35.0)", "mypy-boto3-cloudwatch (>=1.34.0,<1.35.0)", "mypy-boto3-codeartifact (>=1.34.0,<1.35.0)", "mypy-boto3-codebuild (>=1.34.0,<1.35.0)", "mypy-boto3-codecatalyst (>=1.34.0,<1.35.0)", "mypy-boto3-codecommit (>=1.34.0,<1.35.0)", "mypy-boto3-codeconnections (>=1.34.0,<1.35.0)", "mypy-boto3-codedeploy (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-reviewer (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-security (>=1.34.0,<1.35.0)", "mypy-boto3-codeguruprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-codepipeline (>=1.34.0,<1.35.0)", "mypy-boto3-codestar (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-connections (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-notifications (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-identity 
(>=1.34.0,<1.35.0)", "mypy-boto3-cognito-idp (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-sync (>=1.34.0,<1.35.0)", "mypy-boto3-comprehend (>=1.34.0,<1.35.0)", "mypy-boto3-comprehendmedical (>=1.34.0,<1.35.0)", "mypy-boto3-compute-optimizer (>=1.34.0,<1.35.0)", "mypy-boto3-config (>=1.34.0,<1.35.0)", "mypy-boto3-connect (>=1.34.0,<1.35.0)", "mypy-boto3-connect-contact-lens (>=1.34.0,<1.35.0)", "mypy-boto3-connectcampaigns (>=1.34.0,<1.35.0)", "mypy-boto3-connectcases (>=1.34.0,<1.35.0)", "mypy-boto3-connectparticipant (>=1.34.0,<1.35.0)", "mypy-boto3-controlcatalog (>=1.34.0,<1.35.0)", "mypy-boto3-controltower (>=1.34.0,<1.35.0)", "mypy-boto3-cost-optimization-hub (>=1.34.0,<1.35.0)", "mypy-boto3-cur (>=1.34.0,<1.35.0)", "mypy-boto3-customer-profiles (>=1.34.0,<1.35.0)", "mypy-boto3-databrew (>=1.34.0,<1.35.0)", "mypy-boto3-dataexchange (>=1.34.0,<1.35.0)", "mypy-boto3-datapipeline (>=1.34.0,<1.35.0)", "mypy-boto3-datasync (>=1.34.0,<1.35.0)", "mypy-boto3-datazone (>=1.34.0,<1.35.0)", "mypy-boto3-dax (>=1.34.0,<1.35.0)", "mypy-boto3-deadline (>=1.34.0,<1.35.0)", "mypy-boto3-detective (>=1.34.0,<1.35.0)", "mypy-boto3-devicefarm (>=1.34.0,<1.35.0)", "mypy-boto3-devops-guru (>=1.34.0,<1.35.0)", "mypy-boto3-directconnect (>=1.34.0,<1.35.0)", "mypy-boto3-discovery (>=1.34.0,<1.35.0)", "mypy-boto3-dlm (>=1.34.0,<1.35.0)", "mypy-boto3-dms (>=1.34.0,<1.35.0)", "mypy-boto3-docdb (>=1.34.0,<1.35.0)", "mypy-boto3-docdb-elastic (>=1.34.0,<1.35.0)", "mypy-boto3-drs (>=1.34.0,<1.35.0)", "mypy-boto3-ds (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodb (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodbstreams (>=1.34.0,<1.35.0)", "mypy-boto3-ebs (>=1.34.0,<1.35.0)", "mypy-boto3-ec2 (>=1.34.0,<1.35.0)", "mypy-boto3-ec2-instance-connect (>=1.34.0,<1.35.0)", "mypy-boto3-ecr (>=1.34.0,<1.35.0)", "mypy-boto3-ecr-public (>=1.34.0,<1.35.0)", "mypy-boto3-ecs (>=1.34.0,<1.35.0)", "mypy-boto3-efs (>=1.34.0,<1.35.0)", "mypy-boto3-eks (>=1.34.0,<1.35.0)", "mypy-boto3-eks-auth (>=1.34.0,<1.35.0)", 
"mypy-boto3-elastic-inference (>=1.34.0,<1.35.0)", "mypy-boto3-elasticache (>=1.34.0,<1.35.0)", "mypy-boto3-elasticbeanstalk (>=1.34.0,<1.35.0)", "mypy-boto3-elastictranscoder (>=1.34.0,<1.35.0)", "mypy-boto3-elb (>=1.34.0,<1.35.0)", "mypy-boto3-elbv2 (>=1.34.0,<1.35.0)", "mypy-boto3-emr (>=1.34.0,<1.35.0)", "mypy-boto3-emr-containers (>=1.34.0,<1.35.0)", "mypy-boto3-emr-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-entityresolution (>=1.34.0,<1.35.0)", "mypy-boto3-es (>=1.34.0,<1.35.0)", "mypy-boto3-events (>=1.34.0,<1.35.0)", "mypy-boto3-evidently (>=1.34.0,<1.35.0)", "mypy-boto3-finspace (>=1.34.0,<1.35.0)", "mypy-boto3-finspace-data (>=1.34.0,<1.35.0)", "mypy-boto3-firehose (>=1.34.0,<1.35.0)", "mypy-boto3-fis (>=1.34.0,<1.35.0)", "mypy-boto3-fms (>=1.34.0,<1.35.0)", "mypy-boto3-forecast (>=1.34.0,<1.35.0)", "mypy-boto3-forecastquery (>=1.34.0,<1.35.0)", "mypy-boto3-frauddetector (>=1.34.0,<1.35.0)", "mypy-boto3-freetier (>=1.34.0,<1.35.0)", "mypy-boto3-fsx (>=1.34.0,<1.35.0)", "mypy-boto3-gamelift (>=1.34.0,<1.35.0)", "mypy-boto3-glacier (>=1.34.0,<1.35.0)", "mypy-boto3-globalaccelerator (>=1.34.0,<1.35.0)", "mypy-boto3-glue (>=1.34.0,<1.35.0)", "mypy-boto3-grafana (>=1.34.0,<1.35.0)", "mypy-boto3-greengrass (>=1.34.0,<1.35.0)", "mypy-boto3-greengrassv2 (>=1.34.0,<1.35.0)", "mypy-boto3-groundstation (>=1.34.0,<1.35.0)", "mypy-boto3-guardduty (>=1.34.0,<1.35.0)", "mypy-boto3-health (>=1.34.0,<1.35.0)", "mypy-boto3-healthlake (>=1.34.0,<1.35.0)", "mypy-boto3-honeycode (>=1.34.0,<1.35.0)", "mypy-boto3-iam (>=1.34.0,<1.35.0)", "mypy-boto3-identitystore (>=1.34.0,<1.35.0)", "mypy-boto3-imagebuilder (>=1.34.0,<1.35.0)", "mypy-boto3-importexport (>=1.34.0,<1.35.0)", "mypy-boto3-inspector (>=1.34.0,<1.35.0)", "mypy-boto3-inspector-scan (>=1.34.0,<1.35.0)", "mypy-boto3-inspector2 (>=1.34.0,<1.35.0)", "mypy-boto3-internetmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-iot (>=1.34.0,<1.35.0)", "mypy-boto3-iot-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot-jobs-data 
(>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-devices (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-projects (>=1.34.0,<1.35.0)", "mypy-boto3-iotanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-iotdeviceadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents-data (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleethub (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleetwise (>=1.34.0,<1.35.0)", "mypy-boto3-iotsecuretunneling (>=1.34.0,<1.35.0)", "mypy-boto3-iotsitewise (>=1.34.0,<1.35.0)", "mypy-boto3-iotthingsgraph (>=1.34.0,<1.35.0)", "mypy-boto3-iottwinmaker (>=1.34.0,<1.35.0)", "mypy-boto3-iotwireless (>=1.34.0,<1.35.0)", "mypy-boto3-ivs (>=1.34.0,<1.35.0)", "mypy-boto3-ivs-realtime (>=1.34.0,<1.35.0)", "mypy-boto3-ivschat (>=1.34.0,<1.35.0)", "mypy-boto3-kafka (>=1.34.0,<1.35.0)", "mypy-boto3-kafkaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-kendra (>=1.34.0,<1.35.0)", "mypy-boto3-kendra-ranking (>=1.34.0,<1.35.0)", "mypy-boto3-keyspaces (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-archived-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-signaling (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisvideo (>=1.34.0,<1.35.0)", "mypy-boto3-kms (>=1.34.0,<1.35.0)", "mypy-boto3-lakeformation (>=1.34.0,<1.35.0)", "mypy-boto3-lambda (>=1.34.0,<1.35.0)", "mypy-boto3-launch-wizard (>=1.34.0,<1.35.0)", "mypy-boto3-lex-models (>=1.34.0,<1.35.0)", "mypy-boto3-lex-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-models (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-lightsail (>=1.34.0,<1.35.0)", 
"mypy-boto3-location (>=1.34.0,<1.35.0)", "mypy-boto3-logs (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutequipment (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutmetrics (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)", "mypy-boto3-m2 (>=1.34.0,<1.35.0)", "mypy-boto3-machinelearning (>=1.34.0,<1.35.0)", "mypy-boto3-macie2 (>=1.34.0,<1.35.0)", "mypy-boto3-mailmanager (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-catalog (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-deployment (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-entitlement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconvert (>=1.34.0,<1.35.0)", "mypy-boto3-medialive (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage-vod (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackagev2 (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore-data (>=1.34.0,<1.35.0)", "mypy-boto3-mediatailor (>=1.34.0,<1.35.0)", "mypy-boto3-medical-imaging (>=1.34.0,<1.35.0)", "mypy-boto3-memorydb (>=1.34.0,<1.35.0)", "mypy-boto3-meteringmarketplace (>=1.34.0,<1.35.0)", "mypy-boto3-mgh (>=1.34.0,<1.35.0)", "mypy-boto3-mgn (>=1.34.0,<1.35.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhub-config (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhuborchestrator (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhubstrategy (>=1.34.0,<1.35.0)", "mypy-boto3-mobile (>=1.34.0,<1.35.0)", "mypy-boto3-mq (>=1.34.0,<1.35.0)", "mypy-boto3-mturk (>=1.34.0,<1.35.0)", "mypy-boto3-mwaa (>=1.34.0,<1.35.0)", "mypy-boto3-neptune (>=1.34.0,<1.35.0)", "mypy-boto3-neptune-graph (>=1.34.0,<1.35.0)", "mypy-boto3-neptunedata (>=1.34.0,<1.35.0)", "mypy-boto3-network-firewall (>=1.34.0,<1.35.0)", 
"mypy-boto3-networkmanager (>=1.34.0,<1.35.0)", "mypy-boto3-networkmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-nimble (>=1.34.0,<1.35.0)", "mypy-boto3-oam (>=1.34.0,<1.35.0)", "mypy-boto3-omics (>=1.34.0,<1.35.0)", "mypy-boto3-opensearch (>=1.34.0,<1.35.0)", "mypy-boto3-opensearchserverless (>=1.34.0,<1.35.0)", "mypy-boto3-opsworks (>=1.34.0,<1.35.0)", "mypy-boto3-opsworkscm (>=1.34.0,<1.35.0)", "mypy-boto3-organizations (>=1.34.0,<1.35.0)", "mypy-boto3-osis (>=1.34.0,<1.35.0)", "mypy-boto3-outposts (>=1.34.0,<1.35.0)", "mypy-boto3-panorama (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography-data (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-ad (>=1.34.0,<1.35.0)", "mypy-boto3-personalize (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-events (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-pi (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-email (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.34.0,<1.35.0)", "mypy-boto3-pipes (>=1.34.0,<1.35.0)", "mypy-boto3-polly (>=1.34.0,<1.35.0)", "mypy-boto3-pricing (>=1.34.0,<1.35.0)", "mypy-boto3-privatenetworks (>=1.34.0,<1.35.0)", "mypy-boto3-proton (>=1.34.0,<1.35.0)", "mypy-boto3-qbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-qconnect (>=1.34.0,<1.35.0)", "mypy-boto3-qldb (>=1.34.0,<1.35.0)", "mypy-boto3-qldb-session (>=1.34.0,<1.35.0)", "mypy-boto3-quicksight (>=1.34.0,<1.35.0)", "mypy-boto3-ram (>=1.34.0,<1.35.0)", "mypy-boto3-rbin (>=1.34.0,<1.35.0)", "mypy-boto3-rds (>=1.34.0,<1.35.0)", "mypy-boto3-rds-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-rekognition (>=1.34.0,<1.35.0)", "mypy-boto3-repostspace (>=1.34.0,<1.35.0)", "mypy-boto3-resiliencehub (>=1.34.0,<1.35.0)", "mypy-boto3-resource-explorer-2 
(>=1.34.0,<1.35.0)", "mypy-boto3-resource-groups (>=1.34.0,<1.35.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.34.0,<1.35.0)", "mypy-boto3-robomaker (>=1.34.0,<1.35.0)", "mypy-boto3-rolesanywhere (>=1.34.0,<1.35.0)", "mypy-boto3-route53 (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-cluster (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-control-config (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-readiness (>=1.34.0,<1.35.0)", "mypy-boto3-route53domains (>=1.34.0,<1.35.0)", "mypy-boto3-route53profiles (>=1.34.0,<1.35.0)", "mypy-boto3-route53resolver (>=1.34.0,<1.35.0)", "mypy-boto3-rum (>=1.34.0,<1.35.0)", "mypy-boto3-s3 (>=1.34.0,<1.35.0)", "mypy-boto3-s3control (>=1.34.0,<1.35.0)", "mypy-boto3-s3outposts (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-edge (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-geospatial (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-metrics (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-savingsplans (>=1.34.0,<1.35.0)", "mypy-boto3-scheduler (>=1.34.0,<1.35.0)", "mypy-boto3-schemas (>=1.34.0,<1.35.0)", "mypy-boto3-sdb (>=1.34.0,<1.35.0)", "mypy-boto3-secretsmanager (>=1.34.0,<1.35.0)", "mypy-boto3-securityhub (>=1.34.0,<1.35.0)", "mypy-boto3-securitylake (>=1.34.0,<1.35.0)", "mypy-boto3-serverlessrepo (>=1.34.0,<1.35.0)", "mypy-boto3-service-quotas (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog-appregistry (>=1.34.0,<1.35.0)", "mypy-boto3-servicediscovery (>=1.34.0,<1.35.0)", "mypy-boto3-ses (>=1.34.0,<1.35.0)", "mypy-boto3-sesv2 (>=1.34.0,<1.35.0)", "mypy-boto3-shield (>=1.34.0,<1.35.0)", "mypy-boto3-signer (>=1.34.0,<1.35.0)", "mypy-boto3-simspaceweaver (>=1.34.0,<1.35.0)", "mypy-boto3-sms (>=1.34.0,<1.35.0)", "mypy-boto3-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-snow-device-management 
(>=1.34.0,<1.35.0)", "mypy-boto3-snowball (>=1.34.0,<1.35.0)", "mypy-boto3-sns (>=1.34.0,<1.35.0)", "mypy-boto3-sqs (>=1.34.0,<1.35.0)", "mypy-boto3-ssm (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)", "mypy-boto3-sso (>=1.34.0,<1.35.0)", "mypy-boto3-sso-admin (>=1.34.0,<1.35.0)", "mypy-boto3-sso-oidc (>=1.34.0,<1.35.0)", "mypy-boto3-stepfunctions (>=1.34.0,<1.35.0)", "mypy-boto3-storagegateway (>=1.34.0,<1.35.0)", "mypy-boto3-sts (>=1.34.0,<1.35.0)", "mypy-boto3-supplychain (>=1.34.0,<1.35.0)", "mypy-boto3-support (>=1.34.0,<1.35.0)", "mypy-boto3-support-app (>=1.34.0,<1.35.0)", "mypy-boto3-swf (>=1.34.0,<1.35.0)", "mypy-boto3-synthetics (>=1.34.0,<1.35.0)", "mypy-boto3-textract (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-influxdb (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-query (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-write (>=1.34.0,<1.35.0)", "mypy-boto3-tnb (>=1.34.0,<1.35.0)", "mypy-boto3-transcribe (>=1.34.0,<1.35.0)", "mypy-boto3-transfer (>=1.34.0,<1.35.0)", "mypy-boto3-translate (>=1.34.0,<1.35.0)", "mypy-boto3-trustedadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-verifiedpermissions (>=1.34.0,<1.35.0)", "mypy-boto3-voice-id (>=1.34.0,<1.35.0)", "mypy-boto3-vpc-lattice (>=1.34.0,<1.35.0)", "mypy-boto3-waf (>=1.34.0,<1.35.0)", "mypy-boto3-waf-regional (>=1.34.0,<1.35.0)", "mypy-boto3-wafv2 (>=1.34.0,<1.35.0)", "mypy-boto3-wellarchitected (>=1.34.0,<1.35.0)", "mypy-boto3-wisdom (>=1.34.0,<1.35.0)", "mypy-boto3-workdocs (>=1.34.0,<1.35.0)", "mypy-boto3-worklink (>=1.34.0,<1.35.0)", "mypy-boto3-workmail (>=1.34.0,<1.35.0)", "mypy-boto3-workmailmessageflow (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-thin-client (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-web (>=1.34.0,<1.35.0)", "mypy-boto3-xray (>=1.34.0,<1.35.0)"] amp = ["mypy-boto3-amp (>=1.34.0,<1.35.0)"] amplify = ["mypy-boto3-amplify (>=1.34.0,<1.35.0)"] 
amplifybackend = ["mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)"] @@ -208,7 +208,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.34.0,<1.35.0)"] -boto3 = ["boto3 (==1.34.99)", "botocore (==1.34.99)"] +boto3 = ["boto3 (==1.34.114)", "botocore (==1.34.114)"] braket = ["mypy-boto3-braket (>=1.34.0,<1.35.0)"] budgets = ["mypy-boto3-budgets (>=1.34.0,<1.35.0)"] ce = ["mypy-boto3-ce (>=1.34.0,<1.35.0)"] @@ -389,6 +389,7 @@ lookoutvision = ["mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)"] m2 = ["mypy-boto3-m2 (>=1.34.0,<1.35.0)"] machinelearning = ["mypy-boto3-machinelearning (>=1.34.0,<1.35.0)"] macie2 = ["mypy-boto3-macie2 (>=1.34.0,<1.35.0)"] +mailmanager = ["mypy-boto3-mailmanager (>=1.34.0,<1.35.0)"] managedblockchain = ["mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)"] managedblockchain-query = ["mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)"] marketplace-agreement = ["mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)"] @@ -555,13 +556,13 @@ xray = ["mypy-boto3-xray (>=1.34.0,<1.35.0)"] [[package]] name = "botocore" -version = "1.34.99" +version = "1.34.114" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.34.99-py3-none-any.whl", hash = "sha256:18c68bdeb0ffb73290912b0c96204fc36d3128f00a00b5cdc35ac34d66225f1c"}, - {file = "botocore-1.34.99.tar.gz", hash = "sha256:cafe569e2136cb33cb0e5dd32fb1c0e1503ddc1413d3be215df8ddf05e69137a"}, + {file = "botocore-1.34.114-py3-none-any.whl", hash = "sha256:606d1e55984d45e41a812badee292755f4db0233eed9cca63ea3bb8f5755507f"}, + {file = "botocore-1.34.114.tar.gz", hash = "sha256:5705f74fda009656a218ffaf4afd81228359160f2ab806ab8222d07e9da3a73b"}, ] [package.dependencies] @@ -677,17 +678,17 @@ pycparser = "*" [[package]] name = "cfn-lint" -version = "0.87.1" +version = "0.87.4" description = "Checks CloudFormation templates for practices and behaviour that could potentially be improved" optional = false python-versions = "!=4.0,<=4.0,>=3.8" files = [ - {file = "cfn_lint-0.87.1-py3-none-any.whl", hash = "sha256:d450f450635fc223b6f66880ccac52a5fd1a52966fa1705f1ba52b88dfed3071"}, - {file = "cfn_lint-0.87.1.tar.gz", hash = "sha256:b3ce9d3e5e0eadcea5d584c8ccaa00bf2a990a36a64d7ffd8683bc60b7e4f06f"}, + {file = "cfn_lint-0.87.4-py3-none-any.whl", hash = "sha256:a4e00f36b589a686efc59df5a25838b661c482ea51391c091553921db38fca50"}, + {file = "cfn_lint-0.87.4.tar.gz", hash = "sha256:1bf635bfe252dd6160c2ed7a8c5b920381bc404cba67d316b454cd70ba678fd7"}, ] [package.dependencies] -aws-sam-translator = ">=1.87.0" +aws-sam-translator = ">=1.89.0" jschema-to-python = ">=1.2.3,<1.3.0" jsonpatch = "*" jsonschema = ">=3.0,<5" @@ -835,68 +836,65 @@ files = [ [[package]] name = "coverage" -version = "7.5.1" +version = "7.5.3" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0884920835a033b78d1c73b6d3bbcda8161a900f38a488829a83982925f6c2e"}, - {file = "coverage-7.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:39afcd3d4339329c5f58de48a52f6e4e50f6578dd6099961cf22228feb25f38f"}, - {file = "coverage-7.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b0ceee8147444347da6a66be737c9d78f3353b0681715b668b72e79203e4a"}, - {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a9ca3f2fae0088c3c71d743d85404cec8df9be818a005ea065495bedc33da35"}, - {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd215c0c7d7aab005221608a3c2b46f58c0285a819565887ee0b718c052aa4e"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4bf0655ab60d754491004a5efd7f9cccefcc1081a74c9ef2da4735d6ee4a6223"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:61c4bf1ba021817de12b813338c9be9f0ad5b1e781b9b340a6d29fc13e7c1b5e"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db66fc317a046556a96b453a58eced5024af4582a8dbdc0c23ca4dbc0d5b3146"}, - {file = "coverage-7.5.1-cp310-cp310-win32.whl", hash = "sha256:b016ea6b959d3b9556cb401c55a37547135a587db0115635a443b2ce8f1c7228"}, - {file = "coverage-7.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:df4e745a81c110e7446b1cc8131bf986157770fa405fe90e15e850aaf7619bc8"}, - {file = "coverage-7.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:796a79f63eca8814ca3317a1ea443645c9ff0d18b188de470ed7ccd45ae79428"}, - {file = "coverage-7.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fc84a37bfd98db31beae3c2748811a3fa72bf2007ff7902f68746d9757f3746"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6175d1a0559986c6ee3f7fccfc4a90ecd12ba0a383dcc2da30c2b9918d67d8a3"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1fc81d5878cd6274ce971e0a3a18a8803c3fe25457165314271cf78e3aae3aa2"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:556cf1a7cbc8028cb60e1ff0be806be2eded2daf8129b8811c63e2b9a6c43bca"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9981706d300c18d8b220995ad22627647be11a4276721c10911e0e9fa44c83e8"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d7fed867ee50edf1a0b4a11e8e5d0895150e572af1cd6d315d557758bfa9c057"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef48e2707fb320c8f139424a596f5b69955a85b178f15af261bab871873bb987"}, - {file = "coverage-7.5.1-cp311-cp311-win32.whl", hash = "sha256:9314d5678dcc665330df5b69c1e726a0e49b27df0461c08ca12674bcc19ef136"}, - {file = "coverage-7.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fa567e99765fe98f4e7d7394ce623e794d7cabb170f2ca2ac5a4174437e90dd"}, - {file = "coverage-7.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b6cf3764c030e5338e7f61f95bd21147963cf6aa16e09d2f74f1fa52013c1206"}, - {file = "coverage-7.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ec92012fefebee89a6b9c79bc39051a6cb3891d562b9270ab10ecfdadbc0c34"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16db7f26000a07efcf6aea00316f6ac57e7d9a96501e990a36f40c965ec7a95d"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beccf7b8a10b09c4ae543582c1319c6df47d78fd732f854ac68d518ee1fb97fa"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8748731ad392d736cc9ccac03c9845b13bb07d020a33423fa5b3a36521ac6e4e"}, - {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:7352b9161b33fd0b643ccd1f21f3a3908daaddf414f1c6cb9d3a2fd618bf2572"}, - {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7a588d39e0925f6a2bff87154752481273cdb1736270642aeb3635cb9b4cad07"}, - {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:68f962d9b72ce69ea8621f57551b2fa9c70509af757ee3b8105d4f51b92b41a7"}, - {file = "coverage-7.5.1-cp312-cp312-win32.whl", hash = "sha256:f152cbf5b88aaeb836127d920dd0f5e7edff5a66f10c079157306c4343d86c19"}, - {file = "coverage-7.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:5a5740d1fb60ddf268a3811bcd353de34eb56dc24e8f52a7f05ee513b2d4f596"}, - {file = "coverage-7.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2213def81a50519d7cc56ed643c9e93e0247f5bbe0d1247d15fa520814a7cd7"}, - {file = "coverage-7.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5037f8fcc2a95b1f0e80585bd9d1ec31068a9bcb157d9750a172836e98bc7a90"}, - {file = "coverage-7.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3721c2c9e4c4953a41a26c14f4cef64330392a6d2d675c8b1db3b645e31f0e"}, - {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca498687ca46a62ae590253fba634a1fe9836bc56f626852fb2720f334c9e4e5"}, - {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cdcbc320b14c3e5877ee79e649677cb7d89ef588852e9583e6b24c2e5072661"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:57e0204b5b745594e5bc14b9b50006da722827f0b8c776949f1135677e88d0b8"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fe7502616b67b234482c3ce276ff26f39ffe88adca2acf0261df4b8454668b4"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9e78295f4144f9dacfed4f92935fbe1780021247c2fabf73a819b17f0ccfff8d"}, - {file = "coverage-7.5.1-cp38-cp38-win32.whl", hash = 
"sha256:1434e088b41594baa71188a17533083eabf5609e8e72f16ce8c186001e6b8c41"}, - {file = "coverage-7.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:0646599e9b139988b63704d704af8e8df7fa4cbc4a1f33df69d97f36cb0a38de"}, - {file = "coverage-7.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4cc37def103a2725bc672f84bd939a6fe4522310503207aae4d56351644682f1"}, - {file = "coverage-7.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc0b4d8bfeabd25ea75e94632f5b6e047eef8adaed0c2161ada1e922e7f7cece"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d0a0f5e06881ecedfe6f3dd2f56dcb057b6dbeb3327fd32d4b12854df36bf26"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9735317685ba6ec7e3754798c8871c2f49aa5e687cc794a0b1d284b2389d1bd5"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d21918e9ef11edf36764b93101e2ae8cc82aa5efdc7c5a4e9c6c35a48496d601"}, - {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3e757949f268364b96ca894b4c342b41dc6f8f8b66c37878aacef5930db61be"}, - {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:79afb6197e2f7f60c4824dd4b2d4c2ec5801ceb6ba9ce5d2c3080e5660d51a4f"}, - {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d0d98d95dd18fe29dc66808e1accf59f037d5716f86a501fc0256455219668"}, - {file = "coverage-7.5.1-cp39-cp39-win32.whl", hash = "sha256:1cc0fe9b0b3a8364093c53b0b4c0c2dd4bb23acbec4c9240b5f284095ccf7981"}, - {file = "coverage-7.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:dde0070c40ea8bb3641e811c1cfbf18e265d024deff6de52c5950677a8fb1e0f"}, - {file = "coverage-7.5.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:6537e7c10cc47c595828b8a8be04c72144725c383c4702703ff4e42e44577312"}, - {file = "coverage-7.5.1.tar.gz", hash = 
"sha256:54de9ef3a9da981f7af93eafde4ede199e0846cd819eb27c88e2b712aae9708c"}, + {file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"}, + {file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c"}, + {file = "coverage-7.5.3-cp310-cp310-win32.whl", hash = "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84"}, + {file = "coverage-7.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac"}, + {file = "coverage-7.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974"}, + {file = "coverage-7.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232"}, + {file = 
"coverage-7.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614"}, + {file = "coverage-7.5.3-cp311-cp311-win32.whl", hash = "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9"}, + {file = "coverage-7.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a"}, + {file = "coverage-7.5.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8"}, + {file = "coverage-7.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db"}, + {file = 
"coverage-7.5.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84"}, + {file = "coverage-7.5.3-cp312-cp312-win32.whl", hash = "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08"}, + {file = "coverage-7.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb"}, + {file = "coverage-7.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb"}, + {file = "coverage-7.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0"}, + {file = "coverage-7.5.3-cp38-cp38-win32.whl", hash = "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485"}, + {file = "coverage-7.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56"}, + {file = "coverage-7.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85"}, + {file = "coverage-7.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd"}, + {file = "coverage-7.5.3-cp39-cp39-win32.whl", hash = "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d"}, + {file = "coverage-7.5.3-cp39-cp39-win_amd64.whl", hash = 
"sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0"}, + {file = "coverage-7.5.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884"}, + {file = "coverage-7.5.3.tar.gz", hash = "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f"}, ] -[package.dependencies] -tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} - [package.extras] toml = ["tomli"] @@ -967,39 +965,26 @@ files = [ [[package]] name = "docker" -version = "7.0.0" +version = "7.1.0" description = "A Python library for the Docker Engine API." optional = false python-versions = ">=3.8" files = [ - {file = "docker-7.0.0-py3-none-any.whl", hash = "sha256:12ba681f2777a0ad28ffbcc846a69c31b4dfd9752b47eb425a274ee269c5e14b"}, - {file = "docker-7.0.0.tar.gz", hash = "sha256:323736fb92cd9418fc5e7133bc953e11a9da04f4483f828b527db553f1e7e5a3"}, + {file = "docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0"}, + {file = "docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c"}, ] [package.dependencies] -packaging = ">=14.0" pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} requests = ">=2.26.0" urllib3 = ">=1.26.0" [package.extras] +dev = ["coverage (==7.2.7)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.1.0)", "ruff (==0.1.8)"] +docs = ["myst-parser (==0.18.0)", "sphinx (==5.1.1)"] ssh = ["paramiko (>=2.4.3)"] websockets = ["websocket-client (>=1.3.0)"] -[[package]] -name = "exceptiongroup" -version = "1.2.1" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, - {file = "exceptiongroup-1.2.1.tar.gz", hash = 
"sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, -] - -[package.extras] -test = ["pytest (>=6)"] - [[package]] name = "execnet" version = "2.1.1" @@ -1048,13 +1033,13 @@ pyflakes = ">=3.1.0,<3.2.0" [[package]] name = "freezegun" -version = "1.5.0" +version = "1.5.1" description = "Let your Python tests travel through time" optional = false python-versions = ">=3.7" files = [ - {file = "freezegun-1.5.0-py3-none-any.whl", hash = "sha256:ec3f4ba030e34eb6cf7e1e257308aee2c60c3d038ff35996d7475760c9ff3719"}, - {file = "freezegun-1.5.0.tar.gz", hash = "sha256:200a64359b363aa3653d8aac289584078386c7c3da77339d257e46a01fb5c77c"}, + {file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"}, + {file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"}, ] [package.dependencies] @@ -1137,13 +1122,13 @@ files = [ [[package]] name = "joserfc" -version = "0.9.0" +version = "0.10.0" description = "The ultimate Python library for JOSE RFCs, including JWS, JWE, JWK, JWA, JWT" optional = false python-versions = ">=3.8" files = [ - {file = "joserfc-0.9.0-py3-none-any.whl", hash = "sha256:4026bdbe2c196cd40574e916fa1e28874d99649412edaab0e373dec3077153fb"}, - {file = "joserfc-0.9.0.tar.gz", hash = "sha256:eebca7f587b1761ce43a98ffd5327f2b600b9aa5bb0a77b947687f503ad43bc0"}, + {file = "joserfc-0.10.0-py3-none-any.whl", hash = "sha256:1b39bd9078d7f0087bfe694f96c9723c7a2fd63e5974f5efd805016319f2f50d"}, + {file = "joserfc-0.10.0.tar.gz", hash = "sha256:d1c16ff2179145e248fd67dbaa47bb5a3855f8754c64902dd09e2775e63bcd63"}, ] [package.dependencies] @@ -1414,13 +1399,13 @@ files = [ [[package]] name = "moto" -version = "5.0.6" +version = "5.0.8" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "moto-5.0.6-py2.py3-none-any.whl", hash = "sha256:ca1e22831a741733b581ff2ef4d6ae2e1c6db1eab97af1b78b86ca2c6e88c609"}, - {file = 
"moto-5.0.6.tar.gz", hash = "sha256:ad8b23f2b555ad694da8b2432a42b6d96beaaf67a4e7d932196a72193a2eee2c"}, + {file = "moto-5.0.8-py2.py3-none-any.whl", hash = "sha256:7d1035e366434bfa9fcc0621f07d5aa724b6846408071d540137a0554c46f214"}, + {file = "moto-5.0.8.tar.gz", hash = "sha256:517fb808dc718bcbdda54c6ffeaca0adc34cf6e10821bfb01216ce420a31765c"}, ] [package.dependencies] @@ -1434,7 +1419,7 @@ Jinja2 = ">=2.10.1" joserfc = {version = ">=0.9.0", optional = true, markers = "extra == \"resourcegroupstaggingapi\""} jsondiff = {version = ">=1.1.2", optional = true, markers = "extra == \"resourcegroupstaggingapi\""} openapi-spec-validator = {version = ">=0.5.0", optional = true, markers = "extra == \"resourcegroupstaggingapi\""} -py-partiql-parser = {version = "0.5.4", optional = true, markers = "extra == \"dynamodb\" or extra == \"resourcegroupstaggingapi\""} +py-partiql-parser = {version = "0.5.5", optional = true, markers = "extra == \"dynamodb\" or extra == \"resourcegroupstaggingapi\""} pyparsing = {version = ">=3.0.7", optional = true, markers = "extra == \"resourcegroupstaggingapi\""} python-dateutil = ">=2.1,<3.0.0" PyYAML = {version = ">=5.1", optional = true, markers = "extra == \"resourcegroupstaggingapi\" or extra == \"ssm\""} @@ -1444,23 +1429,23 @@ werkzeug = ">=0.5,<2.2.0 || >2.2.0,<2.2.1 || >2.2.1" xmltodict = "*" [package.extras] -all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)", "setuptools"] +all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)", "setuptools"] 
apigateway = ["PyYAML (>=5.1)", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)"] apigatewayv2 = ["PyYAML (>=5.1)", "openapi-spec-validator (>=0.5.0)"] appsync = ["graphql-core"] awslambda = ["docker (>=3.0.0)"] batch = ["docker (>=3.0.0)"] -cloudformation = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)", "setuptools"] +cloudformation = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)", "setuptools"] cognitoidp = ["joserfc (>=0.9.0)"] -dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.4)"] -dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.4)"] +dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.5)"] +dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.5)"] glue = ["pyparsing (>=3.0.7)"] iotdata = ["jsondiff (>=1.1.2)"] -proxy = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)", "setuptools"] -resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)"] -s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.5.4)"] -s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.5.4)"] -server = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "flask 
(!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)", "setuptools"] +proxy = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)", "setuptools"] +resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)"] +s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.5.5)"] +s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.5.5)"] +server = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "flask (!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)", "setuptools"] ssm = ["PyYAML (>=5.1)"] stepfunctions = ["antlr4-python3-runtime", "jsonpath-ng"] xray = ["aws-xray-sdk (>=0.93,!=0.96)", "setuptools"] @@ -1520,7 +1505,6 @@ files = [ [package.dependencies] mypy-extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typing-extensions = ">=4.1.0" [package.extras] @@ -1529,6 +1513,20 @@ install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] +[[package]] +name = "mypy-boto3-autoscaling" +version = "1.34.54" +description = "Type annotations for boto3.AutoScaling 1.34.54 service generated with mypy-boto3-builder 7.23.2" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-boto3-autoscaling-1.34.54.tar.gz", hash = 
"sha256:30600213b09eb6ead8f4137a1aadd502fcf84ea7a843f01295275b5fd2cb4db4"}, + {file = "mypy_boto3_autoscaling-1.34.54-py3-none-any.whl", hash = "sha256:abaef252016437cf5b61b2d565cfa0a8e8596d72a794fe004401d30b9c7c07da"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + [[package]] name = "mypy-boto3-cloudwatch" version = "1.34.83" @@ -1545,13 +1543,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} [[package]] name = "mypy-boto3-dynamodb" -version = "1.34.97" -description = "Type annotations for boto3.DynamoDB 1.34.97 service generated with mypy-boto3-builder 7.24.0" +version = "1.34.114" +description = "Type annotations for boto3.DynamoDB 1.34.114 service generated with mypy-boto3-builder 7.24.0" optional = false python-versions = ">=3.8" files = [ - {file = "mypy_boto3_dynamodb-1.34.97-py3-none-any.whl", hash = "sha256:4bb02b01506ba27cd7b63f3d2013147824c7504fa8f4f03242e51f5b78c31edf"}, - {file = "mypy_boto3_dynamodb-1.34.97.tar.gz", hash = "sha256:3f67a291157dd94bef376c5490d9d29bbacc9741dfef124f9724bc5d29b0458a"}, + {file = "mypy_boto3_dynamodb-1.34.114-py3-none-any.whl", hash = "sha256:64be1fcd36db0daa354a78a2affdaef048653e4c5116da98f71446eee5db7638"}, + {file = "mypy_boto3_dynamodb-1.34.114.tar.gz", hash = "sha256:2a1a131587dbf857e5bec56ae84d8f9fb9618966e7a6120fb6c7da12cb73a82c"}, ] [package.dependencies] @@ -1559,13 +1557,27 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} [[package]] name = "mypy-boto3-ec2" -version = "1.34.97" -description = "Type annotations for boto3.EC2 1.34.97 service generated with mypy-boto3-builder 7.24.0" +version = "1.34.114" +description = "Type annotations for boto3.EC2 1.34.114 service generated with mypy-boto3-builder 7.24.0" optional = false python-versions = ">=3.8" files = [ - {file = "mypy_boto3_ec2-1.34.97-py3-none-any.whl", hash = 
"sha256:fea2cb78e0a55ad77b79fac0702d0c28c17798fc116bf7b84cbea421c5fd9b9c"}, - {file = "mypy_boto3_ec2-1.34.97.tar.gz", hash = "sha256:a38be6ad39bb18839ae349a0d1a6fe1c87ab8fe9cbc4a28a87aa5a972eedc361"}, + {file = "mypy_boto3_ec2-1.34.114-py3-none-any.whl", hash = "sha256:540cbf529502dce8c74c1ea4f094d33f40d8ff4db2e05305404cc13ca9da183a"}, + {file = "mypy_boto3_ec2-1.34.114.tar.gz", hash = "sha256:402a574b8bd118e642db9fa452c172c8d0e89e40516a8a6d19df8395bf794b1f"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + +[[package]] +name = "mypy-boto3-ecs" +version = "1.34.76" +description = "Type annotations for boto3.ECS 1.34.76 service generated with mypy-boto3-builder 7.23.2" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-boto3-ecs-1.34.76.tar.gz", hash = "sha256:cbc078421d565891203fe547cf39c976c742b00035a548b3121faa4c30c57a84"}, + {file = "mypy_boto3_ecs-1.34.76-py3-none-any.whl", hash = "sha256:a75f7d470389164eed0a4fd5ac50aade335b3faf7eba2abcd5c4f17e6b81c244"}, ] [package.dependencies] @@ -1601,13 +1613,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} [[package]] name = "mypy-boto3-rds" -version = "1.34.93" -description = "Type annotations for boto3.RDS 1.34.93 service generated with mypy-boto3-builder 7.24.0" +version = "1.34.110" +description = "Type annotations for boto3.RDS 1.34.110 service generated with mypy-boto3-builder 7.24.0" optional = false python-versions = ">=3.8" files = [ - {file = "mypy_boto3_rds-1.34.93-py3-none-any.whl", hash = "sha256:4c4b96eebf653ec498e8b01d6a87dd4cf1901fc3647c2fcc825b51d061370f58"}, - {file = "mypy_boto3_rds-1.34.93.tar.gz", hash = "sha256:3896edc3697e1e6e9482b312b8b982d087d68c6338ff469510b4ab49896a0b4f"}, + {file = "mypy_boto3_rds-1.34.110-py3-none-any.whl", hash = "sha256:626e55e2e7a43e7cd08edde3faf9a5c50596fd4ad5f55e2b7547e7151ab8128a"}, + {file = "mypy_boto3_rds-1.34.110.tar.gz", hash = 
"sha256:0bdfd4fe00fa0ff4d16fb1342b9594c37bb5cd3f67c14896112c78831f645143"}, ] [package.dependencies] @@ -1779,13 +1791,13 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.1" +version = "4.2.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"}, - {file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] [package.extras] @@ -1810,13 +1822,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "py-partiql-parser" -version = "0.5.4" +version = "0.5.5" description = "Pure Python PartiQL Parser" optional = false python-versions = "*" files = [ - {file = "py_partiql_parser-0.5.4-py2.py3-none-any.whl", hash = "sha256:3dc4295a47da9587681a96b35c6e151886fdbd0a4acbe0d97c4c68e5f689d315"}, - {file = "py_partiql_parser-0.5.4.tar.gz", hash = "sha256:72e043919538fa63edae72fb59afc7e3fd93adbde656718a7d2b4666f23dd114"}, + {file = "py_partiql_parser-0.5.5-py2.py3-none-any.whl", hash = "sha256:90d278818385bd60c602410c953ee78f04ece599d8cd21c656fc5e47399577a1"}, + {file = "py_partiql_parser-0.5.5.tar.gz", hash = "sha256:ed07f8edf4b55e295cab4f5fd3e2ba3196cee48a43fe210d53ddd6ffce1cf1ff"}, ] [package.extras] @@ -1846,18 +1858,18 @@ files = [ [[package]] name = "pydantic" -version = "2.7.1" +version = "2.7.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.1-py3-none-any.whl", hash = 
"sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, - {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, + {file = "pydantic-2.7.2-py3-none-any.whl", hash = "sha256:834ab954175f94e6e68258537dc49402c4a5e9d0409b9f1b86b7e934a8372de7"}, + {file = "pydantic-2.7.2.tar.gz", hash = "sha256:71b2945998f9c9b7919a45bde9a50397b289937d215ae141c1d0903ba7149fd7"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.2" +pydantic-core = "2.18.3" typing-extensions = ">=4.6.1" [package.extras] @@ -1865,90 +1877,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.2" +version = "2.18.3" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, - {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, - {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, - {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, - {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, - {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, - {file = 
"pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, - {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, - {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, - {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, - {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, - {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, - 
{file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, - {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, - {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, - {file = 
"pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, - {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, + {file = "pydantic_core-2.18.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:744697428fcdec6be5670460b578161d1ffe34743a5c15656be7ea82b008197c"}, + {file = "pydantic_core-2.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:37b40c05ced1ba4218b14986fe6f283d22e1ae2ff4c8e28881a70fb81fbfcda7"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544a9a75622357076efb6b311983ff190fbfb3c12fc3a853122b34d3d358126c"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e2e253af04ceaebde8eb201eb3f3e3e7e390f2d275a88300d6a1959d710539e2"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:855ec66589c68aa367d989da5c4755bb74ee92ccad4fdb6af942c3612c067e34"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d3e42bb54e7e9d72c13ce112e02eb1b3b55681ee948d748842171201a03a98a"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6ac9ffccc9d2e69d9fba841441d4259cb668ac180e51b30d3632cd7abca2b9b"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c56eca1686539fa0c9bda992e7bd6a37583f20083c37590413381acfc5f192d6"}, + {file = "pydantic_core-2.18.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:17954d784bf8abfc0ec2a633108207ebc4fa2df1a0e4c0c3ccbaa9bb01d2c426"}, + {file = "pydantic_core-2.18.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:98ed737567d8f2ecd54f7c8d4f8572ca7c7921ede93a2e52939416170d357812"}, + {file = "pydantic_core-2.18.3-cp310-none-win32.whl", hash = "sha256:9f9e04afebd3ed8c15d67a564ed0a34b54e52136c6d40d14c5547b238390e779"}, + {file = "pydantic_core-2.18.3-cp310-none-win_amd64.whl", hash = "sha256:45e4ffbae34f7ae30d0047697e724e534a7ec0a82ef9994b7913a412c21462a0"}, + {file = "pydantic_core-2.18.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b9ebe8231726c49518b16b237b9fe0d7d361dd221302af511a83d4ada01183ab"}, + {file = "pydantic_core-2.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:b8e20e15d18bf7dbb453be78a2d858f946f5cdf06c5072453dace00ab652e2b2"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0d9ff283cd3459fa0bf9b0256a2b6f01ac1ff9ffb034e24457b9035f75587cb"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f7ef5f0ebb77ba24c9970da18b771711edc5feaf00c10b18461e0f5f5949231"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73038d66614d2e5cde30435b5afdced2b473b4c77d4ca3a8624dd3e41a9c19be"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6afd5c867a74c4d314c557b5ea9520183fadfbd1df4c2d6e09fd0d990ce412cd"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd7df92f28d351bb9f12470f4c533cf03d1b52ec5a6e5c58c65b183055a60106"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:80aea0ffeb1049336043d07799eace1c9602519fb3192916ff525b0287b2b1e4"}, + {file = "pydantic_core-2.18.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:aaee40f25bba38132e655ffa3d1998a6d576ba7cf81deff8bfa189fb43fd2bbe"}, + {file = "pydantic_core-2.18.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9128089da8f4fe73f7a91973895ebf2502539d627891a14034e45fb9e707e26d"}, + {file = "pydantic_core-2.18.3-cp311-none-win32.whl", hash = "sha256:fec02527e1e03257aa25b1a4dcbe697b40a22f1229f5d026503e8b7ff6d2eda7"}, + {file = "pydantic_core-2.18.3-cp311-none-win_amd64.whl", hash = "sha256:58ff8631dbab6c7c982e6425da8347108449321f61fe427c52ddfadd66642af7"}, + {file = "pydantic_core-2.18.3-cp311-none-win_arm64.whl", hash = "sha256:3fc1c7f67f34c6c2ef9c213e0f2a351797cda98249d9ca56a70ce4ebcaba45f4"}, + {file = "pydantic_core-2.18.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:f0928cde2ae416a2d1ebe6dee324709c6f73e93494d8c7aea92df99aab1fc40f"}, + {file = "pydantic_core-2.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bee9bb305a562f8b9271855afb6ce00223f545de3d68560b3c1649c7c5295e9"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e862823be114387257dacbfa7d78547165a85d7add33b446ca4f4fae92c7ff5c"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a36f78674cbddc165abab0df961b5f96b14461d05feec5e1f78da58808b97e7"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba905d184f62e7ddbb7a5a751d8a5c805463511c7b08d1aca4a3e8c11f2e5048"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fdd362f6a586e681ff86550b2379e532fee63c52def1c666887956748eaa326"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24b214b7ee3bd3b865e963dbed0f8bc5375f49449d70e8d407b567af3222aae4"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:691018785779766127f531674fa82bb368df5b36b461622b12e176c18e119022"}, + {file = "pydantic_core-2.18.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:60e4c625e6f7155d7d0dcac151edf5858102bc61bf959d04469ca6ee4e8381bd"}, + {file = "pydantic_core-2.18.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4e651e47d981c1b701dcc74ab8fec5a60a5b004650416b4abbef13db23bc7be"}, + {file = "pydantic_core-2.18.3-cp312-none-win32.whl", hash = "sha256:ffecbb5edb7f5ffae13599aec33b735e9e4c7676ca1633c60f2c606beb17efc5"}, + {file = "pydantic_core-2.18.3-cp312-none-win_amd64.whl", hash = "sha256:2c8333f6e934733483c7eddffdb094c143b9463d2af7e6bd85ebcb2d4a1b82c6"}, + {file = "pydantic_core-2.18.3-cp312-none-win_arm64.whl", hash = 
"sha256:7a20dded653e516a4655f4c98e97ccafb13753987434fe7cf044aa25f5b7d417"}, + {file = "pydantic_core-2.18.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:eecf63195be644b0396f972c82598cd15693550f0ff236dcf7ab92e2eb6d3522"}, + {file = "pydantic_core-2.18.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c44efdd3b6125419c28821590d7ec891c9cb0dff33a7a78d9d5c8b6f66b9702"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e59fca51ffbdd1638b3856779342ed69bcecb8484c1d4b8bdb237d0eb5a45e2"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70cf099197d6b98953468461d753563b28e73cf1eade2ffe069675d2657ed1d5"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63081a49dddc6124754b32a3774331467bfc3d2bd5ff8f10df36a95602560361"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:370059b7883485c9edb9655355ff46d912f4b03b009d929220d9294c7fd9fd60"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a64faeedfd8254f05f5cf6fc755023a7e1606af3959cfc1a9285744cc711044"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19d2e725de0f90d8671f89e420d36c3dd97639b98145e42fcc0e1f6d492a46dc"}, + {file = "pydantic_core-2.18.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:67bc078025d70ec5aefe6200ef094576c9d86bd36982df1301c758a9fff7d7f4"}, + {file = "pydantic_core-2.18.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:adf952c3f4100e203cbaf8e0c907c835d3e28f9041474e52b651761dc248a3c0"}, + {file = "pydantic_core-2.18.3-cp38-none-win32.whl", hash = "sha256:9a46795b1f3beb167eaee91736d5d17ac3a994bf2215a996aed825a45f897558"}, + {file = "pydantic_core-2.18.3-cp38-none-win_amd64.whl", hash = "sha256:200ad4e3133cb99ed82342a101a5abf3d924722e71cd581cc113fe828f727fbc"}, + 
{file = "pydantic_core-2.18.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:304378b7bf92206036c8ddd83a2ba7b7d1a5b425acafff637172a3aa72ad7083"}, + {file = "pydantic_core-2.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c826870b277143e701c9ccf34ebc33ddb4d072612683a044e7cce2d52f6c3fef"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e201935d282707394f3668380e41ccf25b5794d1b131cdd96b07f615a33ca4b1"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5560dda746c44b48bf82b3d191d74fe8efc5686a9ef18e69bdabccbbb9ad9442"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b32c2a1f8032570842257e4c19288eba9a2bba4712af542327de9a1204faff8"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:929c24e9dea3990bc8bcd27c5f2d3916c0c86f5511d2caa69e0d5290115344a9"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1a8376fef60790152564b0eab376b3e23dd6e54f29d84aad46f7b264ecca943"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dccf3ef1400390ddd1fb55bf0632209d39140552d068ee5ac45553b556780e06"}, + {file = "pydantic_core-2.18.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:41dbdcb0c7252b58fa931fec47937edb422c9cb22528f41cb8963665c372caf6"}, + {file = "pydantic_core-2.18.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:666e45cf071669fde468886654742fa10b0e74cd0fa0430a46ba6056b24fb0af"}, + {file = "pydantic_core-2.18.3-cp39-none-win32.whl", hash = "sha256:f9c08cabff68704a1b4667d33f534d544b8a07b8e5d039c37067fceb18789e78"}, + {file = "pydantic_core-2.18.3-cp39-none-win_amd64.whl", hash = "sha256:4afa5f5973e8572b5c0dcb4e2d4fda7890e7cd63329bd5cc3263a25c92ef0026"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:77319771a026f7c7d29c6ebc623de889e9563b7087911b46fd06c044a12aa5e9"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:df11fa992e9f576473038510d66dd305bcd51d7dd508c163a8c8fe148454e059"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d531076bdfb65af593326ffd567e6ab3da145020dafb9187a1d131064a55f97c"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d33ce258e4e6e6038f2b9e8b8a631d17d017567db43483314993b3ca345dcbbb"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1f9cd7f5635b719939019be9bda47ecb56e165e51dd26c9a217a433e3d0d59a9"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cd4a032bb65cc132cae1fe3e52877daecc2097965cd3914e44fbd12b00dae7c5"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f2718430098bcdf60402136c845e4126a189959d103900ebabb6774a5d9fdb"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c0037a92cf0c580ed14e10953cdd26528e8796307bb8bb312dc65f71547df04d"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b95a0972fac2b1ff3c94629fc9081b16371dad870959f1408cc33b2f78ad347a"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a62e437d687cc148381bdd5f51e3e81f5b20a735c55f690c5be94e05da2b0d5c"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b367a73a414bbb08507da102dc2cde0fa7afe57d09b3240ce82a16d608a7679c"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ecce4b2360aa3f008da3327d652e74a0e743908eac306198b47e1c58b03dd2b"}, + {file = 
"pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd4435b8d83f0c9561a2a9585b1de78f1abb17cb0cef5f39bf6a4b47d19bafe3"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:616221a6d473c5b9aa83fa8982745441f6a4a62a66436be9445c65f241b86c94"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7e6382ce89a92bc1d0c0c5edd51e931432202b9080dc921d8d003e616402efd1"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff58f379345603d940e461eae474b6bbb6dab66ed9a851ecd3cb3709bf4dcf6a"}, + {file = "pydantic_core-2.18.3.tar.gz", hash = "sha256:432e999088d85c8f36b9a3f769a8e2b57aabd817bbb729a90d1fe7f18f6f1f39"}, ] [package.dependencies] @@ -1992,7 +2004,6 @@ files = [ [package.dependencies] packaging = ">=23.1" -tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} [package.extras] docs = ["furo (>=2023.8.19)", "sphinx (<7.2)", "sphinx-autodoc-typehints (>=1.24)"] @@ -2011,11 +2022,9 @@ files = [ [package.dependencies] colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] @@ -2055,6 +2064,21 @@ pytest = ">=6.2.5" [package.extras] dev = ["pre-commit", "pytest-asyncio", "tox"] +[[package]] +name = "pytest-runner" +version = "6.0.1" +description = "Invoke py.test as distutils command with dependency resolution" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-runner-6.0.1.tar.gz", hash = "sha256:70d4739585a7008f37bf4933c013fdb327b8878a5a69fcbb3316c88882f0f49b"}, + {file = "pytest_runner-6.0.1-py3-none-any.whl", hash = 
"sha256:ea326ed6f6613992746062362efab70212089a4209c08d67177b3df1c52cd9f2"}, +] + +[package.extras] +docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-virtualenv", "types-setuptools"] + [[package]] name = "pytest-xdist" version = "3.6.1" @@ -2189,101 +2213,101 @@ rpds-py = ">=0.7.0" [[package]] name = "regex" -version = "2024.4.28" +version = "2024.5.15" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd196d056b40af073d95a2879678585f0b74ad35190fac04ca67954c582c6b61"}, - {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8bb381f777351bd534462f63e1c6afb10a7caa9fa2a421ae22c26e796fe31b1f"}, - {file = "regex-2024.4.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:47af45b6153522733aa6e92543938e97a70ce0900649ba626cf5aad290b737b6"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99d6a550425cc51c656331af0e2b1651e90eaaa23fb4acde577cf15068e2e20f"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bf29304a8011feb58913c382902fde3395957a47645bf848eea695839aa101b7"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:92da587eee39a52c91aebea8b850e4e4f095fe5928d415cb7ed656b3460ae79a"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6277d426e2f31bdbacb377d17a7475e32b2d7d1f02faaecc48d8e370c6a3ff31"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:28e1f28d07220c0f3da0e8fcd5a115bbb53f8b55cecf9bec0c946eb9a059a94c"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:aaa179975a64790c1f2701ac562b5eeb733946eeb036b5bcca05c8d928a62f10"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6f435946b7bf7a1b438b4e6b149b947c837cb23c704e780c19ba3e6855dbbdd3"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:19d6c11bf35a6ad077eb23852827f91c804eeb71ecb85db4ee1386825b9dc4db"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:fdae0120cddc839eb8e3c15faa8ad541cc6d906d3eb24d82fb041cfe2807bc1e"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e672cf9caaf669053121f1766d659a8813bd547edef6e009205378faf45c67b8"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f57515750d07e14743db55d59759893fdb21d2668f39e549a7d6cad5d70f9fea"}, - {file = "regex-2024.4.28-cp310-cp310-win32.whl", hash = "sha256:a1409c4eccb6981c7baabc8888d3550df518add6e06fe74fa1d9312c1838652d"}, - {file = "regex-2024.4.28-cp310-cp310-win_amd64.whl", hash = "sha256:1f687a28640f763f23f8a9801fe9e1b37338bb1ca5d564ddd41619458f1f22d1"}, - {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:84077821c85f222362b72fdc44f7a3a13587a013a45cf14534df1cbbdc9a6796"}, - {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b45d4503de8f4f3dc02f1d28a9b039e5504a02cc18906cfe744c11def942e9eb"}, - {file = "regex-2024.4.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:457c2cd5a646dd4ed536c92b535d73548fb8e216ebee602aa9f48e068fc393f3"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b51739ddfd013c6f657b55a508de8b9ea78b56d22b236052c3a85a675102dc6"}, - {file = 
"regex-2024.4.28-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:459226445c7d7454981c4c0ce0ad1a72e1e751c3e417f305722bbcee6697e06a"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:670fa596984b08a4a769491cbdf22350431970d0112e03d7e4eeaecaafcd0fec"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe00f4fe11c8a521b173e6324d862ee7ee3412bf7107570c9b564fe1119b56fb"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:36f392dc7763fe7924575475736bddf9ab9f7a66b920932d0ea50c2ded2f5636"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:23a412b7b1a7063f81a742463f38821097b6a37ce1e5b89dd8e871d14dbfd86b"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f1d6e4b7b2ae3a6a9df53efbf199e4bfcff0959dbdb5fd9ced34d4407348e39a"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:499334ad139557de97cbc4347ee921c0e2b5e9c0f009859e74f3f77918339257"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:0940038bec2fe9e26b203d636c44d31dd8766abc1fe66262da6484bd82461ccf"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:66372c2a01782c5fe8e04bff4a2a0121a9897e19223d9eab30c54c50b2ebeb7f"}, - {file = "regex-2024.4.28-cp311-cp311-win32.whl", hash = "sha256:c77d10ec3c1cf328b2f501ca32583625987ea0f23a0c2a49b37a39ee5c4c4630"}, - {file = "regex-2024.4.28-cp311-cp311-win_amd64.whl", hash = "sha256:fc0916c4295c64d6890a46e02d4482bb5ccf33bf1a824c0eaa9e83b148291f90"}, - {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:08a1749f04fee2811c7617fdd46d2e46d09106fa8f475c884b65c01326eb15c5"}, - {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:b8eb28995771c087a73338f695a08c9abfdf723d185e57b97f6175c5051ff1ae"}, - {file = "regex-2024.4.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dd7ef715ccb8040954d44cfeff17e6b8e9f79c8019daae2fd30a8806ef5435c0"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb0315a2b26fde4005a7c401707c5352df274460f2f85b209cf6024271373013"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2fc053228a6bd3a17a9b0a3f15c3ab3cf95727b00557e92e1cfe094b88cc662"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fe9739a686dc44733d52d6e4f7b9c77b285e49edf8570754b322bca6b85b4cc"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74fcf77d979364f9b69fcf8200849ca29a374973dc193a7317698aa37d8b01c"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:965fd0cf4694d76f6564896b422724ec7b959ef927a7cb187fc6b3f4e4f59833"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2fef0b38c34ae675fcbb1b5db760d40c3fc3612cfa186e9e50df5782cac02bcd"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bc365ce25f6c7c5ed70e4bc674f9137f52b7dd6a125037f9132a7be52b8a252f"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ac69b394764bb857429b031d29d9604842bc4cbfd964d764b1af1868eeebc4f0"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:144a1fc54765f5c5c36d6d4b073299832aa1ec6a746a6452c3ee7b46b3d3b11d"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2630ca4e152c221072fd4a56d4622b5ada876f668ecd24d5ab62544ae6793ed6"}, - {file = "regex-2024.4.28-cp312-cp312-win32.whl", hash = "sha256:7f3502f03b4da52bbe8ba962621daa846f38489cae5c4a7b5d738f15f6443d17"}, - {file = 
"regex-2024.4.28-cp312-cp312-win_amd64.whl", hash = "sha256:0dd3f69098511e71880fb00f5815db9ed0ef62c05775395968299cb400aeab82"}, - {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:374f690e1dd0dbdcddea4a5c9bdd97632cf656c69113f7cd6a361f2a67221cb6"}, - {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f87ae6b96374db20f180eab083aafe419b194e96e4f282c40191e71980c666"}, - {file = "regex-2024.4.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5dbc1bcc7413eebe5f18196e22804a3be1bfdfc7e2afd415e12c068624d48247"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f85151ec5a232335f1be022b09fbbe459042ea1951d8a48fef251223fc67eee1"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57ba112e5530530fd175ed550373eb263db4ca98b5f00694d73b18b9a02e7185"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:224803b74aab56aa7be313f92a8d9911dcade37e5f167db62a738d0c85fdac4b"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a54a047b607fd2d2d52a05e6ad294602f1e0dec2291152b745870afc47c1397"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a2a512d623f1f2d01d881513af9fc6a7c46e5cfffb7dc50c38ce959f9246c94"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c06bf3f38f0707592898428636cbb75d0a846651b053a1cf748763e3063a6925"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1031a5e7b048ee371ab3653aad3030ecfad6ee9ecdc85f0242c57751a05b0ac4"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7a353ebfa7154c871a35caca7bfd8f9e18666829a1dc187115b80e35a29393e"}, - {file = 
"regex-2024.4.28-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7e76b9cfbf5ced1aca15a0e5b6f229344d9b3123439ffce552b11faab0114a02"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5ce479ecc068bc2a74cb98dd8dba99e070d1b2f4a8371a7dfe631f85db70fe6e"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d77b6f63f806578c604dca209280e4c54f0fa9a8128bb8d2cc5fb6f99da4150"}, - {file = "regex-2024.4.28-cp38-cp38-win32.whl", hash = "sha256:d84308f097d7a513359757c69707ad339da799e53b7393819ec2ea36bc4beb58"}, - {file = "regex-2024.4.28-cp38-cp38-win_amd64.whl", hash = "sha256:2cc1b87bba1dd1a898e664a31012725e48af826bf3971e786c53e32e02adae6c"}, - {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7413167c507a768eafb5424413c5b2f515c606be5bb4ef8c5dee43925aa5718b"}, - {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:108e2dcf0b53a7c4ab8986842a8edcb8ab2e59919a74ff51c296772e8e74d0ae"}, - {file = "regex-2024.4.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f1c5742c31ba7d72f2dedf7968998730664b45e38827637e0f04a2ac7de2f5f1"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecc6148228c9ae25ce403eade13a0961de1cb016bdb35c6eafd8e7b87ad028b1"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7d893c8cf0e2429b823ef1a1d360a25950ed11f0e2a9df2b5198821832e1947"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4290035b169578ffbbfa50d904d26bec16a94526071ebec3dadbebf67a26b25e"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a22ae1cfd82e4ffa2066eb3390777dc79468f866f0625261a93e44cdf6482b"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:fd24fd140b69f0b0bcc9165c397e9b2e89ecbeda83303abf2a072609f60239e2"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:39fb166d2196413bead229cd64a2ffd6ec78ebab83fff7d2701103cf9f4dfd26"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9301cc6db4d83d2c0719f7fcda37229691745168bf6ae849bea2e85fc769175d"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7c3d389e8d76a49923683123730c33e9553063d9041658f23897f0b396b2386f"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:99ef6289b62042500d581170d06e17f5353b111a15aa6b25b05b91c6886df8fc"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b91d529b47798c016d4b4c1d06cc826ac40d196da54f0de3c519f5a297c5076a"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:43548ad74ea50456e1c68d3c67fff3de64c6edb85bcd511d1136f9b5376fc9d1"}, - {file = "regex-2024.4.28-cp39-cp39-win32.whl", hash = "sha256:05d9b6578a22db7dedb4df81451f360395828b04f4513980b6bd7a1412c679cc"}, - {file = "regex-2024.4.28-cp39-cp39-win_amd64.whl", hash = "sha256:3986217ec830c2109875be740531feb8ddafe0dfa49767cdcd072ed7e8927962"}, - {file = "regex-2024.4.28.tar.gz", hash = "sha256:83ab366777ea45d58f72593adf35d36ca911ea8bd838483c1823b883a121b0e4"}, + {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"}, + {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"}, + {file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"}, + {file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"}, + {file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = 
"sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"}, + {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"}, + {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"}, + {file = "regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"}, + {file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"}, + {file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"}, + {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"}, + {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"}, + {file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"}, + {file = 
"regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"}, + {file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"}, + {file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"}, + {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835"}, + {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850"}, + {file = "regex-2024.5.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2"}, + {file = 
"regex-2024.5.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741"}, + {file = "regex-2024.5.15-cp38-cp38-win32.whl", hash = "sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9"}, + {file = "regex-2024.5.15-cp38-cp38-win_amd64.whl", hash = "sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569"}, + {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133"}, + {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1"}, + {file = "regex-2024.5.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456"}, + {file = "regex-2024.5.15-cp39-cp39-win32.whl", hash = "sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694"}, + {file = "regex-2024.5.15-cp39-cp39-win_amd64.whl", hash = "sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388"}, + 
{file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"}, ] [[package]] name = "requests" -version = "2.31.0" +version = "2.32.2" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, + {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, ] [package.dependencies] @@ -2494,17 +2518,6 @@ files = [ [package.dependencies] mpmath = ">=0.19" -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - [[package]] name = "tox" version = "4.15.0" @@ -2525,7 +2538,6 @@ packaging = ">=23.2" platformdirs = ">=4.1" pluggy = ">=1.3" pyproject-api = ">=1.6.1" -tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} virtualenv = ">=20.25" [package.extras] @@ -2614,13 +2626,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = 
"sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.0-py3-none-any.whl", hash = "sha256:b349c66bea9016ac22978d800cfff206d5f9816951f12a7d0ec5578b0a819594"}, + {file = "typing_extensions-4.12.0.tar.gz", hash = "sha256:8cbcdc8606ebcb0d95453ad7dc5065e6237b6aa230a31e81d0f440c30fed5fd8"}, ] [[package]] @@ -2636,30 +2648,29 @@ files = [ [[package]] name = "urllib3" -version = "2.2.1" +version = "1.26.18" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=3.8" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, + {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] +brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "virtualenv" -version = "20.26.1" +version = "20.26.2" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.26.1-py3-none-any.whl", hash = "sha256:7aa9982a728ae5892558bff6a2839c00b9ed145523ece2274fad6f414690ae75"}, - {file = "virtualenv-20.26.1.tar.gz", hash = 
"sha256:604bfdceaeece392802e6ae48e69cec49168b9c5f4a44e483963f9242eb0e78b"}, + {file = "virtualenv-20.26.2-py3-none-any.whl", hash = "sha256:a624db5e94f01ad993d476b9ee5346fdf7b9de43ccaee0e0197012dc838a0e9b"}, + {file = "virtualenv-20.26.2.tar.gz", hash = "sha256:82bf0f4eebbb78d36ddaee0283d43fe5736b53880b8a8cdcd37390a07ac3741c"}, ] [package.dependencies] @@ -2701,5 +2712,5 @@ files = [ [metadata] lock-version = "2.0" -python-versions = "^3.10" -content-hash = "f04f539402775a8dd86cbd776d4b20af78cbdb698724efd9a442ac5e06e9c7a4" +python-versions = "^3.11" +content-hash = "7f210b78e012f9520d74924605bc359955642f662ef16c736465c47a45bfcf75" diff --git a/source/app/pyproject.toml b/source/app/pyproject.toml index fb46a2f2..928e90d8 100644 --- a/source/app/pyproject.toml +++ b/source/app/pyproject.toml @@ -1,44 +1,73 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +# ~~ Generated by projen. To modify, edit .projenrc.js and run "npx projen". 
[tool.poetry] name = "instance_scheduler" -version = "1.5.3" +version = "3.0.0" description = "Instance Scheduler on AWS" license = "Apache-2.0" -authors = ["Amazon Web Services"] +authors = [ "Amazon Web Services" ] homepage = "https://aws.amazon.com/solutions/implementations/instance-scheduler-on-aws/" -repository = "https://github.com/aws-solutions/instance-scheduler-on-aws" -documentation = "https://docs.aws.amazon.com/solutions/latest/instance-scheduler-on-aws/solution-overview.html" +readme = "README.md" -[tool.poetry.dependencies] -python = "^3.10" -requests = "^2.31.0" -typing-extensions = "^4.8.0" + [tool.poetry.dependencies] + aws-lambda-powertools = "^2.26.0" + packaging = "^24.0" + python = "^3.11" [tool.poetry.group.dev.dependencies] -aws-lambda-powertools = "^2.25.1" black = "^24.3.0" boto3 = "^1.34.1" -boto3-stubs-lite = {extras = ["cloudwatch", "dynamodb", "ec2", "lambda", "logs", "rds", "resourcegroupstaggingapi", "sns", "ssm", "sts"], version = "^1.27.1"} +botocore-stubs = "^1.31.66" botocore = "^1.34.1" flake8 = "^6.1.0" -freezegun = "^1.2.2" +freezegun = "^1.3.1" isort = "^5.12.0" jmespath = "1.0.1" -pytest = "^7.4.2" +mypy = "^1.7.1" pytest-cov = "^4.1.0" -pytest-mock = "^3.11.1" -pytest-xdist = "^3.3.1" +pytest-mock = "^3.12.0" +pytest-runner = "^6.0.1" +pytest-xdist = "^3.5.0" +pytest = "^7.4.3" python-dateutil = "2.8.2" -moto = {extras = ["dynamodb", "ec2", "logs", "rds", "resourcegroupstaggingapi", "ssm"], version = "^5.0.2"} -mypy = "^1.5.1" -tox = "^4.11.3" +tox = "^4.11.4" types-freezegun = "^1.1.10" types-jmespath = "1.0.1" types-python-dateutil = "2.8.2" types-requests = "2.31.0.6" +types-urllib3 = "^1.26.15" tzdata = "^2023.3" +urllib3 = "^1.26.15" + + [tool.poetry.group.dev.dependencies.boto3-stubs-lite] + version = "^1.34.1" + extras = [ + "autoscaling", + "cloudwatch", + "dynamodb", + "ec2", + "ecs", + "lambda", + "logs", + "rds", + "resourcegroupstaggingapi", + "sns", + "ssm", + "sts" +] + + 
[tool.poetry.group.dev.dependencies.moto] + version = "^5.0.2" + extras = [ + "autoscaling", + "dynamodb", + "ec2", + "logs", + "rds", + "resourcegroupstaggingapi", + "ssm" +] + +[build-system] +requires = [ "poetry-core" ] +build-backend = "poetry.core.masonry.api" diff --git a/source/app/tests/__init__.py b/source/app/tests/__init__.py index f35cbf57..e3fa6425 100644 --- a/source/app/tests/__init__.py +++ b/source/app/tests/__init__.py @@ -2,4 +2,4 @@ # SPDX-License-Identifier: Apache-2.0 from typing import Final as _Final -ami: _Final = "ami-0889ff9188674a22a" +DEFAULT_REGION: _Final = "us-east-1" diff --git a/source/app/tests/cli/__init__.py b/source/app/tests/cli/__init__.py new file mode 100644 index 00000000..04f8b7b7 --- /dev/null +++ b/source/app/tests/cli/__init__.py @@ -0,0 +1,2 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 diff --git a/source/app/tests/handler/test_cli_handler.py b/source/app/tests/cli/test_cli_request_handler.py similarity index 53% rename from source/app/tests/handler/test_cli_handler.py rename to source/app/tests/cli/test_cli_request_handler.py index cc05dd63..65a38ccc 100644 --- a/source/app/tests/handler/test_cli_handler.py +++ b/source/app/tests/cli/test_cli_request_handler.py @@ -1,25 +1,74 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 +import json +import re from datetime import date -from typing import Any, Optional, Sequence - -from instance_scheduler.handler.cli import CliHandler +from typing import Any, Iterator, Optional, Sequence +from unittest.mock import patch + +from _pytest.fixtures import fixture + +from instance_scheduler import __version__ +from instance_scheduler.handler.cli.cli_request_handler import ( + CliRequestHandler, + get_supported_cli_versions, +) +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleDefinitionStore, +) from instance_scheduler.util.app_env import AppEnv from instance_scheduler.util.dynamodb_utils import DynamoDBUtils from tests.context import MockLambdaContext +from tests.logger import MockLogger from tests.test_utils.unordered_list import UnorderedList +@fixture(autouse=True) +def mock_logger() -> Iterator[None]: + with patch("instance_scheduler.handler.cli.cli_request_handler.Logger", MockLogger): + yield + + def test_is_handling_request() -> None: - assert CliHandler.is_handling_request( + assert CliRequestHandler.is_handling_request( {"source": "scheduler.cli", "action": "something"} ) - assert not CliHandler.is_handling_request( + assert not CliRequestHandler.is_handling_request( {"source": "anything.else", "action": "another_thing"} ) +def test_cli_receives_successful_response_with_supported_cli_version( + config_table: None, +) -> None: + result = describe_schedules_with_cli(version=__version__) + assert "Schedules" in result + + +def test_get_supported_cli_versions_is_human_readable() -> None: + # matches version pattern (optional range) like x.x.x or x.x.x-x.x.x + assert 
re.match(r"^\d+\.\d+\.\d+(-\d+\.\d+\.\d+)?$", get_supported_cli_versions()) + + +def test_cli_receives_error_response_with_old_cli_version() -> None: + result = describe_schedules_with_cli(version="0.0.0") + assert result == { + "Error": f"CLI version 0.0.0 is not supported for this version of the solution. Please update to a supported version ({get_supported_cli_versions()})." + } + + +def test_cli_receives_error_response_with_new_cli_version() -> None: + result = describe_schedules_with_cli(version="100.0.0") + assert result == { + "Error": f"CLI version 100.0.0 is not supported for this version of the solution. Please update to a supported version ({get_supported_cli_versions()})." + } + + def test_create_schedule_throws_error_when_period_not_found(config_table: None) -> None: result = create_schedule_with_cli(periods=["office-hours"]) assert result == {"Error": "error: not found: period office-hours does not exist"} @@ -38,11 +87,9 @@ def test_create_schedule_with_2_periods(config_table: None, app_env: AppEnv) -> "Name": "cli-schedule", "Periods": UnorderedList(["period1", "period2"]), "StopNewInstances": True, - "UseMaintenanceWindow": False, "RetainRunning": False, "Enforced": False, "Hibernate": False, - "UseMetrics": False, "Type": "schedule", } } @@ -53,11 +100,46 @@ def test_create_schedule_with_2_periods(config_table: None, app_env: AppEnv) -> "name": "cli-schedule", "periods": {"period1", "period2"}, "stop_new_instances": True, - "use_maintenance_window": False, "retain_running": False, "enforced": False, "hibernate": False, - "use_metrics": False, + } + + +def test_create_schedule_with_2_maintenance_windows( + config_table: None, app_env: AppEnv +) -> None: + create_period_with_cli(name="period") + + result = create_schedule_with_cli( + name="cli-schedule", + periods=["period"], + ssm_maintenance_window=["window1", "window2"], + ) + + assert result == { + "Schedule": { + "Name": "cli-schedule", + "Periods": ["period"], + "StopNewInstances": True, + 
"SsmMaintenanceWindow": UnorderedList(["window1", "window2"]), + "RetainRunning": False, + "Enforced": False, + "Hibernate": False, + "Type": "schedule", + } + } + + created_schedule = get_schedule_from_dynamo("cli-schedule", app_env) + assert created_schedule == { + "type": "schedule", + "name": "cli-schedule", + "periods": {"period"}, + "stop_new_instances": True, + "ssm_maintenance_window": {"window1", "window2"}, + "retain_running": False, + "enforced": False, + "hibernate": False, } @@ -82,9 +164,28 @@ def test_delete_existing_schedule(config_table: None, app_env: AppEnv) -> None: assert get_schedule_from_dynamo("cli-schedule", app_env) is None +def test_delete_schedule_will_not_delete_cfn_managed_schedule( + schedule_store: ScheduleDefinitionStore, +) -> None: + schedule_store.put( + ScheduleDefinition( + name="test-schedule", + periods=[PeriodIdentifier.of("test-period")], + configured_in_stack="myStackArn", + ) + ) + + result = delete_schedule_with_cli("test-schedule") + assert result == { + "Error": "Schedule test-schedule is owned by myStackArn and cannot be deleted by the cli. 
" + "Please delete the owning stack to delete this schedule" + } + assert schedule_store.find_by_name("test-schedule") is not None + + def test_delete_nonexisting_schedule_returns_error(config_table: None) -> None: - result = delete_period_with_cli("cli-schedule") - assert result == {"Error": "not found: period cli-schedule does not exist"} + result = delete_schedule_with_cli("cli-schedule") + assert result == {"Error": "not found: schedule cli-schedule does not exist"} def test_create_basic_period(config_table: None, app_env: AppEnv) -> None: @@ -110,6 +211,40 @@ def test_create_basic_period(config_table: None, app_env: AppEnv) -> None: } +def test_create_complex_period(config_table: None, app_env: AppEnv) -> None: + result = create_period_with_cli( + name="cli-period", + begintime="10:00", + endtime="20:00", + weekdays="mon", + monthdays="1-5", + months="1", + ) + + assert result == { + "Period": { + "Begintime": "10:00", + "Endtime": "20:00", + "Name": "cli-period", + "Type": "period", + "Monthdays": ["1-5"], + "Months": ["1"], + "Weekdays": ["mon"], + } + } + + created_period = get_period_from_dynamo(name="cli-period", app_env=app_env) + assert created_period == { + "type": "period", + "name": "cli-period", + "begintime": "10:00", + "endtime": "20:00", + "monthdays": {"1-5"}, + "months": {"1"}, + "weekdays": {"mon"}, + } + + def test_create_period_throws_error_when_period_already_exists( config_table: None, ) -> None: @@ -133,17 +268,49 @@ def test_delete_nonexisting_period_returns_error(config_table: None) -> None: assert result == {"Error": "not found: period cli-period does not exist"} +def test_delete_period_returns_error_if_period_is_used_by_a_schedule( + config_table: None, +) -> None: + create_period_with_cli(name="cli-period") + create_schedule_with_cli(name="cli-schedule", periods=["cli-period"]) + + result = delete_period_with_cli("cli-period") + assert result == { + "Error": "error: period cli-period can not be deleted because it is still used in 
schedule(s) ['cli-schedule']" + } + + +def test_delete_period_will_not_delete_cfn_managed_period( + period_store: PeriodDefinitionStore, +) -> None: + period_store.put( + PeriodDefinition( + name="test-period", + begintime="5:00", + endtime="10:00", + configured_in_stack="myStackArn", + ) + ) + + result = delete_period_with_cli("test-period") + assert result == { + "Error": "Period test-period is owned by myStackArn and cannot be deleted by the cli. " + "Please delete the owning stack to delete this period" + } + assert period_store.find_by_name("test-period") is not None + + def test_describe_all_periods_is_empty_when_no_periods_created( config_table: None, ) -> None: - result = describe_periods() + result = describe_periods_with_cli() assert result == {"Periods": []} def test_describe_all_periods_returns_created_periods(config_table: None) -> None: - create_period_with_cli("period1", begintime="5:00", endtime="10:00"), + create_period_with_cli("period1", begintime="05:00", endtime="10:00"), create_period_with_cli("period2", begintime="12:00", endtime="14:00") - result = describe_periods() + result = describe_periods_with_cli() assert result == { "Periods": UnorderedList( [ @@ -164,10 +331,29 @@ def test_describe_all_periods_returns_created_periods(config_table: None) -> Non } +def test_describe_periods_handles_sets_correctly( + period_store: PeriodDefinitionStore, +) -> None: + period_store.put(PeriodDefinition(name="test-period", weekdays={"1", "3", "5-6"})) + + result = describe_periods_with_cli("test-period") + assert result == { + "Periods": UnorderedList( + [ + { + "Type": "period", + "Name": "test-period", + "Weekdays": UnorderedList(["1", "3", "5-6"]), + }, + ] + ) + } + + def test_describe_specific_period_returns_expected_period(config_table: None) -> None: - create_period_with_cli("period1", begintime="5:00", endtime="10:00"), + create_period_with_cli("period1", begintime="05:00", endtime="10:00"), create_period_with_cli("period2", begintime="12:00", 
endtime="14:00") - result = describe_periods("period1") + result = describe_periods_with_cli("period1") assert result == { "Periods": UnorderedList( [ @@ -185,28 +371,28 @@ def test_describe_specific_period_returns_expected_period(config_table: None) -> def test_describe_specific_period_returns_error_when_not_exists( config_table: None, ) -> None: - create_period_with_cli("period1", begintime="5:00", endtime="10:00"), + create_period_with_cli("period1", begintime="05:00", endtime="10:00"), create_period_with_cli("period2", begintime="12:00", endtime="14:00") - result = describe_periods("period3") + result = describe_periods_with_cli("period3") assert result == {"Error": "not found: period period3 does not exist"} def test_describe_all_schedules_is_empty_when_no_schedules_created( config_table: None, ) -> None: - result = describe_schedules() + result = describe_schedules_with_cli() assert result == {"Schedules": []} def test_describe_all_schedules_returns_created_schedules(config_table: None) -> None: - create_period_with_cli("period1", begintime="5:00", endtime="10:00"), + create_period_with_cli("period1", begintime="05:00", endtime="10:00"), create_period_with_cli("period2", begintime="12:00", endtime="14:00") create_schedule_with_cli( periods=["period1"], name="schedule1", stop_new_instances=False ) create_schedule_with_cli(periods=["period2"], name="schedule2", retain_running=True) - result = describe_schedules() + result = describe_schedules_with_cli() assert result == { "Schedules": UnorderedList( [ @@ -215,22 +401,18 @@ def test_describe_all_schedules_returns_created_schedules(config_table: None) -> "Name": "schedule1", "Periods": ["period1"], "StopNewInstances": False, - "UseMaintenanceWindow": False, "RetainRunning": False, "Enforced": False, "Hibernate": False, - "UseMetrics": False, }, { "Type": "schedule", "Name": "schedule2", "Periods": ["period2"], "StopNewInstances": True, - "UseMaintenanceWindow": False, "RetainRunning": True, "Enforced": False, 
"Hibernate": False, - "UseMetrics": False, }, ] ) @@ -240,17 +422,16 @@ def test_describe_all_schedules_returns_created_schedules(config_table: None) -> def test_describe_specific_schedule_returns_expected_schedule( config_table: None, ) -> None: - create_period_with_cli("period1", begintime="5:00", endtime="10:00"), + create_period_with_cli("period1", begintime="05:00", endtime="10:00"), create_period_with_cli("period2", begintime="12:00", endtime="14:00") create_schedule_with_cli( periods=["period1"], name="schedule1", - use_maintenance_window=True, - ssm_maintenance_window="window", + ssm_maintenance_window=["window"], ) create_schedule_with_cli(periods=["period2"], name="schedule2", enforced=True) - result = describe_schedules("schedule1") + result = describe_schedules_with_cli("schedule1") assert result == { "Schedules": [ { @@ -258,12 +439,10 @@ def test_describe_specific_schedule_returns_expected_schedule( "Name": "schedule1", "Periods": ["period1"], "StopNewInstances": True, - "UseMaintenanceWindow": True, - "SsmMaintenanceWindow": "window", + "SsmMaintenanceWindow": ["window"], "RetainRunning": False, "Enforced": False, "Hibernate": False, - "UseMetrics": False, }, ] } @@ -272,24 +451,23 @@ def test_describe_specific_schedule_returns_expected_schedule( def test_describe_specific_schedule_returns_error_when_not_exists( config_table: None, ) -> None: - create_period_with_cli("period1", begintime="5:00", endtime="10:00"), + create_period_with_cli("period1", begintime="05:00", endtime="10:00"), create_period_with_cli("period2", begintime="12:00", endtime="14:00") create_schedule_with_cli( periods=["period1"], name="schedule1", - use_maintenance_window=True, - ssm_maintenance_window="window", + ssm_maintenance_window=["window"], ) create_schedule_with_cli(periods=["period2"], name="schedule2", enforced=True) - result = describe_schedules("schedule3") + result = describe_schedules_with_cli("schedule3") assert result == {"Error": "not found: schedule schedule3 
does not exist"} def test_describe_schedule_usage(config_table: None) -> None: - create_period_with_cli("cli-period", begintime="9:00", endtime="17:00"), + create_period_with_cli("cli-period", begintime="09:00", endtime="17:00"), create_schedule_with_cli(periods=["cli-period"], name="cli-schedule") - result = describe_schedule_usage( + result = describe_schedule_usage_with_cli( "cli-schedule", startdate=date(2023, 7, 20), enddate=date(2023, 7, 21) ) assert result == { @@ -324,7 +502,7 @@ def test_describe_schedule_usage(config_table: None) -> None: def test_update_period(config_table: None, app_env: AppEnv) -> None: - create_period_with_cli("cli-period", begintime="2:00", endtime="4:00") + create_period_with_cli("cli-period", begintime="02:00", endtime="4:00") result = update_period_with_cli("cli-period", begintime="12:00", endtime="15:00") assert result == { @@ -345,6 +523,24 @@ def test_update_period(config_table: None, app_env: AppEnv) -> None: } +def test_update_period_returns_error_when_period_managed_by_cfn( + period_store: PeriodDefinitionStore, +) -> None: + orig_period = PeriodDefinition( + name="test-period", + begintime="10:00", + configured_in_stack="myStackArn", + ) + period_store.put(orig_period) + + result = update_period_with_cli(name="test-period", begintime="12:00") + assert result == { + "Error": "Period test-period is owned by myStackArn and cannot be edited by the cli. 
" + "Please update the owning stack to edit this period" + } + assert period_store.find_by_name("test-period") == orig_period + + def test_update_period_returns_error_when_period_does_not_exist( config_table: None, ) -> None: @@ -372,11 +568,9 @@ def test_update_schedule(config_table: None, app_env: AppEnv) -> None: "Name": "cli-schedule", "Periods": UnorderedList(["period1", "period2"]), "StopNewInstances": True, - "UseMaintenanceWindow": False, "RetainRunning": False, "Enforced": True, "Hibernate": False, - "UseMetrics": False, "Type": "schedule", } } @@ -387,11 +581,51 @@ def test_update_schedule(config_table: None, app_env: AppEnv) -> None: "name": "cli-schedule", "periods": {"period1", "period2"}, "stop_new_instances": True, - "use_maintenance_window": False, "retain_running": False, "enforced": True, "hibernate": False, - "use_metrics": False, + } + + +def test_update_schedule_with_2_maintenance_windows( + config_table: None, app_env: AppEnv +) -> None: + create_period_with_cli("period") + create_schedule_with_cli( + periods=["period"], name="cli-schedule", retain_running=True + ) + + result = update_schedule_with_cli( + name="cli-schedule", + periods=["period"], + retain_running=False, + enforced=True, + ssm_maintenance_window=["window1", "window2"], + ) + + assert result == { + "Schedule": { + "Name": "cli-schedule", + "Periods": ["period"], + "StopNewInstances": True, + "SsmMaintenanceWindow": UnorderedList(["window1", "window2"]), + "RetainRunning": False, + "Enforced": True, + "Hibernate": False, + "Type": "schedule", + } + } + + updated_schedule = get_schedule_from_dynamo("cli-schedule", app_env) + assert updated_schedule == { + "type": "schedule", + "name": "cli-schedule", + "periods": {"period"}, + "stop_new_instances": True, + "ssm_maintenance_window": {"window1", "window2"}, + "retain_running": False, + "enforced": True, + "hibernate": False, } @@ -404,6 +638,30 @@ def test_update_schedule_returns_error_when_schedule_does_not_exist( assert result == 
{"Error": "not found: schedule cli-schedule does not exist"} +def test_update_schedule_returns_error_when_schedule_managed_by_cfn( + schedule_store: ScheduleDefinitionStore, period_store: PeriodDefinitionStore +) -> None: + # needed because cli checks if period exists + period_store.put(PeriodDefinition(name="period", begintime="10:00")) + orig_sched = ScheduleDefinition( + name="test-schedule", + periods=[PeriodIdentifier.of("test-period")], + configured_in_stack="myStackArn", + ) + schedule_store.put(orig_sched) + + result = update_schedule_with_cli( + name="test-schedule", + periods=["period"], + ) + + assert result == { + "Error": "Schedule test-schedule is owned by myStackArn and cannot be edited by the cli. " + "Please update the owning stack to edit this schedule" + } + assert schedule_store.find_by_name("test-schedule") == orig_sched + + def test_update_schedule_returns_error_when_period_does_not_exist( config_table: None, ) -> None: @@ -415,52 +673,86 @@ def test_update_schedule_returns_error_when_period_does_not_exist( # ------------------------------Begin Helpers------------------------------ # +def is_valid_json(json_data: Any) -> bool: + json.dumps(json_data) + return True + + def create_period_with_cli( - name: str = "cli-period", begintime: str = "10:00", endtime: str = "20:00" + name: str = "cli-period", + begintime: str = "10:00", + endtime: str = "20:00", + weekdays: Optional[str] = None, + monthdays: Optional[str] = None, + months: Optional[str] = None, + version: str = __version__, ) -> Any: + parameters = {"name": name, "begintime": begintime, "endtime": endtime} + + if weekdays: + parameters["weekdays"] = weekdays + if monthdays: + parameters["monthdays"] = monthdays + if months: + parameters["months"] = months + event = { "source": "scheduler.cli", "action": "create-period", - "parameters": {"name": name, "begintime": begintime, "endtime": endtime}, + "parameters": parameters, + "version": version, } - handler = CliHandler(event, 
MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result def update_period_with_cli( - name: str = "cli-period", begintime: str = "10:00", endtime: str = "20:00" + name: str = "cli-period", + begintime: str = "10:00", + endtime: str = "20:00", + version: str = __version__, ) -> Any: event = { "source": "scheduler.cli", "action": "update-period", "parameters": {"name": name, "begintime": begintime, "endtime": endtime}, + "version": version, } - handler = CliHandler(event, MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result -def delete_period_with_cli(name: str) -> Any: +def delete_period_with_cli( + name: str, + version: str = __version__, +) -> Any: event = { "source": "scheduler.cli", "action": "delete-period", "parameters": {"name": name}, + "version": version, } - handler = CliHandler(event, MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result def create_schedule_with_cli( periods: Sequence[str], name: str = "cli-schedule", stop_new_instances: bool = True, - use_maintenance_window: bool = False, retain_running: bool = False, enforced: bool = False, hibernate: bool = False, - use_metrics: bool = False, - ssm_maintenance_window: Optional[str] = None, + ssm_maintenance_window: Optional[Sequence[str]] = None, + version: str = __version__, ) -> Any: event: dict[str, Any] = { "source": "scheduler.cli", @@ -469,31 +761,31 @@ def create_schedule_with_cli( "name": name, "periods": ",".join(periods), "stop_new_instances": stop_new_instances, - "use_maintenance_window": use_maintenance_window, "retain_running": retain_running, "enforced": enforced, 
"hibernate": hibernate, - "use-metrics": use_metrics, }, + "version": version, } if ssm_maintenance_window: event["parameters"]["ssm_maintenance_window"] = ssm_maintenance_window - handler = CliHandler(event, MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result def update_schedule_with_cli( periods: Sequence[str], name: str = "cli-schedule", stop_new_instances: bool = True, - use_maintenance_window: bool = False, retain_running: bool = False, enforced: bool = False, hibernate: bool = False, - use_metrics: bool = False, - ssm_maintenance_window: Optional[str] = None, + ssm_maintenance_window: Optional[Sequence[str]] = None, + version: str = __version__, ) -> Any: event: dict[str, Any] = { "source": "scheduler.cli", @@ -502,60 +794,84 @@ def update_schedule_with_cli( "name": name, "periods": ",".join(periods), "stop_new_instances": stop_new_instances, - "use_maintenance_window": use_maintenance_window, "retain_running": retain_running, "enforced": enforced, "hibernate": hibernate, - "use-metrics": use_metrics, }, + "version": version, } if ssm_maintenance_window: event["parameters"]["ssm_maintenance_window"] = ssm_maintenance_window - handler = CliHandler(event, MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result -def delete_schedule_with_cli(name: str) -> Any: +def delete_schedule_with_cli( + name: str, + version: str = __version__, +) -> Any: event = { "source": "scheduler.cli", "action": "delete-schedule", "parameters": {"name": name}, + "version": version, } - handler = CliHandler(event, MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result 
-def describe_periods(name: Optional[str] = None) -> Any: +def describe_periods_with_cli( + name: Optional[str] = None, + version: str = __version__, +) -> Any: event: dict[str, Any] = { "source": "scheduler.cli", "action": "describe-periods", "parameters": {}, + "version": version, } if name: event["parameters"]["name"] = name - handler = CliHandler(event, MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result -def describe_schedules(name: Optional[str] = None) -> Any: +def describe_schedules_with_cli( + name: Optional[str] = None, + version: str = __version__, +) -> Any: event: dict[str, Any] = { "source": "scheduler.cli", "action": "describe-schedules", "parameters": {}, + "version": version, } if name: event["parameters"]["name"] = name - handler = CliHandler(event, MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result -def describe_schedule_usage(name: str, startdate: date, enddate: date) -> Any: +def describe_schedule_usage_with_cli( + name: str, + startdate: date, + enddate: date, + version: str = __version__, +) -> Any: event = { "source": "scheduler.cli", "action": "describe-schedule-usage", @@ -564,10 +880,13 @@ def describe_schedule_usage(name: str, startdate: date, enddate: date) -> Any: "startdate": startdate.strftime("%Y%m%d"), "enddate": enddate.strftime("%Y%m%d"), }, + "version": version, } - handler = CliHandler(event, MockLambdaContext()) - return handler.handle_request() + handler = CliRequestHandler(event, MockLambdaContext()) + result = handler.handle_request() + assert is_valid_json(result) + return result def get_period_from_dynamo(name: str, app_env: AppEnv) -> Any: diff --git a/source/app/tests/cli/test_schedule_usage.py 
b/source/app/tests/cli/test_schedule_usage.py new file mode 100644 index 00000000..e0d57457 --- /dev/null +++ b/source/app/tests/cli/test_schedule_usage.py @@ -0,0 +1,313 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import json +from datetime import date, datetime, time +from zoneinfo import ZoneInfo + +from _pytest.python_api import raises + +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.configuration.running_period import RunningPeriod +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.handler.cli.schedule_usage import ( + CliCustomEncoder, + get_schedule_usage, +) + + +def test_custom_encoder_set() -> None: + """Stringifies set as list""" + assert json.dumps(set(), cls=CliCustomEncoder) == "[]" + + items = [1, 2, 3] + items_set = set(items) + result = json.loads(json.dumps(items_set, cls=CliCustomEncoder)) + assert isinstance(result, list) + assert items_set == set(result) + + +def test_custom_encoder_datetime() -> None: + """Stringifies datetimes in locale format without seconds""" + dt_with_seconds = datetime( + year=2023, month=5, day=11, hour=11, minute=41, second=20 + ) + dt_no_seconds = dt_with_seconds.replace(second=0) + expected_str = f'"{dt_no_seconds.strftime("%x %X")}"' + assert json.dumps(dt_with_seconds, cls=CliCustomEncoder) == expected_str + + +def test_custom_encoder_invalid() -> None: + """Errors on other types""" + with raises(TypeError): + json.dumps(date(year=2023, month=1, day=12), cls=CliCustomEncoder) + + +def test_describe_schedule_describes_simple_schedule() -> None: + schedule = InstanceSchedule( + name="test-schedule", + timezone=ZoneInfo("UTC"), + periods=[ + { + "period": RunningPeriod( + name="test-period", + begintime=time(7, 0, 0), + endtime=time(15, 0, 0), + ) + } + ], + ) + + output_json = get_schedule_usage( + schedule, start=datetime(2024, 
3, 1), end=datetime(2024, 3, 5) + ) + assert output_json == { + "schedule": "test-schedule", + "usage": { + "2024-03-01": { + "billing_hours": 8, + "billing_seconds": 28800, + "running_periods": { + "test-period": { + "begin": "03/01/24 " "07:00:00", + "billing_hours": 8, + "billing_seconds": 28800, + "end": "03/01/24 " "15:00:00", + } + }, + }, + "2024-03-02": { + "billing_hours": 8, + "billing_seconds": 28800, + "running_periods": { + "test-period": { + "begin": "03/02/24 " "07:00:00", + "billing_hours": 8, + "billing_seconds": 28800, + "end": "03/02/24 " "15:00:00", + } + }, + }, + "2024-03-03": { + "billing_hours": 8, + "billing_seconds": 28800, + "running_periods": { + "test-period": { + "begin": "03/03/24 " "07:00:00", + "billing_hours": 8, + "billing_seconds": 28800, + "end": "03/03/24 " "15:00:00", + } + }, + }, + "2024-03-04": { + "billing_hours": 8, + "billing_seconds": 28800, + "running_periods": { + "test-period": { + "begin": "03/04/24 " "07:00:00", + "billing_hours": 8, + "billing_seconds": 28800, + "end": "03/04/24 " "15:00:00", + } + }, + }, + "2024-03-05": { + "billing_hours": 8, + "billing_seconds": 28800, + "running_periods": { + "test-period": { + "begin": "03/05/24 " "07:00:00", + "billing_hours": 8, + "billing_seconds": 28800, + "end": "03/05/24 " "15:00:00", + } + }, + }, + }, + } + + +def test_describe_schedule_uses_correctly_handles_nth_weekday() -> None: + schedule = InstanceSchedule( + name="test-schedule", + timezone=ZoneInfo("UTC"), + periods=[ + { + "period": RunningPeriod( + name="test-period", + cron_recurrence=CronRecurrenceExpression.parse(weekdays={"Mon#2"}), + ) + } + ], + ) + + output_json = get_schedule_usage( + schedule, start=datetime(2024, 3, 1), end=datetime(2024, 3, 31) + ) + assert output_json == { + "schedule": "test-schedule", + "usage": { + "2024-03-01": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-02": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, 
+ }, + "2024-03-03": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-04": { + "billing_hours": 24, + "billing_seconds": 86400, + "running_periods": { + "test-period": { + "begin": "03/04/24 " "00:00:00", + "billing_hours": 24, + "billing_seconds": 86400, + "end": "03/05/24 " "00:00:00", + } + }, + }, + "2024-03-05": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-06": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-07": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-08": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-09": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-10": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-11": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-12": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-13": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-14": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-15": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-16": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-17": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-18": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-19": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-20": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-21": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-22": { + "billing_hours": 0, + 
"billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-23": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-24": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-25": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-26": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-27": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-28": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-29": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-30": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + "2024-03-31": { + "billing_hours": 0, + "billing_seconds": 0, + "running_periods": {}, + }, + }, + } diff --git a/source/app/tests/cli/test_validation.py b/source/app/tests/cli/test_validation.py new file mode 100644 index 00000000..2e118a61 --- /dev/null +++ b/source/app/tests/cli/test_validation.py @@ -0,0 +1,137 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import pytest + +from instance_scheduler.util.validation import ( + ValidationException, + validate_boolean, + validate_number_item, + validate_string, + validate_string_item, + validate_string_set, +) + + +def test_validate_string_passes_when_key_not_set_and_not_required() -> None: + validate_string({}, "test_val", required=False) + + +def test_validate_string_fails_when_key_not_set_and_is_required() -> None: + with pytest.raises(ValidationException): + validate_string({}, "test_val", required=True) + + +def test_validate_string_is_required_by_default() -> None: + # when required is omitted, it defaults to true + with pytest.raises(ValidationException): + validate_string({}, "test_val") + + +def test_validate_string_passes_with_valid_string() -> None: + validate_string({"test_val": "str"}, "test_val") + + +def test_validate_string_rejects_non_string_values() -> None: + with pytest.raises(ValidationException): + validate_string({"test_val": 15}, "test_val") + + +def test_validate_boolean_passes_when_key_not_set_and_not_required() -> None: + validate_boolean({}, "test_val", required=False) + + +def test_validate_boolean_fails_when_key_not_set_and_is_required() -> None: + with pytest.raises(ValidationException): + validate_boolean({}, "test_val", required=True) + + +def test_validate_boolean_is_required_by_default() -> None: + # when required is omitted, it defaults to true + with pytest.raises(ValidationException): + validate_boolean({}, "test_val") + + +def test_validate_boolean_passes_with_valid_boolean() -> None: + validate_boolean({"test_val": True}, "test_val") + + +def test_validate_boolean_rejects_non_boolean_values() -> None: + with pytest.raises(ValidationException): + validate_boolean( + {"test_val": "true"}, "test_val" + ) # string of true is not coerced + + +def test_validate_string_set_passes_when_key_not_set_and_not_required() -> None: + validate_string_set({}, "test_val", required=False) + + +def 
test_validate_string_set_fails_when_key_not_set_and_is_required() -> None: + with pytest.raises(ValidationException): + validate_string_set({}, "test_val", required=True) + + +def test_validate_string_set_is_required_by_default() -> None: + # when required is omitted, it defaults to true + with pytest.raises(ValidationException): + validate_string_set({}, "test_val") + + +def test_validate_string_set_passes_with_valid_string_set() -> None: + validate_string_set({"test_val": {"str", "another_str"}}, "test_val") + + +def test_validate_string_set_rejects_non_sets() -> None: + with pytest.raises(ValidationException): + validate_string_set({"test_val": 15}, "test_val") + + +def test_validate_string_set_rejects_sets_that_do_not_contain_only_strings() -> None: + with pytest.raises(ValidationException): + validate_string_set({"test_val": {"a str", 15}}, "test_val") + + +def test_validate_string_item_passes_when_key_not_set_and_not_required() -> None: + assert validate_string_item({}, "key", False) + + +def test_validate_string_item_fails_when_key_not_set_and_is_required() -> None: + with pytest.raises(ValidationException): + validate_string_item({}, "key") + + +def test_validate_string_item_passes_with_valid_string() -> None: + assert validate_string_item({"key": {"S": "string"}}, "key") + + +def test_validate_string_item_rejects_non_string_items() -> None: + with pytest.raises(ValidationException): + validate_string_item({"key": {"N": "5"}}, "key") + + +def test_validate_string_item_rejects_non_string_values() -> None: + with pytest.raises(ValidationException): + validate_string_item({"key": {"S": 5}}, "key") + + +def test_validate_number_item_passes_when_key_not_set_and_not_required() -> None: + assert validate_number_item({}, "key", False) + + +def test_validate_number_item_fails_when_key_not_set_and_is_required() -> None: + with pytest.raises(ValidationException): + validate_number_item({}, "key") + + +def test_validate_number_item_passes_with_valid_string() -> None: + 
assert validate_number_item({"key": {"N": "10"}}, "key") + + +def test_validate_number_item_rejects_non_string_items() -> None: + with pytest.raises(ValidationException): + validate_number_item({"key": {"S": "my-str"}}, "key") + + +def test_validate_number_item_rejects_non_string_values() -> None: + with pytest.raises(ValidationException): + validate_number_item({"key": {"N": 5}}, "key") diff --git a/source/app/tests/configuration/setbuilders/test_month_setbuilder.py b/source/app/tests/configuration/setbuilders/test_month_setbuilder.py deleted file mode 100644 index 817f9577..00000000 --- a/source/app/tests/configuration/setbuilders/test_month_setbuilder.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import calendar - -from instance_scheduler.configuration.setbuilders.month_setbuilder import ( - MonthSetBuilder, -) - - -def test_month_names() -> None: - # abbreviations, case-insensitive - for i, name in enumerate(calendar.month_abbr[1:]): - assert MonthSetBuilder().build(name) == {i + 1} - assert MonthSetBuilder().build(name.lower()) == {i + 1} - assert MonthSetBuilder().build(name.upper()) == {i + 1} - - # full names, case-insensitive - for i, name in enumerate(calendar.month_name[1:]): - assert MonthSetBuilder().build(name) == {i + 1} - assert MonthSetBuilder().build(name.lower()) == {i + 1} - assert MonthSetBuilder().build(name.upper()) == {i + 1} - - -def test_months_as_integers() -> None: - for i in range(1, 13): # 13 is exclusive, so this is 1-12 - assert MonthSetBuilder().build(str(i)) == {i} diff --git a/source/app/tests/configuration/setbuilders/test_monthday_setbuilder.py b/source/app/tests/configuration/setbuilders/test_monthday_setbuilder.py deleted file mode 100644 index f414837f..00000000 --- a/source/app/tests/configuration/setbuilders/test_monthday_setbuilder.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import calendar - -from pytest import raises - -from instance_scheduler.configuration.setbuilders.monthday_setbuilder import ( - MonthdaySetBuilder, -) - - -# Documented behavior in the IG on 7/10/23 -# -# Enter a comma-delimited list of days of the month, or a hyphenated range of days, during which the instance will run. -# For example, enter 1, 2, 3 or 1-3 to run an instance during the first three days of the month. -# You can also enter multiple ranges. -# For example, enter 1-3, 7-9 to run an instance from the 1st to the 3rd and the 7th through the 9th. -# -# You can also schedule an instance to run every nth day of the month or every nth day of the month in a range. -# For example, enter 1/7 to run an instance every seventh day starting on the 1st. -# Enter 1-15/2 to run an instance every other day from the 1st to the 15th. -# -# Enter L to run an instance on the last day of the month. -# Enter a date and W to run an instance on the nearest weekday to the specified date. -# For example, enter 15W to run an instance on the nearest weekday to the 15th. 
-def test_single_day_by_integer() -> None: - years = [2016, 2017] # leap and normal year - - for year in years: - for month in range(1, 13): - _, days = calendar.monthrange(year, month) - - for day in range(1, days): - assert MonthdaySetBuilder(year, month).build(str(day)) == {day} - - -def test_single_range() -> None: - assert MonthdaySetBuilder(2023, 7).build("5-9") == {5, 6, 7, 8, 9} - - -def test_multiple_ranges() -> None: - assert MonthdaySetBuilder(2023, 7).build("13-15, 26-27, 29") == { - 13, - 14, - 15, - 26, - 27, - 29, - } - - -def test_every_nth_day() -> None: - assert MonthdaySetBuilder(2023, 7).build("1/7") == {1, 8, 15, 22, 29} - assert MonthdaySetBuilder(2023, 7).build("5/14") == {5, 19} - - -def test_every_n_days_in_range() -> None: - assert MonthdaySetBuilder(2023, 7).build("1-10/2") == {1, 3, 5, 7, 9} - assert MonthdaySetBuilder(2023, 7).build("15-30/3") == {15, 18, 21, 24, 27, 30} - - -def test_L_wildcard_returns_last_day_of_each_month() -> None: - years = [2016, 2017] # leap and normal year - - for year in years: - for month in range(1, 13): - _, days = calendar.monthrange(year, month) - assert MonthdaySetBuilder(year, month).build("L") == {days} - - -def test_W_wildcard_returns_nearest_weekday_in_same_month() -> None: - years = [2016, 2017] # leap and normal year - - for year in years: - for month in range(1, 13): - _, days = calendar.monthrange(year, month) - - for day in range(1, days): - weekday = calendar.weekday(year, month, day) - result = day - if ( - weekday == 5 - ): # Saturdays return the prev friday unless doing so would be a diff month - result = day - 1 if day > 1 else day + 2 - elif ( - weekday == 6 - ): # Sundays return the next monday unless doing so would be a diff month - result = day + 1 if day < days else day - 2 - - assert MonthdaySetBuilder(year, month).build(str(day) + "W") == {result} - - -def test_exceptions() -> None: - for h in range(13, 25): - with raises(ValueError): - MonthdaySetBuilder(2016, 1).build("W") - with 
raises(ValueError): - MonthdaySetBuilder(2016, 1).build("32W") diff --git a/source/app/tests/configuration/setbuilders/test_setbuilder.py b/source/app/tests/configuration/setbuilders/test_setbuilder.py deleted file mode 100644 index 30ae5e57..00000000 --- a/source/app/tests/configuration/setbuilders/test_setbuilder.py +++ /dev/null @@ -1,273 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import string -from collections.abc import Sequence -from typing import Optional - -from pytest import raises - -from instance_scheduler.configuration.setbuilders.setbuilder import SetBuilder - -characters = string.ascii_lowercase -names = [c * 3 for c in characters] -names_cased = [n.capitalize() for n in names] -all_items = set([i for i in range(0, len(names))]) - - -def test_name() -> None: - # names 1 char - for i, name in enumerate(names): - assert SetBuilder(names=names).build(name) == {i} - # names 1 char with offset - for i, name in enumerate(names): - assert SetBuilder(names=names, offset=1).build(name) == {i + 1} - - # names 1 char ignore case - for i, name in enumerate(names): - assert SetBuilder(names=names, ignorecase=True).build(name.upper()) == {i} - - # names 3 - for i, name in enumerate(names_cased): - assert SetBuilder(names=names_cased).build(name) == {i} - - # names 3, ignore case - for i, name in enumerate(names): - assert SetBuilder(names=names_cased, ignorecase=True).build(name) == {i} - - # names 3, 1 significant character - for i, name in enumerate(names): - assert SetBuilder(names=names_cased, significant_name_characters=1).build( - name.upper() - ) == {i} - - # names 3, 1 significant character, ignore case - for i, name in enumerate(names): - assert SetBuilder(names=names_cased, significant_name_characters=3).build( - name + name - ) == {i} - - # all items passed in as list of strings - assert SetBuilder(names=names).build(names) == all_items - - -def test_value() -> None: - # all by 
value - for value in range(0, len(names)): - assert SetBuilder(names=names).build(str(value)) == {value} - - # all by value with offset - for value in range(1, len(names) + 1): - assert SetBuilder(names=names, offset=1).build(str(value)) == {value} - - -def test_min_max() -> None: - # builder initialized by min and max values - for i in range(0, 5): - assert SetBuilder(min_value=0, max_value=4).build(str(i)) == {i} - - -def test_wildcards() -> None: - # all items using standard and custom wildcard - assert SetBuilder(names).build("*") == all_items - assert SetBuilder(names).build("?") == all_items - assert SetBuilder(names, all_items_wildcards="!").build("!") == all_items - - # first item using standard and custom wildcard - assert SetBuilder(names).build("^") == {0} - assert SetBuilder(names, first_item_wildcard="!").build("!") == {0} - assert SetBuilder(names, offset=1).build("^") == {1} - - # last item using standard and custom wildcard - assert SetBuilder(names).build("$") == {len(names) - 1} - assert SetBuilder(names, last_item_wildcard="!").build("!") == {len(names) - 1} - assert SetBuilder(names, offset=1).build("$") == {len(names)} - - # combined first and last wildcard - assert SetBuilder(names).build("^,$") == {0, len(names) - 1} - assert SetBuilder(names).build("^-$") == all_items - - -def test_multiple() -> None: - # comma separated list of names - assert SetBuilder(names).build(",".join(names)) == all_items - # comma separated list of values - assert ( - SetBuilder(names).build(",".join([str(i) for i in range(0, len(names))])) - == all_items - ) - - -def test_ranges() -> None: - # name range - assert SetBuilder(names).build(names[0] + "-" + names[2]) == {0, 1, 2} - # name ranges no overlap - assert SetBuilder(names).build( - names[0] + "-" + names[2] + "," + names[4] + "-" + names[6] - ) == {0, 1, 2, 4, 5, 6} - # name ranges with overlap - assert SetBuilder(names).build( - names[2] + "-" + names[6] + "," + names[4] + "-" + names[8] - ) == {2, 3, 4, 5, 
6, 7, 8} - # name range with wrap - assert SetBuilder(names, wrap=True).build(names[-2] + "-" + names[2]) == { - 0, - 1, - 2, - len(names) - 2, - len(names) - 1, - } - - # value range - assert SetBuilder(names).build("0-2") == {0, 1, 2} - # value ranges - assert SetBuilder(names).build("0-3, 9-12") == {0, 1, 2, 3, 9, 10, 11, 12} - # value ranges with overlap - assert SetBuilder(names).build("0-8, 6-12") == { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - } - # value range with wrap - assert SetBuilder(names, wrap=True).build(str(len(names) - 2) + "-2") == { - 0, - 1, - 2, - len(names) - 2, - len(names) - 1, - } - - with raises(ValueError): - SetBuilder(names, wrap=False).build(names[1] + "-" + names[0]) - - with raises(ValueError): - SetBuilder(names, wrap=False).build("2-1") - - -def test_increments() -> None: - # increments on start name and value - assert SetBuilder(names).build(names[0] + "/5") == { - i for i in range(0, len(names), 5) - } - assert SetBuilder(names).build("0/3") == {i for i in range(0, len(names), 3)} - - # increment on ranges - assert SetBuilder(names).build(names[0] + "-" + names[10] + "/2") == { - 0, - 2, - 4, - 6, - 8, - 10, - } - assert SetBuilder(names).build("0-10/3") == {0, 3, 6, 9} - assert SetBuilder(names, wrap=True).build("10-5/5") == {10, 15, 20, 25, 4} - - # invalid increment numbers - with raises(ValueError): - SetBuilder(names).build("0/0") - - with raises(ValueError): - SetBuilder(names).build("0/!") - - -def test_unknown_values() -> None: - # unknown name raises error - with raises(ValueError): - SetBuilder(names).build("##") - - # unknown value raises error - with raises(ValueError): - SetBuilder(min_value=0, max_value=1).build("-1") - - # this class has a handler for handling unknow items - class SetBuilderWithHandler(SetBuilder): - def _parse_unknown(self, s: str) -> Optional[list[int]]: - return [] if s == "" else None - - assert SetBuilderWithHandler(names).build("") == set() - - with 
raises(ValueError): - SetBuilderWithHandler(names).build("unknown") - - -def test_custom_parsers() -> None: - class SetBuilderWithCustomPreParser(SetBuilder): - def __init__(self, names: Sequence[str]): - SetBuilder.__init__(self, names=names) - self._pre_custom_parsers = [self._pre_parser] - - def _pre_parser(self, s: str) -> Optional[list[int]]: - if s == "###": - return [0] - return None - - assert SetBuilderWithCustomPreParser("").build("###") == {0} - - class SetBuilderWithCustomPostParser(SetBuilder): - def __init__(self, names: Sequence[str]): - SetBuilder.__init__(self, names=names) - self._post_custom_parsers = [self._post_parser] - - def _post_parser(self, s: str) -> Optional[list[int]]: - if s == "!!!": - return [1] - return None - - assert SetBuilderWithCustomPostParser("").build("!!!") == {1} - - class SetBuilderWithCustomParsers(SetBuilder): - def __init__(self, names: Sequence[str]): - SetBuilder.__init__(self, names=names) - self._post_custom_parsers = [self._pre_parser, self._post_parser] - - def _pre_parser(self, s: str) -> Optional[list[int]]: - if s == "###": - return [99] - return None - - def _post_parser(self, s: str) -> Optional[list[int]]: - if s == "!!!": - return [100] - return None - - assert SetBuilderWithCustomParsers(names).build("###,!!!," + names[0]) == { - 0, - 99, - 100, - } - - -def test_exceptions() -> None: - # names and max_value combination not allowed - with raises(ValueError): - SetBuilder(names=names, max_value=1) - - # names and min_value combination not allowed - with raises(ValueError): - SetBuilder(names=names, min_value=0) - - # both min_value and max_value must be used - with raises(ValueError): - SetBuilder(min_value=0) - - # both min_value and max_value must be used - with raises(ValueError): - SetBuilder(max_value=1) - - # max_value must be equal or greater than min_value - with raises(ValueError): - SetBuilder(min_value=99, max_value=1) - - # offset must be the same if specified with min_value - with 
raises(ValueError): - SetBuilder(min_value=0, max_value=1, offset=1) diff --git a/source/app/tests/configuration/setbuilders/test_weekday_setbuilder.py b/source/app/tests/configuration/setbuilders/test_weekday_setbuilder.py deleted file mode 100644 index 283f2393..00000000 --- a/source/app/tests/configuration/setbuilders/test_weekday_setbuilder.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import calendar - -from pytest import raises - -from instance_scheduler.configuration.setbuilders.weekday_setbuilder import ( - WeekdaySetBuilder, -) - - -# Documented behavior in the IG on 7/10/23 -# -# Enter a comma-delimited list of days of the week, or a range of days of the week, -# during which the instance will run. For example, enter 0, 1, 2 or 0-2 to run an instance Monday through Wednesday. -# You can also enter multiple ranges. For example, enter 0-2, 4-6 to run an instance every day except Thursday. -# -# You can also schedule an instance to run every nth occurrence of a weekday in the month. -# For example, enter Mon#1 or 0#1 to run an instance the first Monday of the month. -# -# Enter a day and L to run an instance on the last occurrence of that weekday in the month. -# For example, enter friL or 4L to run an instance on the last Friday of the month. -def test_char_sentry_value_snapshot() -> None: - assert WeekdaySetBuilder.WEEKDAY_NUMBER_CHAR == "#" - assert WeekdaySetBuilder.LAST_DAY_WILDCARD == "L" - - -def test_day_names_and_abbreviations_map_correctly() -> None: - # ie. mon, tue, wed - for i, day_name in enumerate(calendar.day_abbr): - print(f"{day_name} = {i}") - assert WeekdaySetBuilder().build(day_name) == {i} - - for i, day_name in enumerate(calendar.day_name): - print(f"{day_name} = {i}") - assert WeekdaySetBuilder().build(day_name) == {i} - - -def test_providing_single_day_as_int_maps_correctly() -> None: - # ie. 
1, 3, 5 - for i in range(0, len(calendar.day_abbr) - 1): - assert WeekdaySetBuilder().build(str(i)) == {i} - - -def test_weekday_numbered() -> None: - for year in [2016, 2017]: - for month in range(1, 13): - weekday, days_in_month = calendar.monthrange(year, month) - - for day in range(1, days_in_month + 1): - num = int((day - 1) / 7) + 1 - builder = WeekdaySetBuilder(year=year, month=month, day=day) - - tested_by_name = builder.build( - calendar.day_abbr[weekday] + "#" + str(num) - ) - assert tested_by_name == {weekday} - - tested_by_value = builder.build(str(weekday) + "#" + str(num)) - assert tested_by_value == {weekday} - - for other_weekday in range(0, 7): - if other_weekday != weekday: - tested_by_name = builder.build( - calendar.day_abbr[other_weekday] + "#" + str(num) - ) - assert tested_by_name == set() - tested_by_value = builder.build( - str(other_weekday) + "#" + str(num) - ) - assert tested_by_value == set() - - for other_num in range(1, 6): - if num != other_num: - tested_by_name = builder.build( - calendar.day_abbr[weekday] + "#" + str(other_num) - ) - assert tested_by_name == set() - tested_by_value = builder.build( - str(weekday) + "#" + str(other_num) - ) - assert tested_by_value == set() - - weekday = (weekday + 1) % 7 - - -def test_last_monday_returns_expected_day() -> None: - # last 7 days of june 2023 - assert ( - WeekdaySetBuilder(year=2023, month=6, day=19).build("monL") == set() - ) # 2nd-to-last monday - assert WeekdaySetBuilder(year=2023, month=6, day=24).build("monL") == set() # sat - assert WeekdaySetBuilder(year=2023, month=6, day=25).build("monL") == set() # sun - assert WeekdaySetBuilder(year=2023, month=6, day=26).build("monL") == {0} # mon - assert WeekdaySetBuilder(year=2023, month=6, day=27).build("monL") == set() # tue - assert WeekdaySetBuilder(year=2023, month=6, day=28).build("monL") == set() # wed - assert WeekdaySetBuilder(year=2023, month=6, day=29).build("monL") == set() # thur - assert WeekdaySetBuilder(year=2023, 
month=6, day=30).build("monL") == set() # fri - - -def test_last_tuesday_returns_expected_day() -> None: - # last 7 days of june - assert ( - WeekdaySetBuilder(year=2023, month=6, day=20).build("monL") == set() - ) # 2nd-to-last tuesday - assert WeekdaySetBuilder(year=2023, month=6, day=24).build("tueL") == set() # sat - assert WeekdaySetBuilder(year=2023, month=6, day=25).build("tueL") == set() # sun - assert WeekdaySetBuilder(year=2023, month=6, day=26).build("tueL") == set() # mon - assert WeekdaySetBuilder(year=2023, month=6, day=27).build("tueL") == {1} # tue - assert WeekdaySetBuilder(year=2023, month=6, day=28).build("tueL") == set() # wed - assert WeekdaySetBuilder(year=2023, month=6, day=29).build("tueL") == set() # thur - assert WeekdaySetBuilder(year=2023, month=6, day=30).build("tueL") == set() # fri - - -def test_last_day_wildcard_on_all_days_in_year() -> None: - for year in [2016, 2017]: - for month in range(1, 13): - weekday, days_in_month = calendar.monthrange(year, month) - for tested_on_day in range(1, days_in_month + 1): - builder = WeekdaySetBuilder(year=year, month=month, day=tested_on_day) - - # test by name of weekday - day_num_l = calendar.day_abbr[weekday] + "L" - tested_by_name = builder.build(day_num_l) - # test by number of weekday - day_value_l = str(weekday) + "L" - tested_by_value = builder.build(day_value_l) - - # everything before last week should be empty set - if tested_on_day <= (days_in_month - 7): - assert tested_by_name == set() - assert tested_by_value == set() - - else: - # in last week the set should contain the day - assert tested_by_name == {weekday} - assert tested_by_value == {weekday} - - # test if ofther weekdays on that day return empty set - for d in range(0, 6): - if d != weekday: - day_num_l = calendar.day_abbr[d] + "L" - day_value_l = str(d) + "L" - assert builder.build(day_num_l) == set() - assert builder.build(day_value_l) == set() - - weekday = (weekday + 1) % 7 - - -def 
test_last_xday_tag_throws_exception_when_current_date_not_provided() -> None: - # L needs year, month and daya params - with raises(ValueError): - WeekdaySetBuilder().build("1L") - - -def test_nth_weekday_throws_exception_when_n_is_gte_6() -> None: - # It is an oxymoron to ask for the 6th friday in the month as - # there can only ever be at most 5 of a given day each month - with raises(ValueError): - WeekdaySetBuilder(year=2016, month=10, day=4).build("0#6") - - -def test_nth_weekday_throws_exception_when_n_is_lte_0() -> None: - with raises(ValueError): - WeekdaySetBuilder(year=2023, month=7, day=10).build("0#-1") - with raises(ValueError): - WeekdaySetBuilder(year=2016, month=10, day=4).build("0#0") diff --git a/source/app/tests/configuration/test_config_admin.py b/source/app/tests/configuration/test_config_admin.py deleted file mode 100644 index d12e2b5b..00000000 --- a/source/app/tests/configuration/test_config_admin.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -import json -from datetime import date, datetime - -from pytest import raises - -from instance_scheduler.configuration import config_admin -from instance_scheduler.configuration.config_admin import ( - ConfigAdmin, - ConfigTableScheduleItem, -) -from instance_scheduler.util.app_env import AppEnv - - -def test_error_strings() -> None: - assert config_admin.ERR_PERIOD_BEGIN_LATER_THAN_END == ( - "error: period begintime {} can not be later than endtime {}" - ) - assert ( - config_admin.ERR_SCHEDULE_INVALID_OVERRIDE - == "{} is not a valid value for {}, possible values are {}" - ) - assert config_admin.ERR_SCHEDULE_OVERWRITE_OVERRIDE_EXCLUSIVE == ( - "{} option is mutually exclusive with {} option" - ) - assert config_admin.ERR_CREATE_PERIOD_EXISTS == "error: period {} already exists" - assert ( - config_admin.ERR_CREATE_SCHEDULE_EXISTS == "error: schedule {} already exists" - ) - assert config_admin.ERR_DEL_PERIOD_IN_USE == ( - "error: period {} can not be deleted because it is still used in schedule(s) {}" - ) - assert config_admin.ERR_PERIOD_NOT_FOUND == "not found: period {} does not exist" - assert ( - config_admin.ERR_DEL_SCHEDULE_NAME_EMPTY - == "error: schedule name parameter can not be empty" - ) - assert ( - config_admin.ERR_SCHEDULE_NOT_FOUND == "not found: schedule {} does not exist" - ) - assert ( - config_admin.ERR_EMPTY_PERIOD_NAME - == "error: period name parameter can not be empty" - ) - assert ( - config_admin.ERR_GET_SCHEDULE_NAME_EMPTY - == "error: error schedule name parameter can not be empty" - ) - assert config_admin.ERR_GET_USAGE_INVALID_END_DATE == ( - "error: invalid enddate {}, must be a valid date in format yyyymmdd {}" - ) - assert config_admin.ERR_GET_USAGE_INVALID_START_DATE == ( - "error: invalid startdate {}, must be a valid date in format yyyymmdd {}" - ) - assert config_admin.ERR_GET_USAGE_SCHEDULE_NAME_EMPTY == ( - "error: error schedule name parameter can not be empty" - ) - assert 
config_admin.ERR_GET_USAGE_START_MUST_BE_LESS_OR_EQUAL_STOP == ( - "stop_date must be equal or later than start_date" - ) - assert config_admin.ERR_NAME_PARAM_MISSING == "error: name parameter is missing" - assert ( - config_admin.ERR_NO_PERIODS - == "error: at least one period condition must be specified" - ) - assert ( - config_admin.ERR_PERIOD_INVALID_MONTHDAYS - == "error: {} is not a valid month days specification" - ) - assert ( - config_admin.ERR_PERIOD_INVALID_MONTHS - == "error: {} is not a valid months specification" - ) - assert config_admin.ERR_PERIOD_INVALID_TIME == "error: {} {} is not a valid time" - assert ( - config_admin.ERR_PERIOD_INVALID_WEEKDAYS - == "error: {} is not a valid weekdays specification {}" - ) - assert config_admin.ERR_PERIOD_UNKNOWN_PARAMETER == ( - "error: {} is not a valid parameter, valid parameters are {}" - ) - assert ( - config_admin.ERR_SCHEDULE_INVALID_BOOLEAN - == "error: {} for parameter {} is not a valid boolean value" - ) - assert ( - config_admin.ERR_SCHEDULE_INVALID_TIMEZONE - == "error: {} is not a valid time zone for parameter {}" - ) - assert config_admin.ERR_SCHEDULE_NAME_MISSING == "error: name parameter is missing" - assert ( - config_admin.ERR_SCHEDULE_NO_PERIOD - == "error: at least one period must be specified for a schedule" - ) - assert ( - config_admin.ERR_SCHEDULE_PERIOD_DOES_NOT_EXISTS - == "error: not found: period {} does not exist" - ) - assert config_admin.ERR_SCHEDULE_UNKNOWN_PARAMETER == ( - "error: {} is not a valid parameter, valid parameters are {}" - ) - assert config_admin.ERR_UPDATE_INVALID_BOOL_PARAM == ( - "error: {} for parameter {} is not a valid boolean value" - ) - assert ( - config_admin.ERR_UPDATE_INVALID_TZ_PARAMETER - == "error: {} is not a valid time zone for parameter {}" - ) - assert ( - config_admin.ERR_UPDATE_SCHEDULE_NAME_EMPTY - == "error: schedule name parameter can not be empty" - ) - assert ( - config_admin.ERR_UPDATE_TAGNAME_EMPTY - == "error: tagname parameter must be 
specified" - ) - assert ( - config_admin.ERR_UPDATE_UNKNOWN_PARAMETER - == "error: {} is not a valid parameter" - ) - assert config_admin.ERR_UPDATE_UNKNOWN_SERVICE == "{} is not a supported service" - assert config_admin.ERR_STOP_MUST_BE_LATER_OR_EQUAL_TO_START == ( - "stop_date must be equal or later than start_date" - ) - - -def test_info_strings() -> None: - assert config_admin.INF_ADD_ACCOUNT_EVENT_PERMISSION == ( - "Add permission for account {} to put events on message bus, sid is {}" - ) - assert config_admin.INF_REMOVE_EVENT_PERMISSION == ( - "Remove permission for account {} to put events on event bus, sid = {}" - ) - - -def test_config_admin_type_attr() -> None: - assert ConfigAdmin.TYPE_ATTR == "type" - - -def test_config_admin_time_regex() -> None: - assert ConfigAdmin.TIME_REGEX == "^([0|1]?[0-9]|2[0-3]):[0-5][0-9]$" - - -def test_config_admin_supported_services() -> None: - assert ConfigAdmin.SUPPORTED_SERVICES == ["ec2", "rds"] - - -def test_config_admin_table_name(app_env: AppEnv) -> None: - assert ConfigAdmin(None, None).table_name == app_env.config_table_name - - -def test_custom_encoder_set() -> None: - """Stringifies set as list""" - assert json.dumps(set(), cls=ConfigAdmin.CustomEncoder) == "[]" - - items = [1, 2, 3] - items_set = set(items) - result = json.loads(json.dumps(items_set, cls=ConfigAdmin.CustomEncoder)) - assert isinstance(result, list) - assert items_set == set(result) - - -def test_custom_encoder_datetime() -> None: - """Stringifies datetimes in locale format without seconds""" - dt_with_seconds = datetime( - year=2023, month=5, day=11, hour=11, minute=41, second=20 - ) - dt_no_seconds = dt_with_seconds.replace(second=0) - expected_str = f'"{dt_no_seconds.strftime("%x %X")}"' - assert json.dumps(dt_with_seconds, cls=ConfigAdmin.CustomEncoder) == expected_str - - -def test_custom_encoder_invalid() -> None: - """Errors on other types""" - with raises(TypeError): - json.dumps(date(year=2023, month=1, day=12), 
cls=ConfigAdmin.CustomEncoder) - - -def test_validate_schedule_no_periods() -> None: - config = ConfigAdmin(None, None) - - with raises(ValueError) as err: - config._validate_schedule(ConfigTableScheduleItem(name="my-schedule")) - - assert str(err.value) == config_admin.ERR_SCHEDULE_NO_PERIOD - - -def test_validate_schedule_override_no_periods() -> None: - config = ConfigAdmin(None, None) - schedule = ConfigTableScheduleItem(name="my-schedule", override_status="running") - - result = config._validate_schedule(schedule) - - schedule["type"] = "schedule" - assert result == schedule diff --git a/source/app/tests/configuration/test_configuration.py b/source/app/tests/configuration/test_configuration.py index 50ed97fd..492cc053 100644 --- a/source/app/tests/configuration/test_configuration.py +++ b/source/app/tests/configuration/test_configuration.py @@ -1,36 +1,6 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -from unittest.mock import MagicMock, patch -from zoneinfo import ZoneInfo - from instance_scheduler import configuration -from instance_scheduler.configuration.scheduler_config import GlobalConfig -from instance_scheduler.util.app_env import AppEnv - -mock_config = GlobalConfig( - scheduled_services=["ec2"], - schedule_clusters=True, - tag_name="tag", - regions=["us-east-1"], - default_timezone=ZoneInfo("UTC"), - schedules={}, - trace=False, - enable_ssm_maintenance_windows=True, - use_metrics=True, - remote_account_ids=["123456789012"], - namespace="ns", - aws_partition="aws", - scheduler_role_name="rolename", - organization_id="", - schedule_lambda_account=False, - create_rds_snapshot=True, - started_tags="", - stopped_tags="", -) - - -def test_env_var_names() -> None: - assert configuration.ENV_STACK == "STACK_NAME" def test_month_names() -> None: @@ -80,7 +50,6 @@ def test_attributes() -> None: assert ( configuration.ENABLE_SSM_MAINTENANCE_WINDOWS == "enable_ssm_maintenance_windows" ) - assert 
configuration.METRICS == "use_metrics" assert configuration.REGIONS == "regions" assert configuration.BEGINTIME == "begintime" assert configuration.DESCRIPTION == "description" @@ -99,7 +68,6 @@ def test_attributes() -> None: assert configuration.SCHEDULE_CLUSTERS == "schedule_clusters" assert configuration.CREATE_RDS_SNAPSHOT == "create_rds_snapshot" assert configuration.STOP_NEW_INSTANCES == "stop_new_instances" - assert configuration.USE_MAINTENANCE_WINDOW == "use_maintenance_window" assert configuration.SSM_MAINTENANCE_WINDOW == "ssm_maintenance_window" assert configuration.TIMEZONE == "timezone" assert configuration.TAGNAME == "tagname" @@ -134,43 +102,3 @@ def test_tag_values() -> None: assert configuration.TAG_VAL_MONTH == "month" assert configuration.TAG_VAL_DAY == "day" assert configuration.TAG_VAL_TIMEZONE == "timezone" - - -def test_configuration_global() -> None: - assert configuration.__configuration is None - - -@patch("instance_scheduler.configuration.SchedulerConfigBuilder") -@patch("instance_scheduler.configuration.ConfigDynamodbAdapter") -def test_get_scheduler_configuration( - mock_config_dynamodb_adapter: MagicMock, - mock_scheduler_config_builder: MagicMock, - app_env: AppEnv, -) -> None: - my_configdata = "my config" - mock_config_dynamodb_adapter.return_value.config = my_configdata - expected_configuration = mock_config - mock_scheduler_config_builder.return_value.build.return_value = ( - expected_configuration - ) - - result = configuration.get_global_configuration(None) - assert result == expected_configuration - - assert configuration.__configuration == expected_configuration - mock_config_dynamodb_adapter.assert_called_once_with(app_env.config_table_name) - mock_scheduler_config_builder.assert_called_once_with(logger=None) - mock_scheduler_config_builder.return_value.build.assert_called_once_with( - my_configdata - ) - - -def test_get_scheduler_configuration_already_set() -> None: - configuration.__configuration = mock_config - assert 
configuration.get_global_configuration(None) == mock_config - - -def test_unload_scheduler_configuration() -> None: - configuration.__configuration = mock_config - configuration.unload_global_configuration() - assert configuration.__configuration is None diff --git a/source/app/tests/configuration/test_configuration_module.py b/source/app/tests/configuration/test_configuration_module.py deleted file mode 100644 index 2f195ddd..00000000 --- a/source/app/tests/configuration/test_configuration_module.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import datetime - -from instance_scheduler.configuration.scheduler_config_builder import ( - SchedulerConfigBuilder, -) -from instance_scheduler.util.app_env import AppEnv -from tests.logger import MockLogger - - -def test_scheduler_config_builder(app_env: AppEnv) -> None: - logger = MockLogger() - config_data = { - "regions": ["us-east-1"], - "scheduled_services": ["ec2"], - "stopped_tags": "ScheduleMessage=Stopped on {year}/{month}/{day} at {hour}:{minute} {timezone}", - "create_rds_snapshot": False, - "default_timezone": "US/Eastern", - "trace": True, - "started_tags": "ScheduleMessage=Started on {year}/{month}/{day} at {hour}:{minute} {timezone}", - "schedule_clusters": True, - "name": "scheduler", - "tagname": "Schedule", - "type": "config", - "periods": [ - { - "months": {"jan/3"}, - "description": "Every first monday of each quarter", - "weekdays": {"mon#1"}, - "name": "first-monday-in-quarter", - "type": "period", - }, - { - "begintime": "09:00", - "description": "Office hours", - "endtime": "17:00", - "weekdays": {"mon-fri"}, - "name": "office-hours", - "type": "period", - }, - {"endtime": "09:00", "name": "onlystop", "type": "period"}, - {"endtime": "13:00", "name": "onlystop1", "type": "period"}, - { - "begintime": "09:00", - "endtime": "10:00", - "weekdays": {"mon-sun"}, - "name": "period1", - "type": "period", - }, - 
{ - "begintime": "17:00", - "endtime": "18:00", - "weekdays": {"mon#4"}, - "name": "period2", - "type": "period", - }, - { - "begintime": "04:00", - "endtime": "07:00", - "weekdays": {"sat#4"}, - "name": "sat3", - "type": "period", - }, - { - "begintime": "12:00", - "endtime": "22:00", - "weekdays": {"sat#4"}, - "name": "sat4", - "type": "period", - }, - { - "begintime": "09:00", - "endtime": "10:50", - "weekdays": {"sun#4"}, - "name": "sun4", - "type": "period", - }, - { - "begintime": "15:00", - "endtime": "21:00", - "weekdays": {"sun#4"}, - "name": "sun4-2", - "type": "period", - }, - { - "begintime": "01:00", - "weekdays": {"fri-sat"}, - "name": "test1", - "type": "period", - }, - { - "begintime": "00:05", - "endtime": "01:05", - "weekdays": {"sat"}, - "name": "test12", - "type": "period", - }, - { - "begintime": "09:00", - "endtime": "12:00", - "weekdays": {"mon-fri"}, - "name": "test2", - "type": "period", - }, - { - "begintime": "11:00", - "endtime": "13:00", - "weekdays": {"mon-fro"}, - "name": "test3", - "type": "period", - }, - { - "begintime": "11:00", - "endtime": "15:00", - "weekdays": {"mon-fri"}, - "name": "test4", - "type": "period", - }, - { - "description": "Days in weekend", - "weekdays": {"sat-sun"}, - "name": "weekends", - "type": "period", - }, - { - "description": "Working days", - "weekdays": {"mon-fri"}, - "name": "working-days", - "type": "period", - }, - { - "description": "monday start", - "weekdays": {"mon"}, - "name": "monstart", - "type": "period", - "begintime": "09:00", - }, - { - "description": "run all day tuesday thursday", - "weekdays": {"tue-thu"}, - "name": "tuethu", - "type": "period", - "begintime": "00:00", - "endtime": "23:59", - }, - { - "description": "stop friday five pm", - "weekdays": {"fri"}, - "name": "fridaystop", - "type": "period", - "begintime": "00:00", - "endtime": "17:00", - }, - ], - "schedules": [ - { - "timezone": "US/Eastern", - "periods": {"period2", "period1"}, - "name": "describe1", - "type": 
"schedule", - }, - { - "timezone": "US/Pacific", - "periods": {"monstart", "tuethu", "fridaystop"}, - "name": "monstartandfridayend", - "type": "schedule", - }, - { - "timezone": "US/Eastern", - "periods": {"test4"}, - "name": "docDbSchedule", - "type": "schedule", - }, - { - "periods": {"onlystop1", "onlystop"}, - "name": "onlystop", - "type": "schedule", - }, - { - "timezone": "US/Eastern", - "description": "Retain running", - "periods": {"office-hours"}, - "name": "running", - "type": "schedule", - }, - { - "timezone": "US/Pacific", - "description": "Office hours in Seattle (Pacific)", - "periods": {"office-hours"}, - "name": "seattle-office-hours", - "type": "schedule", - }, - {"description": "Instances stopped", "name": "stopped", "type": "schedule"}, - {"periods": {"test3"}, "name": "test-error", "type": "schedule"}, - { - "timezone": "US/Eastern", - "periods": {"sat3", "sat4"}, - "name": "test-sat4", - "type": "schedule", - }, - { - "timezone": "Asia/Macau", - "periods": {"test12", "test1"}, - "name": "test-ssm1", - "type": "schedule", - }, - {"periods": {"test1"}, "name": "test-ssm2", "type": "schedule"}, - { - "description": "Testing config sun#4", - "periods": {"sun4", "sun4-2"}, - "name": "test-sun4", - "type": "schedule", - }, - { - "timezone": "US/Eastern", - "periods": {"test1"}, - "name": "testMetrics", - "type": "schedule", - }, - { - "timezone": "Europe/London", - "description": "Office hours in UK", - "periods": {"office-hours"}, - "name": "uk-office-hours", - "type": "schedule", - }, - ], - } - response = SchedulerConfigBuilder(logger=logger).build(config_data) - assert set(response.scheduled_services) == set(app_env.scheduled_services()) - assert response.default_timezone == app_env.default_timezone - assert response.tag_name == app_env.schedule_tag_key - assert response.create_rds_snapshot is app_env.enable_rds_snapshots - assert set(response.regions) == set(app_env.schedule_regions) - assert response.remote_account_ids == [] - - for 
schedule_name in response.schedules: - if schedule_name == "docDbSchedule": - schedule = response.get_schedule(schedule_name) - assert schedule is not None - assert schedule.timezone == "US/Eastern" - for period in schedule.periods: - valid_current_datetime = datetime.datetime( - 2021, 6, 18, 14, 5, 59, 342380 - ) - assert ( - period["period"].get_desired_state(logger, valid_current_datetime) - == "running" - ) - - invalid_datetime = datetime.datetime(2021, 6, 20, 17, 30, 59, 34258) - assert ( - period["period"].get_desired_state(logger, invalid_datetime) - == "stopped" - ) - - if schedule_name == "monstartandfridayend": - schedule = response.get_schedule(schedule_name) - assert schedule is not None - assert schedule.timezone == "US/Pacific" - for period in schedule.periods: - if period["period"].name == "tuethu": - tuesday_nine_am = datetime.datetime(2021, 9, 14, 9, 0, 00, 0) - assert ( - period["period"].get_desired_state(logger, tuesday_nine_am) - == "running" - ) - if period["period"].name == "fridaystop": - friday_five_fifteen_pm = datetime.datetime( - 2021, 9, 17, 17, 15, 00, 0 - ) - assert ( - period["period"].get_desired_state( - logger, friday_five_fifteen_pm - ) - == "stopped" - ) - friday_four_fortyfive_pm = datetime.datetime( - 2021, 9, 17, 16, 45, 00, 0 - ) - assert ( - period["period"].get_desired_state( - logger, friday_four_fortyfive_pm - ) - == "running" - ) - sat_four_fortyfive_pm = datetime.datetime( - 2021, 9, 18, 16, 45, 00, 0 - ) - assert ( - period["period"].get_desired_state( - logger, sat_four_fortyfive_pm - ) - == "stopped" - ) - sunday_four_fortyfive_pm = datetime.datetime( - 2021, 9, 19, 16, 45, 00, 0 - ) - assert ( - period["period"].get_desired_state( - logger, sunday_four_fortyfive_pm - ) - == "stopped" - ) diff --git a/source/app/tests/configuration/test_running_period.py b/source/app/tests/configuration/test_running_period.py index 0382148c..6b96b2ee 100644 --- a/source/app/tests/configuration/test_running_period.py +++ 
b/source/app/tests/configuration/test_running_period.py @@ -1,14 +1,72 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import calendar -from datetime import datetime +from datetime import datetime, time + +import pytest from instance_scheduler.configuration.running_period import RunningPeriod +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.cron.parser import ( + parse_monthdays_expr, + parse_months_expr, + parse_weekdays_expr, +) +from instance_scheduler.schedulers.states import ScheduleState +from tests.integration.helpers.schedule_helpers import quick_time from tests.logger import MockLogger -def test_weekdays_only_is_running_on_specified_day() -> None: - period = RunningPeriod(name="name", weekdays={0, 3, 5}) +@pytest.mark.parametrize( + "at_time,expected", + [ + (quick_time(5, 0, 0), ScheduleState.ANY), + (quick_time(10, 0, 0), ScheduleState.RUNNING), + (quick_time(15, 0, 0), ScheduleState.RUNNING), + ], +) +def test_one_sided_start(at_time: datetime, expected: ScheduleState) -> None: + period = RunningPeriod(name="test-period", begintime=time(10, 0, 0)) + assert period.get_desired_state(MockLogger(), at_time) == expected + + +@pytest.mark.parametrize( + "at_time,expected", + [ + (quick_time(5, 0, 0), ScheduleState.ANY), + (quick_time(10, 0, 0), ScheduleState.STOPPED), + (quick_time(15, 0, 0), ScheduleState.STOPPED), + ], +) +def test_one_sided_stop(at_time: datetime, expected: ScheduleState) -> None: + period = RunningPeriod(name="test-period", endtime=time(10, 0, 0)) + assert period.get_desired_state(MockLogger(), at_time) == expected + + +@pytest.mark.parametrize( + "at_time,expected", + [ + (quick_time(5, 0, 0), ScheduleState.STOPPED), + (quick_time(10, 0, 0), ScheduleState.RUNNING), + (quick_time(15, 0, 0), ScheduleState.RUNNING), + (quick_time(20, 0, 0), ScheduleState.STOPPED), + (quick_time(22, 0, 0), ScheduleState.STOPPED), + 
], +) +def test_regular_period(at_time: datetime, expected: ScheduleState) -> None: + period = RunningPeriod( + name="test-period", begintime=time(10, 0, 0), endtime=time(20, 0, 0) + ) + assert period.get_desired_state(MockLogger(), at_time) == expected + + +def test_weekdays_only_is_running_on_specified_days() -> None: + period = RunningPeriod( + name="name", + cron_recurrence=CronRecurrenceExpression( + weekdays=parse_weekdays_expr({"0", "3", "5"}) + ), + ) # monday (0) assert ( period.get_desired_state(MockLogger(), datetime(year=2023, month=7, day=10)) @@ -47,7 +105,10 @@ def test_weekdays_only_is_running_on_specified_day() -> None: def test_month_only_is_running_for_whole_month() -> None: - period = RunningPeriod(name="name", months={4}) + period = RunningPeriod( + name="name", + cron_recurrence=CronRecurrenceExpression(months=parse_months_expr({"4"})), + ) _, days_in_month = calendar.monthrange(year=2023, month=4) for month_day in range(1, days_in_month + 1): @@ -60,7 +121,10 @@ def test_month_only_is_running_for_whole_month() -> None: def test_month_only_is_stopped_in_other_months() -> None: - period = RunningPeriod(name="name", months={4}) + period = RunningPeriod( + name="name", + cron_recurrence=CronRecurrenceExpression(months=parse_months_expr({"4"})), + ) for month in range(1, 13): if month == 4: @@ -74,7 +138,13 @@ def test_month_only_is_stopped_in_other_months() -> None: def test_monthday_only_is_running_on_specific_monthdays() -> None: - period = RunningPeriod(name="name", monthdays={3, 5, 15}) + period = RunningPeriod( + name="name", + cron_recurrence=CronRecurrenceExpression( + monthdays=parse_monthdays_expr({"3", "5", "15"}) + ), + ) + for month_day in [3, 5, 15]: assert ( period.get_desired_state( @@ -85,7 +155,12 @@ def test_monthday_only_is_running_on_specific_monthdays() -> None: def test_monthday_only_is_stopped_on_other_monthdays() -> None: - period = RunningPeriod(name="name", monthdays={3, 5, 15}) + period = RunningPeriod( + name="name", + 
cron_recurrence=CronRecurrenceExpression( + monthdays=parse_monthdays_expr({"3", "5", "15"}) + ), + ) _, days_in_month = calendar.monthrange(year=2023, month=4) for month_day in range(1, days_in_month + 1): if month_day in {3, 5, 15}: diff --git a/source/app/tests/configuration/test_scheduler_config.py b/source/app/tests/configuration/test_scheduler_config.py deleted file mode 100644 index 2cef588b..00000000 --- a/source/app/tests/configuration/test_scheduler_config.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -from instance_scheduler.configuration import scheduler_config - - -def test_inf_scheduler_display() -> None: - assert scheduler_config.INF_SCHEDULE_DISPLAY == ( - "Configuration:\n" - 'Scheduled services = "{}"\n' - 'Schedule clusters = "{}"\n' - 'Create RDS instance snapshot = "{}"\n' - 'Tagname = "{}"\n' - 'Default timezone = "{}"\n' - 'Trace = "{}"\n' - 'Enable SSM Maintenance Windows = "{}"\n' - 'Use metrics = "{}"\n' - 'Regions = "{}"\n' - 'Started tags = "{}"\n' - 'Stopped tags = "{}"\n' - 'Process Lambda account = "{}"\n' - 'Scheduler Role Name = "{}"\n' - 'Namespace = "{}"\n' - 'Organization Id = "{}"\n' - 'Aws Partition = "{}"\n' - 'Remote Account Ids = "{}"' - ) - - -def test_tag_val_str() -> None: - assert scheduler_config.TAG_VAL_STR == "{{{}}}" diff --git a/source/app/tests/configuration/test_scheduler_config_builder.py b/source/app/tests/configuration/test_scheduler_config_builder.py deleted file mode 100644 index ac842b7a..00000000 --- a/source/app/tests/configuration/test_scheduler_config_builder.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -import datetime - -from instance_scheduler.configuration.instance_schedule import Instance -from instance_scheduler.configuration.scheduler_config_builder import ( - SchedulerConfigBuilder, -) -from instance_scheduler.util.app_env import AppEnv -from tests.logger import MockLogger - - -def test_instance_schedule(app_env: AppEnv) -> None: - logger = MockLogger() - config_data = { - "regions": {"us-east-1"}, - "scheduled_services": {"ec2"}, - "stopped_tags": "ScheduleMessage=Stopped on {year}/{month}/{day} at {hour}:{minute} {timezone}", - "create_rds_snapshot": False, - "default_timezone": "US/Eastern", - "trace": True, - "started_tags": "ScheduleMessage=Started on {year}/{month}/{day} at {hour}:{minute} {timezone}", - "schedule_clusters": True, - "name": "scheduler", - "tagname": "Schedule", - "type": "config", - "periods": [ - { - "months": {"jan/3"}, - "description": "Every first monday of each quarter", - "weekdays": {"mon#1"}, - "name": "first-monday-in-quarter", - "type": "period", - }, - { - "begintime": "09:00", - "description": "Office hours", - "endtime": "17:00", - "weekdays": {"mon-fri"}, - "name": "office-hours", - "type": "period", - }, - {"endtime": "09:00", "name": "onlystop", "type": "period"}, - {"endtime": "13:00", "name": "onlystop1", "type": "period"}, - { - "begintime": "09:00", - "endtime": "10:00", - "weekdays": {"mon-sun"}, - "name": "period1", - "type": "period", - }, - { - "begintime": "17:00", - "endtime": "18:00", - "weekdays": {"mon#4"}, - "name": "period2", - "type": "period", - }, - { - "begintime": "04:00", - "endtime": "07:00", - "weekdays": {"sat#4"}, - "name": "sat3", - "type": "period", - }, - { - "begintime": "12:00", - "endtime": "22:00", - "weekdays": {"sat#4"}, - "name": "sat4", - "type": "period", - }, - { - "begintime": "09:00", - "endtime": "10:50", - "weekdays": {"sun#4"}, - "name": "sun4", - "type": "period", - }, - { - "begintime": "15:00", - "endtime": "21:00", - "weekdays": 
{"sun#4"}, - "name": "sun4-2", - "type": "period", - }, - { - "begintime": "01:00", - "weekdays": {"fri-sat"}, - "name": "test1", - "type": "period", - }, - { - "begintime": "00:05", - "endtime": "01:05", - "weekdays": {"sat"}, - "name": "test12", - "type": "period", - }, - { - "begintime": "09:00", - "endtime": "12:00", - "weekdays": {"mon-fri"}, - "name": "test2", - "type": "period", - }, - { - "begintime": "11:00", - "endtime": "13:00", - "weekdays": {"mon-fro"}, - "name": "test3", - "type": "period", - }, - { - "begintime": "11:00", - "endtime": "15:00", - "weekdays": {"mon-fri"}, - "name": "test4", - "type": "period", - }, - { - "description": "Days in weekend", - "weekdays": {"sat-sun"}, - "name": "weekends", - "type": "period", - }, - { - "description": "Working days", - "weekdays": {"mon-fri"}, - "name": "working-days", - "type": "period", - }, - ], - "schedules": [ - { - "timezone": "US/Eastern", - "periods": {"period2", "period1"}, - "name": "describe1", - "type": "schedule", - }, - { - "timezone": "US/Eastern", - "periods": {"test4"}, - "name": "docDbSchedule", - "type": "schedule", - }, - { - "periods": {"onlystop1", "onlystop"}, - "name": "onlystop", - "type": "schedule", - }, - { - "description": "Retain running", - "periods": {"office-hours"}, - "name": "running", - "type": "schedule", - }, - { - "timezone": "US/Pacific", - "description": "Office hours in Seattle (Pacific)", - "periods": {"office-hours"}, - "name": "seattle-office-hours", - "type": "schedule", - }, - {"description": "Instances stopped", "name": "stopped", "type": "schedule"}, - {"periods": {"test3"}, "name": "test-error", "type": "schedule"}, - { - "timezone": "US/Eastern", - "periods": {"sat3", "sat4"}, - "name": "test-sat4", - "type": "schedule", - }, - {"periods": {"test12", "test1"}, "name": "test-ssm1", "type": "schedule"}, - {"periods": {"test1"}, "name": "test-ssm2", "type": "schedule"}, - { - "description": "Testing config sun#4", - "periods": {"sun4", "sun4-2"}, - "name": 
"test-sun4", - "type": "schedule", - }, - {"periods": {"test1"}, "name": "testMetrics", "type": "schedule"}, - { - "timezone": "Europe/London", - "description": "Office hours in UK", - "periods": {"office-hours"}, - "name": "uk-office-hours", - "type": "schedule", - }, - ], - } - - response = SchedulerConfigBuilder(logger=logger).build(config_data) - - schedule = response.get_schedule("running") - assert schedule is not None - - instance = Instance( - id="i-00bedf8a12df1dd6a", - schedule_name="stopped", - name="ISRelated", - state=16, - state_name="running", - is_running=True, - is_terminated=False, - current_state="running", - instancetype="t2.micro", - tags={"Name": "ISRelated", "Schedule": "running"}, - account="111111111111", - region="us-east-1", - service="ec2", - instance_str="EC2:i-00bedf8a12df1dd6a (ISRelated)", - allow_resize=True, - hibernate=False, - maintenance_window=None, - ) - - current_test_execution_time = datetime.datetime.now(app_env.default_timezone) - inst_state, _, valid_period = schedule.get_desired_state( - instance, current_test_execution_time, logger=logger - ) - - # the configured period is a weekday mon-fri and time is between 9:00 AM and 5:00 PM. - if current_test_execution_time.today().weekday() in [0, 1, 2, 3, 4]: - if ( - current_test_execution_time.hour < 17 - and current_test_execution_time.hour > 9 - ): - assert valid_period == "office-hours" - assert inst_state == "running" - else: - assert valid_period is None - assert inst_state == "stopped" - else: - assert valid_period is None - assert inst_state == "stopped" diff --git a/source/app/tests/configuration/test_scheduling_context.py b/source/app/tests/configuration/test_scheduling_context.py deleted file mode 100644 index 552a0218..00000000 --- a/source/app/tests/configuration/test_scheduling_context.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -import datetime -from typing import Any -from zoneinfo import ZoneInfo - -from instance_scheduler.configuration import scheduling_context -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.configuration.running_period_dict_element import ( - RunningPeriodDictElement, -) -from instance_scheduler.configuration.scheduling_context import ( - SchedulingContext, - TagTemplate, -) - -test_period_1 = RunningPeriod( - name="test-period-1", - begintime=datetime.time(9, 0, 0), - endtime=datetime.time(17, 0, 0), - weekdays={0, 1, 2, 3, 4}, -) - -test_schedule_1 = InstanceSchedule( - name="test-schedule-1", - timezone="UTC", - override_status=None, - use_metrics=True, - periods=[RunningPeriodDictElement(period=test_period_1, instancetype=None)], - retain_running=None, - enforced=False, - hibernate=False, - use_maintenance_window=False, - stop_new_instances=True, -) - -test_schedule_2 = InstanceSchedule( - name="test-schedule-2", - timezone="UTC", - override_status=None, - use_metrics=True, - periods=[{"period": test_period_1, "instancetype": None}], - retain_running=None, - enforced=False, - hibernate=False, - use_maintenance_window=False, - stop_new_instances=True, -) - -context = SchedulingContext( - account_id="111122223333", - schedule_lambda_account=True, - service="ec2", - region="us-east-1", - tag_name="Schedule", - current_dt=datetime.datetime.fromisoformat("2023-06-09T16:38:42.862261+00:00"), - default_timezone=ZoneInfo("UTC"), - schedules={"test-schedule-1": test_schedule_1, "test-schedule-2": test_schedule_2}, - schedule_clusters=False, - trace=False, - enable_ssm_maintenance_windows=False, - use_metrics=False, - namespace="namespace", - aws_partition="aws", - scheduler_role_name="scheduler-role", - organization_id="", - create_rds_snapshot=False, - started_tags=[TagTemplate(Key="action", 
Value="started")], - stopped_tags=[TagTemplate(Key="action", Value="stopped")], -) - - -expectedOut: dict[str, Any] = { - "tag_name": "Schedule", - "default_timezone": "UTC", - "trace": False, - "namespace": "namespace", - "current_dt": "2023-06-09T16:38:42.862261+00:00", - "scheduler_role_name": "scheduler-role", - "organization_id": "", - "aws_partition": "aws", - "enable_ssm_maintenance_windows": False, - "use_metrics": False, - "schedule_clusters": False, - "create_rds_snapshot": False, - "schedule_lambda_account": True, - "started_tags": "action=started", # many of these are optional and not normally included in shape when none - "stopped_tags": "action=stopped", - "region": "us-east-1", - "service": "ec2", - "account_id": "111122223333", - "schedules": { - "test-schedule-1": { - "name": "test-schedule-1", - "timezone": "UTC", - "stop_new_instances": True, - "use_metrics": True, - "enforced": False, - "hibernate": False, - "use_maintenance_window": False, - "periods": ["test-period-1"], - }, - "test-schedule-2": { - "name": "test-schedule-2", - "timezone": "UTC", - "stop_new_instances": True, - "use_metrics": True, - "enforced": False, - "hibernate": False, - "use_maintenance_window": False, - "periods": ["test-period-1"], - }, - }, - "periods": { - "test-period-1": { - "begintime": "09:00", - "endtime": "17:00", - "weekdays": [0, 1, 2, 3, 4], - }, - }, -} - - -def test_to_dict_matches_expected_event_format() -> None: - # print(context.to_dict()) - assert context.to_dict() == expectedOut - - -def test_result_of_from_dict_matches_original_object() -> None: - dict_representation = context.to_dict() - built_context = scheduling_context.from_dict(dict_representation) - assert context.to_dict() == built_context.to_dict() diff --git a/source/app/tests/configuration/test_time_utils.py b/source/app/tests/configuration/test_time_utils.py new file mode 100644 index 00000000..84e16d31 --- /dev/null +++ b/source/app/tests/configuration/test_time_utils.py @@ -0,0 +1,17 @@ 
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import pytest + +from instance_scheduler.configuration.time_utils import is_valid_time_str + + +@pytest.mark.parametrize( + "time_str", ["00:00", "1:00", "01:00", "10:00", "00:05", "00:15", "23:59"] +) +def test_valid_time_str(time_str: str) -> None: + assert is_valid_time_str(time_str) is True + + +@pytest.mark.parametrize("time_str", ["abc", "10:5", "1:5", "24:00", "25:00"]) +def test_invalid_time_str(time_str: str) -> None: + assert is_valid_time_str(time_str) is False diff --git a/source/app/tests/conftest.py b/source/app/tests/conftest.py index d5b44f09..149b980e 100644 --- a/source/app/tests/conftest.py +++ b/source/app/tests/conftest.py @@ -2,24 +2,43 @@ # SPDX-License-Identifier: Apache-2.0 from collections.abc import Iterator from os import environ -from typing import TYPE_CHECKING, Final +from typing import TYPE_CHECKING, Final, Optional from unittest.mock import patch import boto3 from moto import mock_aws from pytest import fixture -import instance_scheduler.util.app_env -from instance_scheduler.configuration import unload_global_configuration +from instance_scheduler.model import EC2SSMMaintenanceWindowStore, MWStore +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.model.store.dynamo_mw_store import DynamoMWStore +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleDefinitionStore, +) +from instance_scheduler.ops_metrics.metrics import MetricsEnvironment from 
instance_scheduler.util.app_env import AppEnv -from tests.util.test_app_env import env_from_app_env, example_app_env +from instance_scheduler.util.session_manager import AssumedRole +from tests import DEFAULT_REGION +from tests.test_utils.app_env_utils import mock_app_env +from tests.test_utils.mock_metrics_environment import MockMetricsEnviron if TYPE_CHECKING: from mypy_boto3_dynamodb.client import DynamoDBClient + from mypy_boto3_ec2.client import EC2Client + from mypy_boto3_ec2.type_defs import FilterTypeDef from mypy_boto3_logs.client import CloudWatchLogsClient from mypy_boto3_sns.client import SNSClient else: DynamoDBClient = object + EC2Client = object CloudWatchLogsClient = object SNSClient = object @@ -31,7 +50,7 @@ def aws_credentials() -> Iterator[None]: "AWS_SECRET_ACCESS_KEY": "testing", "AWS_SECURITY_TOKEN": "testing", "AWS_SESSION_TOKEN": "testing", - "AWS_DEFAULT_REGION": "us-east-1", + "AWS_DEFAULT_REGION": DEFAULT_REGION, } with patch.dict(environ, creds, clear=True): yield @@ -39,19 +58,14 @@ def aws_credentials() -> Iterator[None]: @fixture(autouse=True) def app_env(aws_credentials: None) -> Iterator[AppEnv]: - # clear cached env for each test for isolation - instance_scheduler.util.app_env._app_env = None - env = example_app_env() - with patch.dict(environ, env_from_app_env(env)): + with mock_app_env() as env: yield env @fixture(autouse=True) -def test_cleanup() -> Iterator[None]: - # runs before each test - yield - # runs after each test - unload_global_configuration() +def metrics_environment(app_env: None) -> Iterator[MetricsEnvironment]: + with MockMetricsEnviron() as metrics_env: + yield metrics_env @fixture @@ -60,6 +74,37 @@ def moto_backend() -> Iterator[None]: yield +def get_ami(region: str = "us-east-1") -> str: + ec2: Final[EC2Client] = boto3.client("ec2", region_name=region) + paginator: Final = ec2.get_paginator("describe_images") + filters: Final[list[FilterTypeDef]] = [ + {"Name": "name", "Values": 
["al2023-ami-minimal-*-arm64"]}, + ] + image_id: Optional[str] = None + for page in paginator.paginate(Filters=filters, Owners=["amazon"]): + if page["Images"]: + image_id = page["Images"][0]["ImageId"] + break + if not image_id: + raise ValueError("No AMI found") + return image_id + + +@fixture() +def hub_role() -> AssumedRole: + return AssumedRole( + role_name="hub-role", + account="123456789012", + region="us-east-1", + session=boto3.Session(), + ) + + +@fixture +def ami(moto_backend: None) -> Iterator[str]: + yield get_ami() + + @fixture def dynamodb_client(moto_backend: None) -> Iterator[DynamoDBClient]: """DDB Mock Client""" @@ -68,7 +113,28 @@ def dynamodb_client(moto_backend: None) -> Iterator[DynamoDBClient]: @fixture -def config_table(app_env: AppEnv, moto_backend: None) -> None: +def config_item_store( + config_table: str, +) -> DdbConfigItemStore: + store = DdbConfigItemStore(config_table) + store.put( + DdbConfigItem("", []) + ) # expected to always exist as these are set up by the initial custom resource + return store + + +@fixture +def schedule_store(config_table: str) -> ScheduleDefinitionStore: + return DynamoScheduleDefinitionStore(config_table) + + +@fixture +def period_store(config_table: str) -> PeriodDefinitionStore: + return DynamoPeriodDefinitionStore(config_table) + + +@fixture +def config_table(app_env: AppEnv, moto_backend: None) -> Iterator[str]: boto3.client("dynamodb").create_table( AttributeDefinitions=[ {"AttributeName": "name", "AttributeType": "S"}, @@ -81,25 +147,36 @@ def config_table(app_env: AppEnv, moto_backend: None) -> None: ], BillingMode="PAY_PER_REQUEST", ) + yield app_env.config_table_name @fixture -def maint_win_table(moto_backend: None, app_env: AppEnv) -> str: - maint_win_table_name: Final = app_env.maintenance_window_table_name +def maint_win_table(app_env: AppEnv, moto_backend: None) -> Iterator[str]: + table_name: Final = app_env.maintenance_window_table_name ddb: Final[DynamoDBClient] = boto3.client("dynamodb") 
ddb.create_table( AttributeDefinitions=[ - {"AttributeName": "Name", "AttributeType": "S"}, {"AttributeName": "account-region", "AttributeType": "S"}, + {"AttributeName": "name-id", "AttributeType": "S"}, ], - TableName=maint_win_table_name, + TableName=table_name, KeySchema=[ - {"AttributeName": "Name", "KeyType": "HASH"}, - {"AttributeName": "account-region", "KeyType": "RANGE"}, + {"AttributeName": "account-region", "KeyType": "HASH"}, + {"AttributeName": "name-id", "KeyType": "RANGE"}, ], BillingMode="PAY_PER_REQUEST", ) - return maint_win_table_name + yield table_name + + +@fixture() +def maint_win_store(maint_win_table: str) -> EC2SSMMaintenanceWindowStore: + return EC2SSMMaintenanceWindowStore(maint_win_table) + + +@fixture() +def mw_store(maint_win_table: str) -> MWStore: + return DynamoMWStore(maint_win_table) @fixture diff --git a/source/app/tests/cron/__init__.py b/source/app/tests/cron/__init__.py new file mode 100644 index 00000000..04f8b7b7 --- /dev/null +++ b/source/app/tests/cron/__init__.py @@ -0,0 +1,2 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 diff --git a/source/app/tests/cron/test_asg.py b/source/app/tests/cron/test_asg.py new file mode 100644 index 00000000..cc7c180b --- /dev/null +++ b/source/app/tests/cron/test_asg.py @@ -0,0 +1,151 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from pytest import raises + +from instance_scheduler.cron.asg import ( + to_asg_expr_monthdays, + to_asg_expr_months, + to_asg_expr_weekdays, +) +from instance_scheduler.cron.expression import ( + CronAll, + CronRange, + CronSingleValueNumeric, +) +from instance_scheduler.cron.parser import ( + parse_monthdays_expr, + parse_months_expr, + parse_weekdays_expr, +) + + +def test_to_asg_expr_months() -> None: + assert to_asg_expr_months(CronAll()) == "*" + assert to_asg_expr_months(CronSingleValueNumeric(value=3)) == "mar" + assert to_asg_expr_months(CronSingleValueNumeric(value=7)) == "jul" + assert ( + to_asg_expr_months( + CronRange( + start=CronSingleValueNumeric(value=2), + end=CronSingleValueNumeric(value=9), + ) + ) + == "feb-sep" + ) + + +def test_e2e_months() -> None: + assert to_asg_expr_months(parse_months_expr({"*"})) == "*" + assert to_asg_expr_months(parse_months_expr({"?"})) == "*" + + assert to_asg_expr_months(parse_months_expr({"jan"})) == "jan" + assert to_asg_expr_months(parse_months_expr({"February"})) == "feb" + assert to_asg_expr_months(parse_months_expr({"July"})) == "jul" + + assert to_asg_expr_months(parse_months_expr({"3"})) == "mar" + assert to_asg_expr_months(parse_months_expr({"6"})) == "jun" + + assert to_asg_expr_months(parse_months_expr({"3-8"})) == "mar-aug" + assert to_asg_expr_months(parse_months_expr({"5-12"})) == "may-dec" + + assert to_asg_expr_months(parse_months_expr({"jun-nov"})) == "jun-nov" + assert to_asg_expr_months(parse_months_expr({"April-February"})) == "apr-feb" + assert to_asg_expr_months(parse_months_expr({"7-1"})) == "jul-jan" + + assert to_asg_expr_months(parse_months_expr({"4/2"})) == "apr/2" + assert to_asg_expr_months(parse_months_expr({"2-9/3"})) == "feb-sep/3" + assert to_asg_expr_months(parse_months_expr({"Jun-Oct/5"})) == "jun-oct/5" + assert to_asg_expr_months(parse_months_expr({"January-August/3"})) == "jan-aug/3" + + assert 
to_asg_expr_months(parse_months_expr({"7-2/3"})) == "jul-feb/3" + assert to_asg_expr_months(parse_months_expr({"nov-jun/4"})) == "nov-jun/4" + assert to_asg_expr_months(parse_months_expr({" October-April/ 3 "})) == "oct-apr/3" + + assert set( + to_asg_expr_months( + parse_months_expr({"jul,sep", "Oct-Dec/2", "feb-mar"}) + ).split(",") + ) == {"jul", "sep", "oct-dec/2", "feb-mar"} + + +def test_e2e_monthdays() -> None: + assert to_asg_expr_monthdays(parse_monthdays_expr({"*"})) == "*" + assert to_asg_expr_monthdays(parse_monthdays_expr({"?"})) == "*" + + assert to_asg_expr_monthdays(parse_monthdays_expr({"3"})) == "3" + assert to_asg_expr_monthdays(parse_monthdays_expr({"6"})) == "6" + + assert to_asg_expr_monthdays(parse_monthdays_expr({"3-8"})) == "3-8" + + assert to_asg_expr_monthdays(parse_monthdays_expr({"L"})) == "L" + assert to_asg_expr_monthdays(parse_monthdays_expr({"4-L"})) == "4-L" + + assert to_asg_expr_monthdays(parse_monthdays_expr({"4/2"})) == "4/2" + assert to_asg_expr_monthdays(parse_monthdays_expr({"3-9/3"})) == "3-9/3" + assert to_asg_expr_monthdays(parse_monthdays_expr({"16-L/4"})) == "16-L/4" + + assert set( + to_asg_expr_monthdays(parse_monthdays_expr({"3,6", "10-15/2", "22-23"})).split( + "," + ) + ) == {"3", "6", "10-15/2", "22-23"} + + with raises(NotImplementedError): + to_asg_expr_monthdays(parse_monthdays_expr({"15,L,5W"})) + + +def test_e2e_weekdays() -> None: + assert to_asg_expr_weekdays(parse_weekdays_expr({"*"})) == "*" + assert to_asg_expr_weekdays(parse_weekdays_expr({"?"})) == "*" + + assert to_asg_expr_weekdays(parse_weekdays_expr({"mon"})) == "mon" + assert to_asg_expr_weekdays(parse_weekdays_expr({"Tuesday"})) == "tue" + assert to_asg_expr_weekdays(parse_weekdays_expr({"Saturday"})) == "sat" + + assert to_asg_expr_weekdays(parse_weekdays_expr({"3"})) == "thu" + assert to_asg_expr_weekdays(parse_weekdays_expr({"6"})) == "sun" + + assert to_asg_expr_weekdays(parse_weekdays_expr({"3-6"})) == "thu-sun" + assert 
to_asg_expr_weekdays(parse_weekdays_expr({"1-3"})) == "tue-thu" + + assert to_asg_expr_weekdays(parse_weekdays_expr({"tue-thu"})) == "tue-thu" + assert to_asg_expr_weekdays(parse_weekdays_expr({"Monday-Friday"})) == "mon-fri" + + assert to_asg_expr_weekdays(parse_weekdays_expr({"4-2"})) == "fri-wed" + assert to_asg_expr_weekdays(parse_weekdays_expr({"sat-mon"})) == "sat-mon" + + assert to_asg_expr_weekdays(parse_weekdays_expr({"4/2"})) == "fri/2" + assert to_asg_expr_weekdays(parse_weekdays_expr({"0-4/3"})) == "mon-fri/3" + assert to_asg_expr_weekdays(parse_weekdays_expr({"Tue-Sun/3"})) == "tue-sun/3" + assert to_asg_expr_weekdays(parse_weekdays_expr({"Monday-Friday/5"})) == "mon-fri/5" + + assert to_asg_expr_weekdays(parse_weekdays_expr({"6-2/3"})) == "sun-wed/3" + assert to_asg_expr_weekdays(parse_weekdays_expr({"fri-wed/3"})) == "fri-wed/3" + assert ( + to_asg_expr_weekdays(parse_weekdays_expr({" Saturday-Thursday/ 2 "})) + == "sat-thu/2" + ) + + assert set( + to_asg_expr_weekdays( + parse_weekdays_expr({"0,thu", "Wednesday-Friday/2", "sat-sun"}) + ).split(",") + ) == {"mon", "thu", "wed-fri/2", "sat-sun"} + + with raises(NotImplementedError): + to_asg_expr_weekdays(parse_weekdays_expr({"Mon#3"})) + + with raises(NotImplementedError): + to_asg_expr_weekdays(parse_weekdays_expr({"Wednesday#2"})) + + with raises(NotImplementedError): + to_asg_expr_weekdays(parse_weekdays_expr({"3#1"})) + + with raises(NotImplementedError): + to_asg_expr_weekdays(parse_weekdays_expr({"satL"})) + + with raises(NotImplementedError): + to_asg_expr_weekdays(parse_weekdays_expr({"ThursdayL"})) + + with raises(NotImplementedError): + to_asg_expr_weekdays(parse_weekdays_expr({"2L"})) diff --git a/source/app/tests/cron/test_cron_to_running_period.py b/source/app/tests/cron/test_cron_to_running_period.py new file mode 100644 index 00000000..fd13519e --- /dev/null +++ b/source/app/tests/cron/test_cron_to_running_period.py @@ -0,0 +1,187 @@ +# Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime, timezone + +import pytest + +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.cron.cron_to_running_period import ( + IntDomain, + _range_to_discrete_values, + _resolve_first_occurrence_of_weekday_in_month, + months_cron_expr_contains, +) +from instance_scheduler.cron.expression import CronRange +from instance_scheduler.cron.parser import _parse_multi_general, parse_months_expr + + +def test_in_period_check_months() -> None: + assert months_cron_expr_contains( + parse_months_expr({"*"}), + datetime(year=2024, month=1, day=15, tzinfo=timezone.utc), + ) + + assert months_cron_expr_contains( + parse_months_expr({"jan"}), + datetime(year=2024, month=1, day=15, tzinfo=timezone.utc), + ) + assert not months_cron_expr_contains( + parse_months_expr({"sep"}), + datetime(year=2024, month=2, day=15, tzinfo=timezone.utc), + ) + assert months_cron_expr_contains( + parse_months_expr({"3"}), + datetime(year=2024, month=3, day=15, tzinfo=timezone.utc), + ) + assert not months_cron_expr_contains( + parse_months_expr({"8"}), + datetime(year=2024, month=9, day=15, tzinfo=timezone.utc), + ) + + +@pytest.mark.parametrize( + "expr,expected_result", + [ + ("1-4", {1, 2, 3, 4}), # simple no-step case + ("1-7/2", {1, 3, 5, 7}), # simple step case + ("1-7/4", {1, 5}), # larger step with truncation + ("1-L/2", {1, 3, 5, 7}), # simple step case with L + ("5-4/2", {5, 7, 2, 4}), # wrap + ("6-4/2", {6, 1, 3}), # wrap + ("0/2", {2, 4, 6}), # start before domain (iterates through domain) + ("3-12/2", {3, 5, 7}), # truncate overruns to within domain + ("4-4/2", {4}), # -4/2 adds nothing, but should also not be an invalid format + ("8-L/2", set()), # start after domain (no valid days) + ("8/2", set()), # start after domain (no valid days) + ("8-12/2", set()), # start after domain (no valid days) + ], +) +def test_range_wraps_as_expected(expr: 
str, expected_result: set[int]) -> None: + # using 1-7 (weekdays) to make scenarios easier/more obvious + domain = IntDomain(start=1, end=7) + parsed_range = _parse_multi_general({expr}, {}) + assert isinstance(parsed_range, CronRange) + + assert _range_to_discrete_values(parsed_range, domain) == expected_result + + +@pytest.mark.parametrize( + "expr, expected_day_to_run_on", + [ + ("1W", 3), # Saturday as 1st of month, go forward to Monday + ("2W", 3), # Sunday, run on the Monday after + ("3W", 3), # Monday, no need to adjust + ("15W", 14), # Saturday, run on the Friday before + ("16W", 17), # Sunday, run on the Monday after + ("29W", 28), # Saturday, run on the Friday before + ("30W", 28), # Sunday as last of month, go backward to Friday + ], +) +def test_nearest_weekday_runs_as_expected( + expr: str, expected_day_to_run_on: int +) -> None: + # april 2023 is a month that started on a Saturday and ended on a Sunday. + parsed_expr = CronRecurrenceExpression.parse(monthdays={expr}) + for day in range(1, 31): + expected_result = day == expected_day_to_run_on + actual_result = parsed_expr.contains(datetime(year=2023, month=4, day=day)) + assert ( + expected_result == actual_result + ), f"Expected {expected_result}, got {actual_result}, on day {day}" + + +@pytest.mark.parametrize( + "expr, expected_day_to_run_on", + [ + ("monL", 29), + ("1L", 30), + ("WedL", 24), + ("3L", 25), + ("FRIL", 26), + ("satL", 27), + ("6L", 28), + ], +) +def test_last_weekday_runs_on_expected_day( + expr: str, expected_day_to_run_on: int +) -> None: + # The last day of April 2024 is a tuesday + parsed_expr = CronRecurrenceExpression.parse(weekdays={expr}) + for day in range(1, 31): + expected_result = day == expected_day_to_run_on + actual_result = parsed_expr.contains(datetime(year=2024, month=4, day=day)) + assert ( + expected_result == actual_result + ), f"Expected {expected_result}, got {actual_result}, on day {day}" + + +@pytest.mark.parametrize( + "expr,
expected_day_to_run_on", + [ + ("mon#1", 1), + ("tue#1", 2), + ("wed#1", 3), + ("thu#1", 4), + ("fri#1", 5), + ("sat#1", 6), + ("sun#1", 7), + ("mon#2", 8), + ("mon#3", 15), + ("mon#4", 22), + ("mon#5", 29), + ("TUE#3", 16), + ("sun#2", 14), + ("Tue#5", 30), # last day of month + ], +) +def test_nth_weekday_runs_on_expected_day( + expr: str, expected_day_to_run_on: int +) -> None: + # using April 2024 as test month. + parsed_expr = CronRecurrenceExpression.parse(weekdays={expr}) + for day in range(1, 31): + expected_result = day == expected_day_to_run_on + actual_result = parsed_expr.contains(datetime(year=2024, month=4, day=day)) + assert ( + expected_result == actual_result + ), f"Expected {expected_result}, got {actual_result}, on day {day}" + + +@pytest.mark.parametrize( + "expr", + [ + # ("mon#6"), illegal format + "Thu#5" + ], +) +def test_nth_weekday_does_not_run_when_weekday_does_not_exist_in_month( + expr: str, +) -> None: + # using April 2024 as test month. + parsed_expr = CronRecurrenceExpression.parse(weekdays={expr}) + for day in range(1, 31): + result = parsed_expr.contains(datetime(year=2024, month=4, day=day)) + assert result is not True, f"Expected never to run, but ran on day {day}" + + +@pytest.mark.parametrize( + "weekday, expected", + [ + # ("mon#6"), illegal format + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 5), + (5, 6), + (6, 7), + ], +) +def test_resolve_first_occurrence_of_weekday_in_month_returns_expected( + weekday: int, expected: int +) -> None: + # using April 2024 as test month. + reference_date = datetime(year=2024, month=4, day=1) + assert _resolve_first_occurrence_of_weekday_in_month( + weekday, reference_date + ) == reference_date.replace(day=expected) diff --git a/source/app/tests/cron/test_monthdays_parser.py b/source/app/tests/cron/test_monthdays_parser.py new file mode 100644 index 00000000..c3c15004 --- /dev/null +++ b/source/app/tests/cron/test_monthdays_parser.py @@ -0,0 +1,127 @@ +# Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import Final + +from pytest import raises + +from instance_scheduler.cron.expression import ( + CronAll, + CronNearestWeekday, + CronRange, + CronSingleValueLast, + CronSingleValueNumeric, + CronUnion, +) +from instance_scheduler.cron.parser import parse_monthdays_expr + + +def test_monthdays_parser_parses_none_as_all_values() -> None: + assert parse_monthdays_expr(None) == CronAll() + + +def test_monthdays_parser_errors_on_empty_set() -> None: + # string sets in DynamoDB are not allowed to be empty, so an empty set for a + # days-of-month field would be malformed + with raises(ValueError): + parse_monthdays_expr(set()) + + +def test_monthdays_parser_parses_wildcards() -> None: + assert parse_monthdays_expr({"*"}) == CronAll() + assert parse_monthdays_expr({"?"}) == CronAll() + + +def test_monthdays_parser_parses_single_values() -> None: + for i in range(1, 32): + assert parse_monthdays_expr({str(i)}) == CronSingleValueNumeric(value=i) + + +def test_monthdays_parser_parses_ranges() -> None: + assert parse_monthdays_expr({"5-20"}) == CronRange( + start=CronSingleValueNumeric(value=5), end=CronSingleValueNumeric(value=20) + ) + assert parse_monthdays_expr({"1-30"}) == CronRange( + start=CronSingleValueNumeric(value=1), end=CronSingleValueNumeric(value=30) + ) + + +def test_monthdays_parser_parses_comma_separated() -> None: + assert parse_monthdays_expr({"1,2,3,4,5"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=1), + CronSingleValueNumeric(value=2), + CronSingleValueNumeric(value=3), + CronSingleValueNumeric(value=4), + CronSingleValueNumeric(value=5), + ) + ) + assert parse_monthdays_expr({"2, 8, 10, 20, 22"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=2), + CronSingleValueNumeric(value=8), + CronSingleValueNumeric(value=10), + CronSingleValueNumeric(value=20), + CronSingleValueNumeric(value=22), + ) + ) + + +def 
test_monthdays_parser_parses_last_day_wildcard() -> None: + assert parse_monthdays_expr({"L"}) == CronSingleValueLast() + assert parse_monthdays_expr({"6-L"}) == CronRange( + start=CronSingleValueNumeric(value=6), end=CronSingleValueLast() + ) + assert parse_monthdays_expr({"3-L/2"}) == CronRange( + start=CronSingleValueNumeric(value=3), end=CronSingleValueLast(), interval=2 + ) + + +def test_monthdays_parser_parses_steps() -> None: + assert parse_monthdays_expr({"1/7"}) == CronRange( + start=CronSingleValueNumeric(value=1), interval=7 + ) + + +def test_monthdays_parser_parses_range_steps() -> None: + assert parse_monthdays_expr({"15-30/3"}) == CronRange( + start=CronSingleValueNumeric(value=15), + end=CronSingleValueNumeric(value=30), + interval=3, + ) + + +def test_monthdays_parser_parses_nearest_weekday_wildcard() -> None: + assert parse_monthdays_expr({"17W"}) == CronNearestWeekday( + value=CronSingleValueNumeric(value=17) + ) + + +def test_monthdays_parser_parses_multiple_expressions() -> None: + result: Final = parse_monthdays_expr({"2", "13-15", "5-8"}) + assert isinstance(result, CronUnion) + assert set(result.exprs) == set( + ( + CronSingleValueNumeric(value=2), + CronRange( + start=CronSingleValueNumeric(value=13), + end=CronSingleValueNumeric(value=15), + ), + CronRange( + start=CronSingleValueNumeric(value=5), + end=CronSingleValueNumeric(value=8), + ), + ) + ) + + +def test_monthdays_parser_errors_on_invalid_values() -> None: + with raises(ValueError): + parse_monthdays_expr({"W"}) + with raises(ValueError): + parse_monthdays_expr({"32W"}) + with raises(ValueError): + parse_monthdays_expr({"36"}) + with raises(ValueError): + parse_monthdays_expr({"2W-15"}) + with raises(ValueError): + parse_monthdays_expr({"L-13"}) # range cannot start with L diff --git a/source/app/tests/cron/test_months_parser.py b/source/app/tests/cron/test_months_parser.py new file mode 100644 index 00000000..fe6d172e --- /dev/null +++ b/source/app/tests/cron/test_months_parser.py 
@@ -0,0 +1,232 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from pytest import raises + +from instance_scheduler.cron.expression import ( + CronAll, + CronRange, + CronSingleValueNumeric, + CronUnion, +) +from instance_scheduler.cron.parser import month_names, parse_months_expr + + +def test_months_parser_parses_none_as_all() -> None: + assert parse_months_expr(None) == CronAll() + + +def test_months_parser_errors_on_empty_set() -> None: + # string sets in DynamoDB are not allowed to be empty, so an empty set for a months + # field would be malformed + with raises(ValueError): + parse_months_expr(set()) + + +def test_months_parser_parses_wildcards() -> None: + assert parse_months_expr({"*"}) == CronAll() + assert parse_months_expr({"?"}) == CronAll() + + +def test_months_parser_parses_single_numeric_values() -> None: + for i in range(1, 13): + assert parse_months_expr({str(i)}) == CronSingleValueNumeric(value=i) + + +def test_months_parser_parses_single_names() -> None: + for i, month in enumerate(month_names): + expected = CronSingleValueNumeric(value=i + 1) + # full name + assert parse_months_expr({month}) == expected + # full name, all caps + assert parse_months_expr({month.upper()}) == expected + # truncated + assert parse_months_expr({month[:3]}) == expected + # truncated, all caps + assert parse_months_expr({month[:3].upper()}) == expected + + +def test_months_parser_parses_numeric_ranges() -> None: + assert parse_months_expr({"1-3"}) == CronRange( + start=CronSingleValueNumeric(value=1), + end=CronSingleValueNumeric(value=3), + ) + assert parse_months_expr({"7-8"}) == CronRange( + start=CronSingleValueNumeric(value=7), + end=CronSingleValueNumeric(value=8), + ) + assert parse_months_expr({"1-12"}) == CronRange( + start=CronSingleValueNumeric(value=1), + end=CronSingleValueNumeric(value=12), + ) + + +def test_months_parser_parses_numeric_ranges_wrapped() -> None: + assert 
parse_months_expr({"12-1"}) == CronRange( + start=CronSingleValueNumeric(value=12), + end=CronSingleValueNumeric(value=1), + ) + assert parse_months_expr({"2-1"}) == CronRange( + start=CronSingleValueNumeric(value=2), + end=CronSingleValueNumeric(value=1), + ) + + +def test_months_parser_parses_comma_separated() -> None: + assert parse_months_expr({"1,3,7"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=1), + CronSingleValueNumeric(value=3), + CronSingleValueNumeric(value=7), + ) + ) + assert parse_months_expr({"10, 2, 12, 11, 6"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=10), + CronSingleValueNumeric(value=2), + CronSingleValueNumeric(value=12), + CronSingleValueNumeric(value=11), + CronSingleValueNumeric(value=6), + ) + ) + + +def test_months_parser_parses_name_ranges() -> None: + assert parse_months_expr({"January-June"}) == CronRange( + start=CronSingleValueNumeric(value=1), + end=CronSingleValueNumeric(value=6), + ) + assert parse_months_expr({"July-November"}) == CronRange( + start=CronSingleValueNumeric(value=7), + end=CronSingleValueNumeric(value=11), + ) + + +def test_months_parser_parses_name_ranges_wrapped() -> None: + assert parse_months_expr({"October-June"}) == CronRange( + start=CronSingleValueNumeric(value=10), + end=CronSingleValueNumeric(value=6), + ) + assert parse_months_expr({"July-June"}) == CronRange( + start=CronSingleValueNumeric(value=7), + end=CronSingleValueNumeric(value=6), + ) + + +def test_months_parser_parses_comma_separated_names() -> None: + assert parse_months_expr({"january,march,july"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=1), + CronSingleValueNumeric(value=3), + CronSingleValueNumeric(value=7), + ) + ) + assert parse_months_expr( + {"october, february, december, november, june"} + ) == CronUnion( + exprs=( + CronSingleValueNumeric(value=10), + CronSingleValueNumeric(value=2), + CronSingleValueNumeric(value=12), + CronSingleValueNumeric(value=11), + CronSingleValueNumeric(value=6), + ) + 
) + + +def test_months_parser_parses_abbr_ranges() -> None: + assert parse_months_expr({"jan-jun"}) == CronRange( + start=CronSingleValueNumeric(value=1), + end=CronSingleValueNumeric(value=6), + ) + assert parse_months_expr({"jul-nov"}) == CronRange( + start=CronSingleValueNumeric(value=7), + end=CronSingleValueNumeric(value=11), + ) + + +def test_months_parser_parses_abbr_ranges_wrapped() -> None: + assert parse_months_expr({"Oct-Jun"}) == CronRange( + start=CronSingleValueNumeric(value=10), + end=CronSingleValueNumeric(value=6), + ) + assert parse_months_expr({"jul-jun"}) == CronRange( + start=CronSingleValueNumeric(value=7), + end=CronSingleValueNumeric(value=6), + ) + + +def test_months_parser_parses_comma_separated_abbrs() -> None: + assert parse_months_expr({"jan,mar,jul"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=1), + CronSingleValueNumeric(value=3), + CronSingleValueNumeric(value=7), + ) + ) + assert parse_months_expr({"Oct, Feb, Dec, Nov, Jun"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=10), + CronSingleValueNumeric(value=2), + CronSingleValueNumeric(value=12), + CronSingleValueNumeric(value=11), + CronSingleValueNumeric(value=6), + ) + ) + + +def test_months_parser_parses_steps() -> None: + assert parse_months_expr({"4/2"}) == CronRange( + start=CronSingleValueNumeric(value=4), interval=2 + ) + assert parse_months_expr({"Jan/3"}) == CronRange( + start=CronSingleValueNumeric(value=1), interval=3 + ) + + +def test_months_parser_parses_range_steps() -> None: + assert parse_months_expr({"3-9/2"}) == CronRange( + start=CronSingleValueNumeric(value=3), + end=CronSingleValueNumeric(value=9), + interval=2, + ) + assert parse_months_expr({"feb-sep/6"}) == CronRange( + start=CronSingleValueNumeric(value=2), + end=CronSingleValueNumeric(value=9), + interval=6, + ) + assert parse_months_expr({"October-December/2"}) == CronRange( + start=CronSingleValueNumeric(value=10), + end=CronSingleValueNumeric(value=12), + interval=2, + ) + + +def 
test_months_parser_parses_multiple_expressions() -> None: + result = parse_months_expr({"3", "4-6"}) + assert isinstance(result, CronUnion) + assert set(result.exprs) == { + CronSingleValueNumeric(value=3), + CronRange( + start=CronSingleValueNumeric(value=4), + end=CronSingleValueNumeric(value=6), + ), + } + + +def test_months_parser_errors_on_invalid() -> None: + with raises(ValueError): + parse_months_expr({"13"}) + with raises(ValueError): + parse_months_expr({"befruary"}) + with raises(ValueError): + parse_months_expr({"sup"}) + with raises(ValueError): + parse_months_expr({"L"}) + with raises(ValueError): + parse_months_expr({"3-L"}) + with raises(ValueError): + parse_months_expr({"^-6"}) + with raises(ValueError): + parse_months_expr({"L-2"}) + with raises(ValueError): + parse_months_expr({""}) diff --git a/source/app/tests/cron/test_weekdays_parser.py b/source/app/tests/cron/test_weekdays_parser.py new file mode 100644 index 00000000..3ddcf20e --- /dev/null +++ b/source/app/tests/cron/test_weekdays_parser.py @@ -0,0 +1,278 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from typing import Final + +from pytest import raises + +from instance_scheduler.cron.expression import ( + CronAll, + CronLastWeekday, + CronNthWeekday, + CronRange, + CronSingleValueLast, + CronSingleValueNumeric, + CronUnion, +) +from instance_scheduler.cron.parser import parse_weekdays_expr, weekday_names + + +def test_weekdays_parser_parses_none_as_all() -> None: + assert parse_weekdays_expr(None) == CronAll() + + +def test_weekdays_parser_errors_on_empty_set() -> None: + # string sets in DynamoDB are not allowed to be empty, so an empty set for a months + # field would be malformed + with raises(ValueError): + parse_weekdays_expr(set()) + + +def test_weekdays_parser_parses_wildcards() -> None: + assert parse_weekdays_expr({"*"}) == CronAll() + assert parse_weekdays_expr({"?"}) == CronAll() + + +def test_weekdays_parser_parses_single_numeric_values() -> None: + for i in range(0, 7): + assert parse_weekdays_expr({str(i)}) == CronSingleValueNumeric(value=i) + + +def test_weekdays_parser_parses_single_names() -> None: + for i, day in enumerate(weekday_names): + expected = CronSingleValueNumeric(value=i) + # full name + assert parse_weekdays_expr({day}) == expected + # full name, all caps + assert parse_weekdays_expr({day.upper()}) == expected + # truncated + assert parse_weekdays_expr({day[:3]}) == expected + # truncated, all caps + assert parse_weekdays_expr({day[:3].upper()}) == expected + + +def test_weekdays_parser_parses_numeric_ranges() -> None: + assert parse_weekdays_expr({"0-2"}) == CronRange( + start=CronSingleValueNumeric(value=0), + end=CronSingleValueNumeric(value=2), + ) + assert parse_weekdays_expr({"1-5"}) == CronRange( + start=CronSingleValueNumeric(value=1), + end=CronSingleValueNumeric(value=5), + ) + assert parse_weekdays_expr({"5-6"}) == CronRange( + start=CronSingleValueNumeric(value=5), + end=CronSingleValueNumeric(value=6), + ) + + +def test_weekdays_parser_parses_numeric_ranges_wrapped() -> None: + 
assert parse_weekdays_expr({"6-0"}) == CronRange( + start=CronSingleValueNumeric(value=6), + end=CronSingleValueNumeric(value=0), + ) + assert parse_weekdays_expr({"2-1"}) == CronRange( + start=CronSingleValueNumeric(value=2), + end=CronSingleValueNumeric(value=1), + ) + + +def test_weekdays_parser_parses_comma_separated() -> None: + assert parse_weekdays_expr({"1,3,6"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=1), + CronSingleValueNumeric(value=3), + CronSingleValueNumeric(value=6), + ) + ) + assert parse_weekdays_expr({"4, 2, 5, 0, 6"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=4), + CronSingleValueNumeric(value=2), + CronSingleValueNumeric(value=5), + CronSingleValueNumeric(value=0), + CronSingleValueNumeric(value=6), + ) + ) + + +def test_weekdays_parser_parses_name_ranges() -> None: + assert parse_weekdays_expr({"Monday-Friday"}) == CronRange( + start=CronSingleValueNumeric(value=0), + end=CronSingleValueNumeric(value=4), + ) + assert parse_weekdays_expr({"Saturday-Sunday"}) == CronRange( + start=CronSingleValueNumeric(value=5), + end=CronSingleValueNumeric(value=6), + ) + + +def test_weekdays_parser_parses_name_ranges_wrapped() -> None: + assert parse_weekdays_expr({"Sunday-Monday"}) == CronRange( + start=CronSingleValueNumeric(value=6), + end=CronSingleValueNumeric(value=0), + ) + assert parse_weekdays_expr({"wednesday-tuesday"}) == CronRange( + start=CronSingleValueNumeric(value=2), + end=CronSingleValueNumeric(value=1), + ) + + +def test_weekdays_parser_parses_comma_separated_names() -> None: + assert parse_weekdays_expr({"Tuesday,Thursday,Sunday"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=1), + CronSingleValueNumeric(value=3), + CronSingleValueNumeric(value=6), + ) + ) + assert parse_weekdays_expr( + {"Monday, Friday, Sunday, Tuesday, Wednesday"} + ) == CronUnion( + exprs=( + CronSingleValueNumeric(value=0), + CronSingleValueNumeric(value=4), + CronSingleValueNumeric(value=6), + CronSingleValueNumeric(value=1), + 
CronSingleValueNumeric(value=2), + ) + ) + + +def test_weekdays_parser_parses_abbr_ranges() -> None: + assert parse_weekdays_expr({"mon-fri"}) == CronRange( + start=CronSingleValueNumeric(value=0), + end=CronSingleValueNumeric(value=4), + ) + assert parse_weekdays_expr({"Sat-Sun"}) == CronRange( + start=CronSingleValueNumeric(value=5), + end=CronSingleValueNumeric(value=6), + ) + + +def test_weekdays_parser_parses_abbr_ranges_wrapped() -> None: + assert parse_weekdays_expr({"sun-mon"}) == CronRange( + start=CronSingleValueNumeric(value=6), + end=CronSingleValueNumeric(value=0), + ) + assert parse_weekdays_expr({"Wed-Tue"}) == CronRange( + start=CronSingleValueNumeric(value=2), + end=CronSingleValueNumeric(value=1), + ) + + +def test_weekdays_parser_parses_comma_separated_abbrs() -> None: + assert parse_weekdays_expr({"tue,thu,sun"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=1), + CronSingleValueNumeric(value=3), + CronSingleValueNumeric(value=6), + ) + ) + assert parse_weekdays_expr({"Mon, Fri, Sun, Tue, Wed"}) == CronUnion( + exprs=( + CronSingleValueNumeric(value=0), + CronSingleValueNumeric(value=4), + CronSingleValueNumeric(value=6), + CronSingleValueNumeric(value=1), + CronSingleValueNumeric(value=2), + ) + ) + + +def test_weekdays_parser_parses_multiple_expressions() -> None: + result: Final = parse_weekdays_expr({"2", "Sat-Sun", "Monday"}) + assert isinstance(result, CronUnion) + assert set(result.exprs) == set( + ( + CronSingleValueNumeric(value=2), + CronRange( + start=CronSingleValueNumeric(value=5), + end=CronSingleValueNumeric(value=6), + ), + CronSingleValueNumeric(value=0), + ) + ) + + +def test_weekdays_parser_parses_last_day_wildcard() -> None: + assert parse_weekdays_expr({"L"}) == CronSingleValueLast() + + +def test_weekdays_parser_parses_steps() -> None: + assert parse_weekdays_expr({"Mon/2"}) == CronRange( + start=CronSingleValueNumeric(value=0), interval=2 + ) + assert parse_weekdays_expr({"3/3"}) == CronRange( + 
start=CronSingleValueNumeric(value=3), interval=3 + ) + assert parse_weekdays_expr({"Thursday/2"}) == CronRange( + start=CronSingleValueNumeric(value=3), interval=2 + ) + + +def test_weekdays_parser_parses_range_steps() -> None: + assert parse_weekdays_expr({"1-5/3"}) == CronRange( + start=CronSingleValueNumeric(value=1), + end=CronSingleValueNumeric(value=5), + interval=3, + ) + assert parse_weekdays_expr({"Mon-Fri/2"}) == CronRange( + start=CronSingleValueNumeric(value=0), + end=CronSingleValueNumeric(value=4), + interval=2, + ) + assert parse_weekdays_expr({"Tuesday-Sunday/3"}) == CronRange( + start=CronSingleValueNumeric(value=1), + end=CronSingleValueNumeric(value=6), + interval=3, + ) + + +def test_weekdays_parser_parses_nth_weekday() -> None: + assert parse_weekdays_expr({"Thursday#3"}) == CronNthWeekday( + day=CronSingleValueNumeric(value=3), n=3 + ) + assert parse_weekdays_expr({"mon#1"}) == CronNthWeekday( + day=CronSingleValueNumeric(value=0), n=1 + ) + assert parse_weekdays_expr({"2#2"}) == CronNthWeekday( + day=CronSingleValueNumeric(value=2), n=2 + ) + + +def test_weekdays_parser_parses_last_weekday() -> None: + assert parse_weekdays_expr({"1L"}) == CronLastWeekday( + day=CronSingleValueNumeric(value=1) + ) + assert parse_weekdays_expr({"ThursdayL"}) == CronLastWeekday( + day=CronSingleValueNumeric(value=3) + ) + assert parse_weekdays_expr({"monL"}) == CronLastWeekday( + day=CronSingleValueNumeric(value=0) + ) + + +def test_invalid_value() -> None: + with raises(ValueError): + parse_weekdays_expr({"-1"}) + with raises(ValueError): + parse_weekdays_expr({"7"}) + with raises(ValueError): + parse_weekdays_expr({"wensday"}) + with raises(ValueError): + parse_weekdays_expr({"fro"}) + with raises(ValueError): + parse_weekdays_expr({"3-L"}) + with raises(ValueError): + parse_weekdays_expr({"^-6"}) + + +def test_invalid_nth_weekday() -> None: + with raises(ValueError): + parse_weekdays_expr({"0#6"}) + with raises(ValueError): + parse_weekdays_expr({"0#-1"}) + 
with raises(ValueError): + parse_weekdays_expr({"0#0"}) + with raises(ValueError): + parse_weekdays_expr({"L-2"}) # range cannot start with L diff --git a/source/app/tests/handler/test_asg.py b/source/app/tests/handler/test_asg.py new file mode 100644 index 00000000..55092467 --- /dev/null +++ b/source/app/tests/handler/test_asg.py @@ -0,0 +1,74 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from datetime import datetime, timezone +from os import environ +from typing import Final +from unittest.mock import MagicMock, patch +from uuid import UUID + +from freezegun import freeze_time + +from instance_scheduler.handler.asg import ( + ASG_SERVICE, + AsgMetricsDefinition, + send_operational_metrics, +) +from tests import DEFAULT_REGION + + +@patch("instance_scheduler.handler.asg.collect_metric") +def test_send_operational_metrics(mock_collect_metric: MagicMock) -> None: + # Prepare + metric_hour: Final = UUID(environ["METRICS_UUID"]).int % 24 + dt: Final = datetime( + year=2024, month=4, day=23, hour=metric_hour, minute=14, tzinfo=timezone.utc + ) + num_tagged_auto_scaling_groups = 4 + num_schedules = 1 + + # Call + with freeze_time(dt): + send_operational_metrics( + AsgMetricsDefinition( + region=DEFAULT_REGION, + num_tagged_auto_scaling_groups=num_tagged_auto_scaling_groups, + num_schedules=num_schedules, + ) + ) + + # Verify + assert mock_collect_metric.call_count == 1 + + instance_count_metric = mock_collect_metric.call_args[1].get("metric") + assert instance_count_metric.service == ASG_SERVICE + assert instance_count_metric.region == DEFAULT_REGION + assert instance_count_metric.num_instances == num_tagged_auto_scaling_groups + assert instance_count_metric.num_schedules == num_schedules + + +@patch("instance_scheduler.handler.asg.collect_metric") +def test_not_send_operational_metrics_when_not_time_to_send( + mock_collect_metric: MagicMock, +) -> None: + # Prepare + metric_hour: Final = 
UUID(environ["METRICS_UUID"]).int % 24 + current_hour = (metric_hour + 1) % 24 + dt: Final = datetime( + year=2024, month=4, day=23, hour=current_hour, minute=14, tzinfo=timezone.utc + ) + num_tagged_auto_scaling_groups = 4 + num_schedules = 1 + + # Call + with freeze_time(dt): + send_operational_metrics( + AsgMetricsDefinition( + region=DEFAULT_REGION, + num_tagged_auto_scaling_groups=num_tagged_auto_scaling_groups, + num_schedules=num_schedules, + ) + ) + + # Verify + assert mock_collect_metric.call_count == 0 diff --git a/source/app/tests/handler/test_asg_orchestrator.py b/source/app/tests/handler/test_asg_orchestrator.py new file mode 100644 index 00000000..f43fa8a8 --- /dev/null +++ b/source/app/tests/handler/test_asg_orchestrator.py @@ -0,0 +1,165 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import json +from collections.abc import Iterator +from datetime import datetime, timezone +from itertools import chain, product +from typing import TYPE_CHECKING, Final +from unittest.mock import MagicMock, patch + +from boto3 import client +from boto3.session import Session +from freezegun import freeze_time +from moto.core.models import DEFAULT_ACCOUNT_ID +from pytest import fixture + +from instance_scheduler.handler.asg_orchestrator import orchestrate_asgs +from instance_scheduler.handler.scheduling_request import SchedulingRequest +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from tests.context import MockLambdaContext +from tests.test_utils.mock_asg_orchestrator_environment import ( + MockAsgOrchestratorEnvironment, +) + +if TYPE_CHECKING: + from mypy_boto3_ssm.client import SSMClient +else: + SSMClient = object + + +@fixture +def remote_accounts(config_item_store: DdbConfigItemStore) -> Iterator[list[str]]: + param_name: Final = "my_ssm_param" + param_value: Final = "444444444444" + 
ssm_client: Final[SSMClient] = client("ssm") + ssm_client.put_parameter(Name=param_name, Value=param_value, Type="String") + + remote_account_ids: Final = [ + "222222222222", + "333333333333", + ] + param_substitution: Final = "{param:" + param_name + "}" + config_item_store.put( + DdbConfigItem( + organization_id="", + remote_account_ids=list(chain(remote_account_ids, [param_substitution])), + ) + ) + + yield list(chain(remote_account_ids, [param_value])) + + +dt: Final = datetime(2024, 2, 16, 9, 38, tzinfo=timezone.utc) + + +@freeze_time(dt) +def test_orchestrator_invokes_all_accounts(remote_accounts: list[str]) -> None: + asg_scheduler_name: Final = "my-asg-scheduler" + env: Final = MockAsgOrchestratorEnvironment( + asg_scheduler_name=asg_scheduler_name, + enable_schedule_hub_account=True, + schedule_regions=[], + ) + + mock_lambda: Final = MagicMock() + mock_lambda.invoke = MagicMock(return_value={"StatusCode": 200}) + + with ( + patch( + "instance_scheduler.handler.asg_orchestrator.get_client_with_standard_retry", + lambda x: {"lambda": mock_lambda}[x], + ), + ): + orchestrate_asgs(env, MockLambdaContext()) + + assert mock_lambda.invoke.call_count == 4 + default_region: Final = Session().region_name + called_accounts: Final = set() + for call_args in mock_lambda.invoke.call_args_list: + assert call_args.kwargs["FunctionName"] == asg_scheduler_name + assert call_args.kwargs["InvocationType"] == "Event" + payload: SchedulingRequest = json.loads(call_args.kwargs["Payload"]) + assert datetime.fromisoformat(payload["dispatch_time"]) == dt + called_accounts.add(payload["account"]) + assert payload["service"] == "asg" + assert payload["region"] == default_region + assert datetime.fromisoformat(payload["current_dt"]) == dt + + assert len(called_accounts) == 4 + assert called_accounts == set(chain([DEFAULT_ACCOUNT_ID], remote_accounts)) + + +@freeze_time(dt) +def test_orchestrator_invokes_remote_accounts_only(remote_accounts: list[str]) -> None: + asg_scheduler_name: 
Final = "my-asg-scheduler" + env: Final = MockAsgOrchestratorEnvironment( + asg_scheduler_name=asg_scheduler_name, + enable_schedule_hub_account=False, + schedule_regions=[], + ) + + mock_lambda: Final = MagicMock() + mock_lambda.invoke = MagicMock(return_value={"StatusCode": 200}) + + with ( + patch( + "instance_scheduler.handler.asg_orchestrator.get_client_with_standard_retry", + lambda x: {"lambda": mock_lambda}[x], + ), + ): + orchestrate_asgs(env, MockLambdaContext()) + + assert mock_lambda.invoke.call_count == 3 + default_region: Final = Session().region_name + called_accounts: Final = set() + for call_args in mock_lambda.invoke.call_args_list: + assert call_args.kwargs["FunctionName"] == asg_scheduler_name + assert call_args.kwargs["InvocationType"] == "Event" + payload: SchedulingRequest = json.loads(call_args.kwargs["Payload"]) + assert datetime.fromisoformat(payload["dispatch_time"]) == dt + called_accounts.add(payload["account"]) + assert payload["service"] == "asg" + assert payload["region"] == default_region + assert datetime.fromisoformat(payload["current_dt"]) == dt + + assert len(called_accounts) == 3 + assert called_accounts == set(remote_accounts) + + +@freeze_time(dt) +def test_orchestrator_invokes_across_regions(remote_accounts: list[str]) -> None: + asg_scheduler_name: Final = "my-asg-scheduler" + schedule_regions: Final = ["us-east-1", "us-west-2", "ap-southeast-1"] + env: Final = MockAsgOrchestratorEnvironment( + asg_scheduler_name=asg_scheduler_name, + enable_schedule_hub_account=True, + schedule_regions=schedule_regions, + ) + + mock_lambda: Final = MagicMock() + mock_lambda.invoke = MagicMock(return_value={"StatusCode": 200}) + + with ( + patch( + "instance_scheduler.handler.asg_orchestrator.get_client_with_standard_retry", + lambda x: {"lambda": mock_lambda}[x], + ), + ): + orchestrate_asgs(env, MockLambdaContext()) + + assert mock_lambda.invoke.call_count == 12 + called_account_regions: Final = set() + for call_args in 
mock_lambda.invoke.call_args_list: + assert call_args.kwargs["FunctionName"] == asg_scheduler_name + assert call_args.kwargs["InvocationType"] == "Event" + payload: SchedulingRequest = json.loads(call_args.kwargs["Payload"]) + assert datetime.fromisoformat(payload["dispatch_time"]) == dt + called_account_regions.add((payload["account"], payload["region"])) + assert payload["service"] == "asg" + assert datetime.fromisoformat(payload["current_dt"]) == dt + + assert len(called_account_regions) == 12 + assert called_account_regions == set( + product(chain([DEFAULT_ACCOUNT_ID], remote_accounts), schedule_regions) + ) diff --git a/source/app/tests/handler/test_cfn_schedule_handler.py b/source/app/tests/handler/test_cfn_schedule_handler.py index f725b5d0..22d9b1e5 100644 --- a/source/app/tests/handler/test_cfn_schedule_handler.py +++ b/source/app/tests/handler/test_cfn_schedule_handler.py @@ -1,21 +1,33 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 from datetime import time -from unittest.mock import ANY, MagicMock, patch +from unittest.mock import MagicMock, patch +from zoneinfo import ZoneInfo -from instance_scheduler.configuration import get_global_configuration from instance_scheduler.configuration.instance_schedule import InstanceSchedule from instance_scheduler.configuration.running_period import RunningPeriod from instance_scheduler.configuration.running_period_dict_element import ( RunningPeriodDictElement, ) +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.cron.parser import ( + parse_monthdays_expr, + parse_months_expr, + parse_weekdays_expr, +) from instance_scheduler.handler.cfn_schedule import ( CfnScheduleHandler, + CfnSchedulePeriodProperties, CfnScheduleResourceProperties, ) +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.model.store.schedule_definition_store 
import ( + ScheduleDefinitionStore, +) +from instance_scheduler.util.app_env import AppEnv from instance_scheduler.util.custom_resource import CustomResourceRequest from tests.context import MockLambdaContext -from tests.logger import MockLogger +from tests.test_utils.any_nonempty_string import AnyNonEmptyString from tests.test_utils.unordered_list import UnorderedList stack_arn = "arn:aws:cloudformation:us-west-2:123456789012:stack/teststack/51af3dc0-da77-11e4-872e-1234567db123" @@ -34,18 +46,22 @@ def new_create_request( "LogicalResourceId": "CFNLogicalID", "PhysicalResourceId": "PhysicalID", "ResourceProperties": resource_properties, + "OldResourceProperties": {}, } @patch.object(CfnScheduleHandler, "_send_response") def test_minimalist_cfn_schedule_creation( - mocked_cfn_callback: MagicMock, config_table: None + mocked_cfn_callback: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, + app_env: AppEnv, ) -> None: event = new_create_request( { "NoStackPrefix": "True", "ServiceToken": "serviceTokenARN", - "Periods": [{"Description": "Stop at 5pm", "EndTime": "16:59"}], + "Periods": [{"EndTime": "16:59"}], } ) handler = CfnScheduleHandler(event, MockLambdaContext()) @@ -54,36 +70,71 @@ def test_minimalist_cfn_schedule_creation( expected_schedule = InstanceSchedule( name="CFNLogicalID", configured_in_stack=stack_arn, + timezone=app_env.default_timezone, stop_new_instances=True, - use_metrics=False, - description="", periods=[ RunningPeriodDictElement( - period=RunningPeriod(name=ANY, endtime=time(16, 59, 0)), - instancetype=None, + period=RunningPeriod(name=AnyNonEmptyString(), endtime=time(16, 59, 0)) ) ], ) - assert handler.response["Status"] == "SUCCESS", handler.response[ - "Reason" - ] # todo: appears to return timeout even for exceptions within code? 
(6/30/23) + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "SUCCESS", response["Reason"] + # todo: appears to return timeout even for exceptions within code? (6/30/23) # -- action: improve error reporting - global_config = get_global_configuration(MockLogger()) - assert len(global_config.schedules) == 1 - assert "CFNLogicalID" in global_config.schedules - assert global_config.schedules["CFNLogicalID"] == expected_schedule + schedules = schedule_store.find_all() + assert len(schedules) == 1 + assert "CFNLogicalID" in schedules + saved_schedule = schedules["CFNLogicalID"].to_instance_schedule(period_store) + assert saved_schedule == expected_schedule @patch.object(CfnScheduleHandler, "_send_response") -def test_cfn_schedule_with_all_parameters( - mocked_cfn_callback: MagicMock, config_table: None +def test_cfn_schedule_with_legacy_maint_win_str_type( + mocked_cfn_callback: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, + app_env: AppEnv, ) -> None: - # untested properties: - # "Overwrite" - # "OverrideStatus" - # "Metrics" + event = new_create_request( + { + "NoStackPrefix": "True", + "ServiceToken": "serviceTokenARN", + "Periods": [{"EndTime": "16:59"}], + "SsmMaintenanceWindow": "my_window_name", # backwards compatibility for str type + } + ) + handler = CfnScheduleHandler(event, MockLambdaContext()) + handler.handle_request() + expected_schedule = InstanceSchedule( + name="CFNLogicalID", + configured_in_stack=stack_arn, + timezone=app_env.default_timezone, + ssm_maintenance_window=["my_window_name"], + periods=[ + RunningPeriodDictElement( + period=RunningPeriod(name=AnyNonEmptyString(), endtime=time(16, 59, 0)) + ) + ], + ) + + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "SUCCESS", response["Reason"] + schedules = schedule_store.find_all() + assert len(schedules) == 1 + assert "CFNLogicalID" in schedules + saved_schedule = 
schedules["CFNLogicalID"].to_instance_schedule(period_store) + assert saved_schedule == expected_schedule + + +@patch.object(CfnScheduleHandler, "_send_response") +def test_cfn_schedule_with_all_parameters( + mocked_cfn_callback: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, +) -> None: event = new_create_request( { "ServiceToken": "ServiceTokenARN", @@ -95,8 +146,9 @@ def test_cfn_schedule_with_all_parameters( "Hibernate": "True", "RetainRunning": "True", "StopNewInstances": "True", - "UseMaintenanceWindow": "True", - "SsmMaintenanceWindow": "my_window_name", + "SsmMaintenanceWindow": ["my_window_name"], + "Metrics": "True", # removed in 3.0, but shouldn't cause the template to error (yet) + "OverrideStatus": "running", "Periods": [ { "Description": "run from 9-5 on the first 3 days of March", @@ -120,33 +172,36 @@ def test_cfn_schedule_with_all_parameters( expected_schedule = InstanceSchedule( name="schedule_name", description="template with all values", - timezone="America/New_York", + timezone=ZoneInfo("America/New_York"), configured_in_stack=stack_arn, enforced=True, hibernate=True, retain_running=True, stop_new_instances=True, - use_maintenance_window=True, - ssm_maintenance_window="my_window_name", - use_metrics=False, + ssm_maintenance_window=["my_window_name"], + override_status="running", periods=UnorderedList( [ RunningPeriodDictElement( period=RunningPeriod( - name=ANY, + name=AnyNonEmptyString(), begintime=time(9, 0, 0), endtime=time(17, 0, 0), - monthdays={1, 2, 3}, - months={3}, + cron_recurrence=CronRecurrenceExpression( + monthdays=parse_monthdays_expr({"1-3"}), + months=parse_months_expr({"3"}), + ), ), instancetype="t2.micro", ), RunningPeriodDictElement( period=RunningPeriod( - name=ANY, + name=AnyNonEmptyString(), begintime=time(14, 0, 0), endtime=time(17, 0, 0), - weekdays={5, 6}, + cron_recurrence=CronRecurrenceExpression( + weekdays=parse_weekdays_expr({"sat-sun"}), + ), ), 
instancetype="t2.micro", ), @@ -156,10 +211,165 @@ def test_cfn_schedule_with_all_parameters( handler = CfnScheduleHandler(event, MockLambdaContext()) handler.handle_request() - - assert handler.response["Status"] == "SUCCESS", handler.response["Reason"] - global_config = get_global_configuration(MockLogger()) - assert len(global_config.schedules) == 1 - assert "schedule_name" in global_config.schedules - saved_schedule = global_config.schedules["schedule_name"] + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "SUCCESS", response["Reason"] + schedules = schedule_store.find_all() + assert len(schedules) == 1 + assert "schedule_name" in schedules + saved_schedule = schedules["schedule_name"].to_instance_schedule(period_store) assert saved_schedule == expected_schedule + + # expect that all periods are also tagged with configured_in_stack parameter: + for period in period_store.find_all().values(): + assert period.configured_in_stack == stack_arn + + +@patch.object(CfnScheduleHandler, "_send_response") +def test_cfn_schedule_fail_when_invalid_schedule_property_provided( + mocked_cfn_callback: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, +) -> None: + event = new_create_request( + { + "ServiceToken": "serviceTokenARN", + "NoStackPrefix": "True", + "Timezone": "UTC", + "Invalid": "Invalid parameter", # Should fail as it is not a supported property + } # type: ignore[typeddict-unknown-key] + ) + handler = CfnScheduleHandler(event, MockLambdaContext()) + handler.handle_request() + + error_message = f"Unknown schedule property Invalid, valid properties are {CfnScheduleResourceProperties.__annotations__.keys()}" + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "FAILED" + assert response["Reason"] == error_message + + schedules = schedule_store.find_all() + periods = period_store.find_all() + assert len(schedules) == 0 + assert len(periods) == 0 + + 
+@patch.object(CfnScheduleHandler, "_send_response") +def test_cfn_schedule_fail_when_schedule_property_case_not_match( + mocked_cfn_callback: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, +) -> None: + event = new_create_request( + { + "ServiceToken": "serviceTokenARN", + "NoStackPrefix": "True", + "TimeZone": "UTC", # Should fail as `Timezone` is the expected property + "Periods": [{"BeginTime": "00:00"}], + } # type: ignore[typeddict-unknown-key] + ) + handler = CfnScheduleHandler(event, MockLambdaContext()) + handler.handle_request() + + error_message = f"Unknown schedule property TimeZone, valid properties are {CfnScheduleResourceProperties.__annotations__.keys()}" + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "FAILED" + assert response["Reason"] == error_message + + schedules = schedule_store.find_all() + periods = period_store.find_all() + assert len(schedules) == 0 + assert len(periods) == 0 + + +@patch.object(CfnScheduleHandler, "_send_response") +def test_cfn_schedule_fail_when_schedule_period_empty( + mocked_cfn_callback: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, +) -> None: + event = new_create_request( + { + "ServiceToken": "serviceTokenARN", + "NoStackPrefix": "True", + "Timezone": "UTC", + "Periods": [], # Should fail as it is empty + }, + ) + handler = CfnScheduleHandler(event, MockLambdaContext()) + handler.handle_request() + + logical_id = event["LogicalResourceId"] + error_message = f"Error parsing schedule {logical_id}: At least one period must be specified for a schedule" + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "FAILED" + assert response["Reason"] == error_message + + schedules = schedule_store.find_all() + periods = period_store.find_all() + assert len(schedules) == 0 + assert len(periods) == 0 + + +@patch.object(CfnScheduleHandler, "_send_response") +def 
test_cfn_schedule_fail_when_invalid_schedule_period_provided( + mocked_cfn_callback: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, +) -> None: + event = new_create_request( + { + "ServiceToken": "serviceTokenARN", + "NoStackPrefix": "True", + "Timezone": "UTC", + "Periods": [ + { + "BeginTime": "00:00", + "EndTime": "01:00", + "Invalid": "Invalid parameter", # Should fail as it is not a supported property + } # type: ignore[typeddict-unknown-key] + ], + }, + ) + handler = CfnScheduleHandler(event, MockLambdaContext()) + handler.handle_request() + + error_message = f"Unknown period property Invalid, valid properties are {CfnSchedulePeriodProperties.__annotations__.keys()}" + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "FAILED" + assert response["Reason"] == error_message + + schedules = schedule_store.find_all() + periods = period_store.find_all() + assert len(schedules) == 0 + assert len(periods) == 0 + + +@patch.object(CfnScheduleHandler, "_send_response") +def test_cfn_schedule_fail_when_schedule_period_property_case_not_match( + mocked_cfn_callback: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, +) -> None: + event = new_create_request( + { + "ServiceToken": "serviceTokenARN", + "NoStackPrefix": "True", + "Timezone": "UTC", + "Periods": [ + { + "Endtime": "01:00", # Should fail as `EndTime` is the expected property + } # type: ignore[typeddict-unknown-key] + ], + }, + ) + handler = CfnScheduleHandler(event, MockLambdaContext()) + handler.handle_request() + + error_message = f"Unknown period property Endtime, valid properties are {CfnSchedulePeriodProperties.__annotations__.keys()}" + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "FAILED" + assert response["Reason"] == error_message + + schedules = schedule_store.find_all() + periods = period_store.find_all() + assert len(schedules) == 0 + assert 
len(periods) == 0 diff --git a/source/app/tests/handler/test_eventbus_request_handler.py b/source/app/tests/handler/test_eventbus_request_handler.py deleted file mode 100644 index ffe2f9bc..00000000 --- a/source/app/tests/handler/test_eventbus_request_handler.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -from aws_lambda_powertools.utilities.typing import LambdaContext - -from instance_scheduler.handler.spoke_registration import SpokeRegistrationHandler -from instance_scheduler.util.app_env import AppEnv -from instance_scheduler.util.dynamodb_utils import DynamoDBUtils - - -def test_handler(app_env: AppEnv, config_table: None) -> None: - create_event = { - "version": "0", - "id": "c4556ddf-88ad-1b22-e482-19f2d72eb7e3", - "detail-type": "Parameter Store Change", - "source": "aws.ssm", - "account": "111111111111", - "time": "2021-08-19T06:26:38Z", - "region": "us-east-1", - "resources": [ - "arn:aws:ssm:us-east-1:111111111111:parameter/scheduler/do-not-delete-manually/static" - ], - "detail": { - "name": "/scheduler/do-not-delete-manually/static", - "type": "String", - "operation": "Create", - }, - } - dynamodb_table = DynamoDBUtils.get_dynamodb_table_resource_ref( - app_env.config_table_name - ) - dynamodb_table.put_item( - Item={"type": "config", "name": "scheduler", "namespace": "static"} - ) - handler = SpokeRegistrationHandler(create_event, LambdaContext()) - response: bool | str = handler.is_handling_request(create_event) - assert response is True - - response = handler.handle_request() - assert response == "Exiting event bus request handler" - delete_event = { - "version": "0", - "id": "c4556ddf-88ad-1b22-e482-19f2d72eb7e3", - "detail-type": "Parameter Store Change", - "source": "aws.ssm", - "account": "111111111111", - "time": "2021-08-19T06:26:38Z", - "region": "us-east-1", - "resources": [ - 
"arn:aws:ssm:us-east-1:111111111111:parameter/scheduler/do-not-delete-manually/static" - ], - "detail": { - "name": "/scheduler/do-not-delete-manually/static", - "type": "String", - "operation": "Delete", - }, - } - handler = SpokeRegistrationHandler(delete_event, LambdaContext()) - response = handler.is_handling_request(delete_event) - assert response is True - - response = handler.handle_request() - assert response == "Exiting event bus request handler" diff --git a/source/app/tests/handler/test_metrics_uuid_custom_resource.py b/source/app/tests/handler/test_metrics_uuid_custom_resource.py new file mode 100644 index 00000000..063e784d --- /dev/null +++ b/source/app/tests/handler/test_metrics_uuid_custom_resource.py @@ -0,0 +1,82 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import uuid +from typing import TYPE_CHECKING +from unittest.mock import MagicMock, patch +from uuid import UUID + +import boto3 + +from instance_scheduler.handler.metrics_uuid_custom_resource import ( + CreateUuidRequest, + MetricsUuidCustomResource, + handle_metrics_uuid_request, +) +from instance_scheduler.util.custom_resource import ( + CustomResourceRequest, + CustomResourceResponse, +) +from tests.context import MockLambdaContext +from tests.test_utils.mock_metrics_uuid_environment import MockMetricsUuidEnviron + +if TYPE_CHECKING: + from mypy_boto3_ssm.client import SSMClient +else: + SSMClient = object + +stack_arn = "arn:aws:cloudformation:us-west-2:123456789012:stack/teststack/51af3dc0-da77-11e4-872e-1234567db123" + + +def new_create_request() -> CustomResourceRequest[CreateUuidRequest]: + return { + "RequestType": "Create", + "ServiceToken": "LambdaARN", + "ResponseURL": "url", + "StackId": stack_arn, + "RequestId": "requestId", + "ResourceType": "Custom::ServiceInstanceSchedule", + "LogicalResourceId": "CFNLogicalID", + "PhysicalResourceId": "PhysicalID", + "ResourceProperties": {}, + "OldResourceProperties": {}, + 
} + + +@patch.object(MetricsUuidCustomResource, "_send_response") +def test_metrics_uuid_generates_new_uuid_when_one_not_present( + mocked_cfn_callback: MagicMock, moto_backend: None +) -> None: + with MockMetricsUuidEnviron(): + event = new_create_request() + handle_metrics_uuid_request(event, MockLambdaContext()) + + response: CustomResourceResponse = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "SUCCESS" + assert "Uuid" in response["Data"] + assert UUID(response["Data"]["Uuid"]) is not None # is a valid uuid + + +@patch.object(MetricsUuidCustomResource, "_send_response") +def test_metrics_uuid_uses_existing_uuid_when_one_is_present( + mocked_cfn_callback: MagicMock, + moto_backend: None, +) -> None: + with MockMetricsUuidEnviron() as env: + existing_uuid = uuid.uuid4() + stack_id = env.stack_id[-36:] + uuid_key = env.uuid_key + str(stack_id) + ssm_client: SSMClient = boto3.client("ssm") + ssm_client.put_parameter( + Name=uuid_key, + Description="metrics uuid as stored in v1.5.3 and below", + Value=str(existing_uuid), + Type="String", + ) + + event = new_create_request() + handle_metrics_uuid_request(event, MockLambdaContext()) + + response: CustomResourceResponse = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "SUCCESS" + assert "Uuid" in response["Data"] + assert UUID(response["Data"]["Uuid"]) == existing_uuid diff --git a/source/app/tests/handler/test_remote_registration_custom_resource.py b/source/app/tests/handler/test_remote_registration_custom_resource.py new file mode 100644 index 00000000..3963c382 --- /dev/null +++ b/source/app/tests/handler/test_remote_registration_custom_resource.py @@ -0,0 +1,214 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +import json +from os import environ +from typing import Iterator, Mapping +from unittest.mock import MagicMock, patch + +import pytest +from _pytest.fixtures import fixture +from aws_lambda_powertools import Logger + +from instance_scheduler.handler.environments.remote_registration_environment import ( + RemoteRegistrationEnvironment, +) +from instance_scheduler.handler.remote_registration_custom_resource import ( + RemoteRegistrationCustomResourceHandler, + RemoteRegistrationProperties, + handle_remote_registration_request, +) +from instance_scheduler.util.app_env import AppEnvError +from instance_scheduler.util.custom_resource import CustomResourceRequest +from tests.context import MockLambdaContext + + +@fixture +def mocked_environment() -> Iterator[Mapping[str, str]]: + mocked_environment = { + "USER_AGENT_EXTRA": "test-user-agent-extra", + "HUB_REGISTRATION_LAMBDA_ARN": "arn:aws:lambda:us-east-1:111122223333:function:Test-SpokeRegistration", + } + with patch.dict(environ, mocked_environment, clear=True): + yield mocked_environment + + +@fixture +def custom_resource_base_event() -> CustomResourceRequest[RemoteRegistrationProperties]: + return { + "RequestType": "Create", + "ServiceToken": "LambdaARN", + "ResponseURL": "url", + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/teststack/51af3dc0-da77-11e4-872e-1234567db123", + "RequestId": "requestId", + "ResourceType": "Custom::RegisterSpokeAccount", + "LogicalResourceId": "CFNLogicalID", + "PhysicalResourceId": "PhysicalID", + "ResourceProperties": {}, + "OldResourceProperties": {}, + } + + +@fixture +def custom_resource_create_event( + custom_resource_base_event: CustomResourceRequest[RemoteRegistrationProperties], +) -> CustomResourceRequest[RemoteRegistrationProperties]: + custom_resource_base_event["RequestType"] = "Create" + return custom_resource_base_event + + +@fixture +def custom_resource_update_event( + custom_resource_base_event: 
CustomResourceRequest[RemoteRegistrationProperties], +) -> CustomResourceRequest[RemoteRegistrationProperties]: + custom_resource_base_event["RequestType"] = "Update" + return custom_resource_base_event + + +@fixture +def custom_resource_delete_event( + custom_resource_base_event: CustomResourceRequest[RemoteRegistrationProperties], +) -> CustomResourceRequest[RemoteRegistrationProperties]: + custom_resource_base_event["RequestType"] = "Delete" + return custom_resource_base_event + + +@fixture +def mocked_lambda_invoke() -> Iterator[MagicMock]: + with patch.object( + RemoteRegistrationCustomResourceHandler, "_lambda_client" + ) as lambda_client: + with patch.object(lambda_client, "invoke") as invoke_func: + yield invoke_func + + +@patch.object(RemoteRegistrationCustomResourceHandler, "_send_response") +def test_remote_registration_custom_resource_create( + mocked_cfn_callback: MagicMock, + mocked_lambda_invoke: MagicMock, + mocked_environment: Mapping[str, str], + custom_resource_create_event: CustomResourceRequest[RemoteRegistrationProperties], +) -> None: + function_arn = ( + "arn:aws:lambda:us-east-1:111122223333:function:Test-SpokeRegistration" + ) + context = MockLambdaContext() + logger = Logger() + env = RemoteRegistrationEnvironment( + user_agent_extra="UserAgentExtra", hub_registration_lambda_arn=function_arn + ) + + handler = RemoteRegistrationCustomResourceHandler( + custom_resource_create_event, context, logger, env + ) + handler.handle_request() + assert mocked_lambda_invoke.call_args.kwargs["FunctionName"] == function_arn + assert mocked_lambda_invoke.call_args.kwargs["Payload"] == str.encode( + json.dumps({"account": "123456789012", "operation": "Register"}) + ) + + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "SUCCESS", response["Reason"] + + +@patch.object(RemoteRegistrationCustomResourceHandler, "_send_response") +def test_remote_registration_custom_resource_update( + mocked_cfn_callback: MagicMock, + 
mocked_environment: Mapping[str, str], + custom_resource_update_event: CustomResourceRequest[RemoteRegistrationProperties], +) -> None: + function_arn = ( + "arn:aws:lambda:us-east-1:111122223333:function:Test-SpokeRegistration" + ) + context = MockLambdaContext() + logger = Logger() + env = RemoteRegistrationEnvironment( + user_agent_extra="UserAgentExtra", hub_registration_lambda_arn=function_arn + ) + + handler = RemoteRegistrationCustomResourceHandler( + custom_resource_update_event, context, logger, env + ) + handler.handle_request() + + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "SUCCESS", response["Reason"] + + +@patch.object(RemoteRegistrationCustomResourceHandler, "_send_response") +def test_remote_registration_custom_resource_delete( + mocked_cfn_callback: MagicMock, + mocked_lambda_invoke: MagicMock, + mocked_environment: Mapping[str, str], + custom_resource_delete_event: CustomResourceRequest[RemoteRegistrationProperties], +) -> None: + function_arn = ( + "arn:aws:lambda:us-east-1:111122223333:function:Test-SpokeRegistration" + ) + context = MockLambdaContext() + logger = Logger() + env = RemoteRegistrationEnvironment( + user_agent_extra="UserAgentExtra", hub_registration_lambda_arn=function_arn + ) + + handler = RemoteRegistrationCustomResourceHandler( + custom_resource_delete_event, context, logger, env + ) + handler.handle_request() + + assert mocked_lambda_invoke.call_args.kwargs["FunctionName"] == function_arn + assert mocked_lambda_invoke.call_args.kwargs["Payload"] == str.encode( + json.dumps({"account": "123456789012", "operation": "Deregister"}) + ) + + response = mocked_cfn_callback.call_args.args[0] + assert response["Status"] == "SUCCESS", response["Reason"] + + +def test_remote_registration_from_env( + mocked_environment: Mapping[str, str], +) -> None: + env = RemoteRegistrationEnvironment.from_env() + assert env.user_agent_extra == mocked_environment["USER_AGENT_EXTRA"] + assert ( + 
env.hub_registration_lambda_arn + == mocked_environment["HUB_REGISTRATION_LAMBDA_ARN"] + ) + + +def test_remote_registration_missing_env_var() -> None: + mocked_environment = { + "USER_AGENT_EXTRA": "test-user-agent-extra", + } + with patch.dict(environ, mocked_environment, clear=True): + with pytest.raises(AppEnvError): + RemoteRegistrationEnvironment.from_env() + + +def test_remote_registration_sends_cfn_response( + custom_resource_base_event: CustomResourceRequest[RemoteRegistrationProperties], +) -> None: + with patch( + "instance_scheduler.handler.remote_registration_custom_resource.http" + ) as mocked_http: + context = MockLambdaContext() + handle_remote_registration_request(custom_resource_base_event, context) + mocked_http.request.assert_called_once_with( + "PUT", + custom_resource_base_event.get("ResponseURL"), + headers={"Content-Type": "application/json"}, + body=json.dumps( + { + "Status": "FAILED", + "Reason": "Response sent to cloudformation to prevent hung resource", + "PhysicalResourceId": custom_resource_base_event.get( + "LogicalResourceId" + ), + "StackId": custom_resource_base_event.get("StackId"), + "RequestId": custom_resource_base_event.get("RequestId"), + "LogicalResourceId": custom_resource_base_event.get( + "LogicalResourceId" + ), + } + ), + ) diff --git a/source/app/tests/handler/test_schedule_update.py b/source/app/tests/handler/test_schedule_update.py new file mode 100644 index 00000000..ae014c68 --- /dev/null +++ b/source/app/tests/handler/test_schedule_update.py @@ -0,0 +1,95 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from typing import Final, Literal + +from aws_lambda_powertools.utilities.data_classes.dynamo_db_stream_event import ( + DynamoDBRecord, +) + +from instance_scheduler.handler.schedule_update import schedule_names_from_records +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.in_memory_schedule_definition_store import ( + InMemoryScheduleDefinitionStore, +) + + +def ddb_record( + *, record_type: Literal["schedule", "period"], record_name: str +) -> DynamoDBRecord: + return DynamoDBRecord( + { + "eventID": "d9fffe842d88d0cc3d325208f0c3a7c5", + "eventName": "MODIFY", + "eventVersion": "1.1", + "eventSource": "aws:dynamodb", + "awsRegion": "us-west-2", + "dynamodb": { + "ApproximateCreationDateTime": 1708554658, + "Keys": { + "name": {"S": record_name}, + "type": {"S": record_type}, + }, + "SequenceNumber": "155230700000000069047952291", + "SizeBytes": 35, + "StreamViewType": "KEYS_ONLY", + }, + "eventSourceARN": "arn:aws:dynamodb:us-west-2:111111111111:table/my-config-table/stream/2024-02-21T15:35:36.225", + } + ) + + +def test_schedule_names_from_records() -> None: + store: Final = InMemoryScheduleDefinitionStore( + { + "foo": ScheduleDefinition( + name="foo", + periods=[ + PeriodIdentifier.of("a"), + PeriodIdentifier.of("b"), + PeriodIdentifier.of("c"), + ], + ), + "bar": ScheduleDefinition( + name="bar", + periods=[ + PeriodIdentifier.of("b"), + PeriodIdentifier.of("d"), + PeriodIdentifier.of("e"), + ], + ), + "baz": ScheduleDefinition( + name="baz", + periods=[ + PeriodIdentifier.of("a"), + PeriodIdentifier.of("f"), + PeriodIdentifier.of("g"), + ], + ), + "qux": ScheduleDefinition( + name="qux", + periods=[ + PeriodIdentifier.of("b"), + PeriodIdentifier.of("h"), + PeriodIdentifier.of("i"), + ], + ), + } + ) + + periods: Final = [ddb_record(record_type="period", record_name="b")] + + assert 
set(schedule_names_from_records(periods, store)) == set( + ["foo", "bar", "qux"] + ) + + schedules: Final = [ddb_record(record_type="schedule", record_name="bar")] + + assert list(schedule_names_from_records(schedules, store)) == ["bar"] + + mixed: Final = [ + ddb_record(record_type="period", record_name="h"), + ddb_record(record_type="schedule", record_name="baz"), + ] + + assert set(schedule_names_from_records(mixed, store)) == set(["baz", "qux"]) diff --git a/source/app/tests/handler/test_scheduler_setup_handler.py b/source/app/tests/handler/test_scheduler_setup_handler.py index 0c17e9f1..e561c937 100644 --- a/source/app/tests/handler/test_scheduler_setup_handler.py +++ b/source/app/tests/handler/test_scheduler_setup_handler.py @@ -1,183 +1,290 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -from os import environ +from typing import Iterator +from unittest.mock import patch import boto3 - -from instance_scheduler.handler.config_resource import SchedulerSetupHandler +from pytest import fixture + +from instance_scheduler.handler.config_resource import ( + SchedulerSetupHandler, + ServiceSetupRequest, + ServiceSetupResourceProperties, + is_org_id, +) +from instance_scheduler.handler.setup_demo_data import DEMO_PERIODS, DEMO_SCHEDULES +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleDefinitionStore, +) from instance_scheduler.util.app_env import AppEnv +from instance_scheduler.util.custom_resource import CustomResourceRequest from tests.context import MockLambdaContext +from tests.test_utils.app_env_utils import mock_app_env +from 
tests.test_utils.unordered_list import UnorderedList + +stack_arn = "arn:aws:cloudformation:us-west-2:123456789012:stack/teststack/51af3dc0-da77-11e4-872e-1234567db123" + + +@fixture(autouse=True) +def intercept_cfn_responses() -> Iterator[None]: + with patch.object(SchedulerSetupHandler, "_send_response"): + yield + + +def new_create_request( + resource_properties: ServiceSetupResourceProperties, +) -> ServiceSetupRequest: + return { + "RequestType": "Create", + "ServiceToken": "LambdaARN", + "ResponseURL": "url", + "StackId": stack_arn, + "RequestId": "requestId", + "ResourceType": "Custom::ServiceInstanceSchedule", + "LogicalResourceId": "CFNLogicalID", + "PhysicalResourceId": "PhysicalID", + "ResourceProperties": resource_properties, + } + + +def new_update_request( + resource_properties: ServiceSetupResourceProperties, + old_resource_properties: ServiceSetupResourceProperties, +) -> CustomResourceRequest[ServiceSetupResourceProperties]: + return { + "RequestType": "Update", + "ServiceToken": "LambdaARN", + "ResponseURL": "url", + "StackId": stack_arn, + "RequestId": "requestId", + "ResourceType": "Custom::ServiceInstanceSchedule", + "LogicalResourceId": "CFNLogicalID", + "PhysicalResourceId": "PhysicalID", + "ResourceProperties": resource_properties, + "OldResourceProperties": old_resource_properties, + } def test_a_valid_org_id_pattern() -> None: - handler = SchedulerSetupHandler( - {"ResourceProperties": {"stack_version": "test"}}, MockLambdaContext() - ) - response = handler.get_valid_org_id("o-x1mhq1lvsr") - assert response + assert is_org_id("o-a1b1c3d4e5") def test_an_invalid_org_id_pattern() -> None: - handler = SchedulerSetupHandler( - {"ResourceProperties": {"stack_version": "test"}}, MockLambdaContext() - ) - response = handler.get_valid_org_id("111111111111,222222222222") - assert response is None + assert not is_org_id("111111111111,222222222222") -def test_create_request(config_table: None) -> None: - """Happy path no errors""" - handler = 
SchedulerSetupHandler( +def test_create_request_no_orgs_or_accounts( + config_item_store: DdbConfigItemStore, +) -> None: + event = new_create_request( { - "ResourceProperties": { - "stack_version": "test", - "remote_account_ids": ["111111111111"], - "scheduled_services": ["ec2"], - "regions": ["us-east-1"], - } - }, - MockLambdaContext(), + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": [], + } ) - assert handler._create_request() is True - - -def test_update_request(config_table: None) -> None: - """Happy path no errors""" handler = SchedulerSetupHandler( - { - "ResourceProperties": { - "stack_version": "test", - "remote_account_ids": ["111111111111"], - "scheduled_services": ["ec2"], - "regions": ["us-east-1"], - } - }, + event, MockLambdaContext(), ) - assert handler._update_request() is True + handler.handle_request() - -def test_regions() -> None: - regions = ["us-east-1", "us-west-2"] - handler = SchedulerSetupHandler( - {"ResourceProperties": {"stack_version": "test", "regions": regions}}, - MockLambdaContext(), - ) - assert handler.regions == set(regions) + saved_item = config_item_store.get() + assert saved_item == DdbConfigItem(remote_account_ids=[], organization_id="") -def test_regions_empty() -> None: - handler = SchedulerSetupHandler( - {"ResourceProperties": {"stack_version": "test", "regions": []}}, - MockLambdaContext(), +def test_create_request_with_account_ids(config_item_store: DdbConfigItemStore) -> None: + accounts = ["111111111111", "222222222222"] + event = new_create_request( + { + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": accounts, + } ) - assert handler.regions == [environ["AWS_DEFAULT_REGION"]] - - -def test_regions_empty_strings() -> None: handler = SchedulerSetupHandler( - {"ResourceProperties": {"stack_version": "test", "regions": ["", ""]}}, + event, MockLambdaContext(), ) - assert handler.regions == [environ["AWS_DEFAULT_REGION"]] + handler.handle_request() - -def 
test_remote_account_ids() -> None: - accounts = ["111111111111", "222222222222"] - handler = SchedulerSetupHandler( - { - "ResourceProperties": { - "stack_version": "test", - "remote_account_ids": accounts, - } - }, - MockLambdaContext(), + saved_item = config_item_store.get() + assert saved_item == DdbConfigItem( + remote_account_ids=["111111111111", "222222222222"], organization_id="" ) - assert handler.remote_account_ids == set(accounts) -def test_remote_account_ids_empty() -> None: - handler = SchedulerSetupHandler( - {"ResourceProperties": {"stack_version": "test", "remote_account_ids": []}}, - MockLambdaContext(), +def test_create_request_with_orgs(config_item_store: DdbConfigItemStore) -> None: + """Happy path, orgs_disabled""" + with mock_app_env(enable_aws_organizations=True): + SchedulerSetupHandler( + new_create_request( + { + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": ["o-a1b1c3d4e5"], + } + ), + MockLambdaContext(), + ).handle_request() + + saved_item = config_item_store.get() + assert saved_item == DdbConfigItem( + remote_account_ids=[], organization_id="o-a1b1c3d4e5" ) - assert not handler.remote_account_ids -def test_remote_account_ids_empty_strings() -> None: - handler = SchedulerSetupHandler( - { - "ResourceProperties": { - "stack_version": "test", - "remote_account_ids": ["", ""], - } - }, - MockLambdaContext(), +def test_update_request_preserves_registered_accounts_when_org_doesnt_change( + config_item_store: DdbConfigItemStore, +) -> None: + config_item_store.put( + DdbConfigItem( + organization_id="o-a1b1c3d4e5", + remote_account_ids=["111122223333", "222233334444"], + ) ) - assert not handler.remote_account_ids - - -def test_scheduled_services() -> None: - services = ["ec2", "rds"] - handler = SchedulerSetupHandler( - { - "ResourceProperties": { - "stack_version": "test", - "scheduled_services": services, - } - }, - MockLambdaContext(), + with mock_app_env(enable_aws_organizations=True): + SchedulerSetupHandler( + 
new_update_request( + { + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": ["o-a1b1c3d4e5"], + }, + { + "timeout": 120, + "log_retention_days": 30, + "remote_account_ids": ["o-a1b1c3d4e5"], + }, + ), + MockLambdaContext(), + ).handle_request() + + saved_item = config_item_store.get() + assert saved_item == DdbConfigItem( + remote_account_ids=["111122223333", "222233334444"], + organization_id="o-a1b1c3d4e5", ) - assert handler.scheduled_services == set(services) -def test_scheduled_services_empty() -> None: - handler = SchedulerSetupHandler( - {"ResourceProperties": {"stack_version": "test", "scheduled_services": []}}, - MockLambdaContext(), +def test_update_request_clears_registered_accounts_when_org_changes( + config_item_store: DdbConfigItemStore, +) -> None: + config_item_store.put( + DdbConfigItem( + organization_id="o-abcdefghijkl", + remote_account_ids=["111122223333", "222233334444"], + ) + ) + with mock_app_env(enable_aws_organizations=True): + SchedulerSetupHandler( + new_update_request( + { + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": ["o-a1b1c3d4e5"], + }, + { + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": ["o-abcdefghijkl"], + }, + ), + MockLambdaContext(), + ).handle_request() + + saved_item = config_item_store.get() + assert saved_item == DdbConfigItem( + remote_account_ids=[], + organization_id="o-a1b1c3d4e5", ) - assert not handler.scheduled_services -def test_scheduled_services_empty_strings() -> None: - handler = SchedulerSetupHandler( - { - "ResourceProperties": { - "stack_version": "test", - "scheduled_services": ["", ""], - } - }, - MockLambdaContext(), +def test_update_request_overwrites_remote_accounts_when_orgs_disabled( + config_item_store: DdbConfigItemStore, +) -> None: + config_item_store.put( + DdbConfigItem( + organization_id="o-abcdefghijkl", + remote_account_ids=["111122223333", "222233334444"], + ) + ) + with mock_app_env(enable_aws_organizations=False): + 
SchedulerSetupHandler( + new_update_request( + { + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": ["333344445555", "444455556666"], + }, + { + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": ["o-abcdefghijkl"], + }, + ), + MockLambdaContext(), + ).handle_request() + + saved_item = config_item_store.get() + assert saved_item == DdbConfigItem( + remote_account_ids=["333344445555", "444455556666"], + organization_id="", ) - assert not handler.scheduled_services -def test_set_lambda_logs_retention_period(app_env: AppEnv) -> None: +def test_sets_lambda_logs_retention_period_on_create( + app_env: AppEnv, config_item_store: DdbConfigItemStore +) -> None: """With no period, expect set to default""" log_group = app_env.log_group handler = SchedulerSetupHandler( - {"ResourceProperties": {"stack_version": "test"}}, MockLambdaContext(log_group) + new_create_request( + {"timeout": 120, "remote_account_ids": [], "log_retention_days": 30} + ), + MockLambdaContext(log_group), ) - handler.set_lambda_logs_retention_period() + handler.handle_request() response = boto3.client("logs").describe_log_groups(logGroupNamePrefix=log_group) assert response["logGroups"][0]["logGroupName"] == log_group assert response["logGroups"][0]["retentionInDays"] == 30 -def test_set_lambda_logs_retention_period_custom_retention(app_env: AppEnv) -> None: - """With custom period, expect set to desired""" - log_group = app_env.log_group - retention_period = 90 - handler = SchedulerSetupHandler( +def test_creates_example_schedules_on_create( + config_item_store: DdbConfigItemStore, + period_store: PeriodDefinitionStore, + schedule_store: ScheduleDefinitionStore, +) -> None: + event = new_create_request( { - "ResourceProperties": { - "stack_version": "test", - "log_retention_days": retention_period, - } - }, - MockLambdaContext(log_group), + "timeout": 120, + "log_retention_days": 7, + "remote_account_ids": [], + } ) - handler.set_lambda_logs_retention_period() - response 
= boto3.client("logs").describe_log_groups(logGroupNamePrefix=log_group) - assert response["logGroups"][0]["logGroupName"] == log_group - assert response["logGroups"][0]["retentionInDays"] == retention_period + handler = SchedulerSetupHandler( + event, + MockLambdaContext(), + ) + handler.handle_request() + + saved_schedules = list(schedule_store.find_all().values()) + saved_periods = list(period_store.find_all().values()) + + demo_schedules_with_unordered_periods = [ + ScheduleDefinition( + name=orig.name, + description=orig.description, + periods=UnorderedList(orig.periods), + timezone=orig.timezone, + override_status=orig.override_status, + ) + for orig in DEMO_SCHEDULES + ] + + assert saved_schedules == UnorderedList(demo_schedules_with_unordered_periods) + assert saved_periods == UnorderedList(DEMO_PERIODS) diff --git a/source/app/tests/handler/test_scheduling_orchestration_handler.py b/source/app/tests/handler/test_scheduling_orchestration_handler.py index b1516fc9..ccfc392a 100644 --- a/source/app/tests/handler/test_scheduling_orchestration_handler.py +++ b/source/app/tests/handler/test_scheduling_orchestration_handler.py @@ -1,317 +1,483 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import datetime -from typing import Any +import json +from typing import TYPE_CHECKING, Any, Iterator from unittest.mock import MagicMock, patch -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.configuration.running_period_dict_element import ( - RunningPeriodDictElement, -) -from instance_scheduler.configuration.scheduler_config import GlobalConfig +import boto3 +from _pytest.fixtures import fixture + from instance_scheduler.handler.scheduling_orchestrator import ( OrchestrationRequest, SchedulingOrchestratorHandler, + prefetch_schedules_and_periods, +) +from instance_scheduler.handler.scheduling_request import SchedulingRequest +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, ) from tests.context import MockLambdaContext +from tests.logger import MockLogger +from tests.test_utils.mock_orchestrator_environment import MockOrchestratorEnvironment +if TYPE_CHECKING: + from mypy_boto3_dynamodb.client import DynamoDBClient + from mypy_boto3_ssm.client import SSMClient +else: + SSMClient = object + DynamoDBClient = object -def get_scheduler_conf_defaults() -> dict[str, Any]: +mockEvent: OrchestrationRequest = {"scheduled_action": "run_orchestrator"} +moto_hub_account = "123456789012" + + +def orchestrator_env_overrides( + schedule_regions: list[str] = [], + enable_ec2_service: bool = False, + enable_rds_service: bool = False, + 
enable_rds_clusters: bool = False, + enable_neptune_service: bool = False, + enable_docdb_service: bool = False, + enable_schedule_hub_account: bool = False, +) -> dict[str, Any]: return { - "scheduled_services": ["ec2"], - "schedule_clusters": False, - "tag_name": "Schedule", - "regions": [], - "default_timezone": "UTC", - "schedules": { - "test-schedule": InstanceSchedule( - name="test-schedule", - periods=[ - RunningPeriodDictElement( - period=RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - ) - ], - ) - }, - "trace": "False", - "enable_ssm_maintenance_windows": False, - "use_metrics": "False", - "schedule_lambda_account": True, - "create_rds_snapshot": False, - "started_tags": "", - "stopped_tags": "", - "scheduler_role_name": "Scheduler-Role", - "namespace": "dev", - "organization_id": "", - "aws_partition": "aws", - "remote_account_ids": [], + "schedule_regions": schedule_regions, + "enable_ec2_service": enable_ec2_service, + "enable_rds_service": enable_rds_service, + "enable_rds_clusters": enable_rds_clusters, + "enable_neptune_service": enable_neptune_service, + "enable_docdb_service": enable_docdb_service, + "enable_schedule_hub_account": enable_schedule_hub_account, } -mockEvent: OrchestrationRequest = {"scheduled_action": "run_orchestrator"} +@fixture +def mocked_lambda_invoke() -> Iterator[MagicMock]: + with patch.object(SchedulingOrchestratorHandler, "lambda_client") as lambda_client: + with patch.object(lambda_client, "invoke") as invoke_func: + yield invoke_func -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") -def test_no_region_provided_uses_local_region( - run_scheduling_lambda_func: MagicMock, +def scheduling_request_from_lambda_invoke(call_args: Any) -> SchedulingRequest: + payload = call_args.kwargs["Payload"] + scheduling_request: SchedulingRequest = json.loads(payload) + return scheduling_request + + +def 
test_prefetch_gracefully_handles_invalid_configurations( + config_table: str, + period_store: DynamoPeriodDefinitionStore, + schedule_store: DynamoScheduleDefinitionStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = ["ec2"] - conf["regions"] = [] - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() + ddb_client: DynamoDBClient = boto3.client("dynamodb") + ddb_client.put_item( + TableName=config_table, + Item={ + "type": {"S": "period"}, + "name": {"S": "invalid-period"}, + "begintime": {"S": "20:00"}, # begintime after endtime + "endtime": {"S": "15:00"}, + }, + ) + + schedule_store.put( + ScheduleDefinition( + name="schedule-with-missing-periods", periods=[PeriodIdentifier("unknown")] ) - cloudwatch_handler.handle_request() + ) - assert run_scheduling_lambda_func.call_count == 1 + schedules, periods = prefetch_schedules_and_periods( + MockOrchestratorEnvironment(), MockLogger() + ) + assert len(periods.find_all()) == 0 + assert len(schedules.find_all()) == 0 + # todo: write assertions against the errors that get logged to sns (see output to MockLogger) - scheduling_context = run_scheduling_lambda_func.call_args.args[0] - assert scheduling_context.schedule_lambda_account is True - assert scheduling_context.service == "ec2" - assert scheduling_context.region == "us-east-1" +# ##------------------- FAN OUT BEHAVIOR -----------------## # +def test_no_region_provided_uses_local_region( + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, +) -> None: + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=[], + enable_ec2_service=True, + enable_schedule_hub_account=True, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + assert 
mocked_lambda_invoke.call_count == 1 + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args + ) + assert scheduling_request["account"] == moto_hub_account + assert scheduling_request["service"] == "ec2" + assert scheduling_request["region"] == "us-east-1" -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") def test_no_service_provided_does_not_run_any_lambdas( - run_scheduling_lambda_func: MagicMock, + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = [] - conf["regions"] = ["us-east-1"] - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() - ) - cloudwatch_handler.handle_request() + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1"], enable_schedule_hub_account=True + ), + logger=MockLogger(), + ) + orchestrator.handle_request() - assert run_scheduling_lambda_func.call_count == 0 + assert mocked_lambda_invoke.call_count == 0 -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") def test_remote_account_only_does_not_schedule_locally( - run_scheduling_lambda_func: MagicMock, + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = ["ec2"] - conf["regions"] = ["us-east-1"] - conf["remote_account_ids"] = ["222233334444"] - conf["schedule_lambda_account"] = False - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() - ) - cloudwatch_handler.handle_request() - - assert 
run_scheduling_lambda_func.call_count == 1 - - scheduling_context = run_scheduling_lambda_func.call_args.args[0] - assert scheduling_context.schedule_lambda_account is False - assert scheduling_context.service == "ec2" - assert scheduling_context.account_id == "222233334444" - assert scheduling_context.region == "us-east-1" + config_item_store.put( + DdbConfigItem(organization_id="", remote_account_ids=["222233334444"]) + ) + + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1"], + enable_ec2_service=True, + enable_schedule_hub_account=False, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + + assert mocked_lambda_invoke.call_count == 1 + + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args + ) + assert scheduling_request["service"] == "ec2" + assert scheduling_request["account"] == "222233334444" + assert scheduling_request["region"] == "us-east-1" -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") def test_1region_1service_calls_scheduler_x1( - run_scheduling_lambda_func: MagicMock, + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = ["ec2"] - conf["regions"] = ["us-east-1"] - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() - ) - cloudwatch_handler.handle_request() - - assert run_scheduling_lambda_func.call_count == 1 + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1"], + enable_ec2_service=True, + enable_schedule_hub_account=True, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() 
+ + assert mocked_lambda_invoke.call_count == 1 + + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args + ) + assert scheduling_request["account"] == moto_hub_account + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" - scheduling_context = run_scheduling_lambda_func.call_args.args[0] - assert scheduling_context.region == "us-east-1" - assert scheduling_context.service == "ec2" - -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") def test_2region_1service_calls_scheduler_x2( - run_scheduling_lambda_func: MagicMock, + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = ["ec2"] - conf["regions"] = ["us-east-1", "us-east-2"] - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() - ) - cloudwatch_handler.handle_request() - - assert run_scheduling_lambda_func.call_count == 2 - - # first call - scheduling_context = run_scheduling_lambda_func.call_args_list[0].args[0] - assert scheduling_context.region == "us-east-1" - assert scheduling_context.service == "ec2" - - # second call - scheduling_context = run_scheduling_lambda_func.call_args_list[1].args[0] - assert scheduling_context.region == "us-east-2" - assert scheduling_context.service == "ec2" + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1", "us-east-2"], + enable_ec2_service=True, + enable_schedule_hub_account=True, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + + assert mocked_lambda_invoke.call_count == 2 + + # first call + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + 
mocked_lambda_invoke.call_args_list[0] + ) + + assert scheduling_request["account"] == moto_hub_account + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + + # second call + scheduling_request = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[1] + ) + assert scheduling_request["account"] == moto_hub_account + assert scheduling_request["region"] == "us-east-2" + assert scheduling_request["service"] == "ec2" -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") def test_2accounts_1region_1service_nolocal_calls_scheduler_twice( - run_scheduling_lambda_func: MagicMock, + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = ["ec2"] - conf["regions"] = ["us-east-1"] - conf["schedule_lambda_account"] = False - conf["remote_account_ids"] = ["222233334444", "333344445555"] - - # awkward behavior: - # providing local account (as defined in mockEvent) as one of the remote accounts - # causes scheduling in local-mode (remote id removed, schedule_lambda_account = true) - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() + config_item_store.put( + DdbConfigItem( + organization_id="", remote_account_ids=["222233334444", "333344445555"] ) - cloudwatch_handler.handle_request() + ) + + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1"], + enable_ec2_service=True, + enable_schedule_hub_account=False, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + + assert mocked_lambda_invoke.call_count == 2 + + # first call + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + 
mocked_lambda_invoke.call_args_list[0] + ) + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + assert scheduling_request["account"] == "222233334444" + + # second call + scheduling_request = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[1] + ) + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + assert scheduling_request["account"] == "333344445555" - assert run_scheduling_lambda_func.call_count == 2 - # first call - scheduling_context = run_scheduling_lambda_func.call_args_list[0].args[0] - assert scheduling_context.region == "us-east-1" - assert scheduling_context.service == "ec2" - assert scheduling_context.account_id == "222233334444" - assert scheduling_context.schedule_lambda_account is False - - # second call - scheduling_context = run_scheduling_lambda_func.call_args_list[1].args[0] - assert scheduling_context.region == "us-east-1" - assert scheduling_context.service == "ec2" - assert scheduling_context.account_id == "333344445555" - assert scheduling_context.schedule_lambda_account is False - - -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") def test_2accounts_1region_1service_with_local_calls_scheduler_x3( - run_scheduling_lambda_func: MagicMock, + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = ["ec2"] - conf["regions"] = ["us-east-1"] - conf["schedule_lambda_account"] = True - conf["remote_account_ids"] = ["222233334444", "333344445555"] - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() + config_item_store.put( + DdbConfigItem( + organization_id="", remote_account_ids=["222233334444", "333344445555"] ) - cloudwatch_handler.handle_request() - - 
assert run_scheduling_lambda_func.call_count == 3 - - # first call - scheduling_context = run_scheduling_lambda_func.call_args_list[0].args[0] - assert scheduling_context.region == "us-east-1" - assert scheduling_context.service == "ec2" - assert scheduling_context.account_id == "" - assert scheduling_context.schedule_lambda_account is True + ) + + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1"], + enable_ec2_service=True, + enable_schedule_hub_account=True, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + + assert mocked_lambda_invoke.call_count == 3 + + # first call + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[0] + ) + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + assert scheduling_request["account"] == moto_hub_account + + # second call + scheduling_request = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[1] + ) + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + assert scheduling_request["account"] == "222233334444" + + # third call + scheduling_request = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[2] + ) + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + assert scheduling_request["account"] == "333344445555" - # second call - scheduling_context = run_scheduling_lambda_func.call_args_list[1].args[0] - assert scheduling_context.region == "us-east-1" - assert scheduling_context.service == "ec2" - assert scheduling_context.account_id == "222233334444" - assert scheduling_context.schedule_lambda_account is False - # third call - scheduling_context = run_scheduling_lambda_func.call_args_list[2].args[0] - assert scheduling_context.region == 
"us-east-1" - assert scheduling_context.service == "ec2" - assert scheduling_context.account_id == "333344445555" - assert scheduling_context.schedule_lambda_account is False - - -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") def test_1region_2service_calls_scheduler_x2( - run_scheduling_lambda_func: MagicMock, + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, +) -> None: + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1"], + enable_ec2_service=True, + enable_rds_service=True, + enable_schedule_hub_account=True, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + + assert mocked_lambda_invoke.call_count == 2 + + # first call + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[0] + ) + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + + # second call + scheduling_request = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[1] + ) + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "rds" + + +def test_3account_3region_2service_calls_scheduler_x18( + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = ["ec2", "rds"] - conf["regions"] = ["us-east-1"] - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() + config_item_store.put( + DdbConfigItem( + organization_id="", + remote_account_ids=["111122223333", "222233334444", "333344445555"], ) - cloudwatch_handler.handle_request() - - assert run_scheduling_lambda_func.call_count == 2 - - # first call - scheduling_context 
= run_scheduling_lambda_func.call_args_list[0].args[0] - assert scheduling_context.region == "us-east-1" - assert scheduling_context.service == "ec2" - - # second call - scheduling_context = run_scheduling_lambda_func.call_args_list[1].args[0] - assert scheduling_context.region == "us-east-1" - assert scheduling_context.service == "rds" + ) + + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1", "us-east-2", "us-west-1"], + enable_ec2_service=True, + enable_rds_service=True, + enable_schedule_hub_account=False, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + + assert mocked_lambda_invoke.call_count == 18 + for index, call in enumerate(mocked_lambda_invoke.call_args_list): + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + call + ) + assert scheduling_request["region"] # assert exists + assert scheduling_request["account"] + assert scheduling_request["service"] -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") -def test_3account_3region_3service_calls_scheduler_x27( - run_scheduling_lambda_func: MagicMock, +# ##------------------- SSM Parameter Resolution -----------------## # +def test_ssm_parameter_string_list_is_resolved_to_account_ids( + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, ) -> None: - conf = get_scheduler_conf_defaults() - conf["scheduled_services"] = ["ec2", "rds", "ecs"] - conf["regions"] = ["us-east-1", "us-east-2", "us-west-1"] - conf["remote_account_ids"] = ["111122223333", "222233334444", "333344445555"] - conf["schedule_lambda_account"] = False - - with patch.object( - SchedulingOrchestratorHandler, "configuration", GlobalConfig(**conf) - ): - cloudwatch_handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() + ssm_client: SSMClient = boto3.client("ssm") + ssm_client.put_parameter( + Name="my_ssm_param", 
Value="555566667777,666677778888", Type="StringList" + ) + + config_item_store.put( + DdbConfigItem( + organization_id="", + remote_account_ids=["{param:my_ssm_param}", "111122223333"], + ) + ) + + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1"], + enable_ec2_service=True, + enable_schedule_hub_account=False, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + + # first call + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[0] + ) + assert scheduling_request["account"] == "555566667777" + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + + # second call + scheduling_request = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[1] + ) + assert scheduling_request["account"] == "666677778888" + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + + # third call + scheduling_request = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args_list[2] + ) + assert scheduling_request["account"] == "111122223333" + assert scheduling_request["region"] == "us-east-1" + assert scheduling_request["service"] == "ec2" + + +def test_ssm_parameter_string_is_resolved_to_account_id( + mocked_lambda_invoke: MagicMock, + config_item_store: DdbConfigItemStore, +) -> None: + ssm_client: SSMClient = boto3.client("ssm") + ssm_client.put_parameter(Name="my_ssm_param", Value="555566667777", Type="String") + + config_item_store.put( + DdbConfigItem( + organization_id="", + remote_account_ids=["{param:my_ssm_param}"], ) - cloudwatch_handler.handle_request() - - assert run_scheduling_lambda_func.call_count == 27 - for index, call in enumerate(run_scheduling_lambda_func.call_args_list): - scheduling_context = call.args[0] - assert scheduling_context.region # 
assert exists - assert scheduling_context.account_id or ( - scheduling_context.schedule_lambda_account is True - ) - assert scheduling_context.service + ) + + orchestrator = SchedulingOrchestratorHandler( + event=mockEvent, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=["us-east-1"], + enable_ec2_service=True, + enable_schedule_hub_account=False, + ), + logger=MockLogger(), + ) + orchestrator.handle_request() + + assert mocked_lambda_invoke.call_count == 1 + + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args + ) + assert scheduling_request["service"] == "ec2" + assert scheduling_request["account"] == "555566667777" + assert scheduling_request["region"] == "us-east-1" diff --git a/source/app/tests/handler/test_scheduling_request_handler.py b/source/app/tests/handler/test_scheduling_request_handler.py deleted file mode 100644 index 6a0505ea..00000000 --- a/source/app/tests/handler/test_scheduling_request_handler.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -from unittest.mock import ANY, MagicMock, patch - -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler -from tests.context import MockLambdaContext -from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, -) - - -@patch("instance_scheduler.handler.scheduling_request.Session") -def test_scheduler_uses_regional_sts_endpoint(mock_session: MagicMock) -> None: - mock_client = MagicMock() - mock_session.return_value.client = mock_client - region_name = "executing-region" - mock_session.return_value.region_name = region_name - - context = build_context(current_dt=quick_time(10, 0, 0), service="rds") - event = build_scheduling_event(context) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.sts - - mock_client.assert_called_with( - "sts", - region_name=region_name, - endpoint_url=f"https://sts.{region_name}.amazonaws.com", - config=ANY, - ) diff --git a/source/app/tests/handler/test_spoke_registration_handler.py b/source/app/tests/handler/test_spoke_registration_handler.py new file mode 100644 index 00000000..6da09155 --- /dev/null +++ b/source/app/tests/handler/test_spoke_registration_handler.py @@ -0,0 +1,109 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import pytest +from _pytest.fixtures import fixture + +from instance_scheduler.handler.spoke_registration import ( + SpokeRegistrationEnvironment, + SpokeRegistrationHandler, + SpokeRequest, + validate_spoke_request, +) +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.util.validation import ValidationException +from tests.logger import MockLogger + + +def registration_request(account_id: str) -> SpokeRequest: + return { + "account": account_id, + "operation": "Register", + } + + +def deregistration_request(account_id: str) -> SpokeRequest: + return { + "account": account_id, + "operation": "Deregister", + } + + +@fixture +def spoke_registration_env(config_table: str) -> SpokeRegistrationEnvironment: + return SpokeRegistrationEnvironment( + config_table_name=config_table, + enable_debug_logging=True, + log_group="log_group", + topic_arn="topic_arn", + user_agent_extra="user_agent_extra", + ) + + +def test_registration_request_adds_account( + config_item_store: DdbConfigItemStore, + spoke_registration_env: SpokeRegistrationEnvironment, +) -> None: + account_id = "111122223333" + handler = SpokeRegistrationHandler( + registration_request(account_id), spoke_registration_env, MockLogger() + ) + + result = handler.handle_request() + + assert result == f"Registered spoke account {account_id}" + assert config_item_store.get().remote_account_ids == [account_id] + + +def test_deletion_request_removes_account( + config_item_store: DdbConfigItemStore, + spoke_registration_env: SpokeRegistrationEnvironment, +) -> None: + account_id = "111122223333" + config_item_store.register_spoke_accounts({account_id}) + handler = SpokeRegistrationHandler( + deregistration_request(account_id), spoke_registration_env, MockLogger() + ) + + result = handler.handle_request() + + assert result == f"Deregistered spoke account {account_id}" + assert config_item_store.get().remote_account_ids == [] + 
+ +def test_deletion_request_does_not_fail_if_account_does_not_exist( + config_item_store: DdbConfigItemStore, + spoke_registration_env: SpokeRegistrationEnvironment, +) -> None: + account_id = "111122223333" + handler = SpokeRegistrationHandler( + deregistration_request(account_id), spoke_registration_env, MockLogger() + ) + + result = handler.handle_request() + + assert result == f"Deregistered spoke account {account_id}" + assert config_item_store.get().remote_account_ids == [] + + +@pytest.mark.parametrize( + "operation", + ["Register", "Deregister"], +) +def test_validate_spoke_request_success(operation: str) -> None: + spoke_registration_request = {"account": "111111111111", "operation": operation} + validate_spoke_request(spoke_registration_request) + + +def test_validate_spoke_request_invalid_account() -> None: + with pytest.raises(ValidationException): + spoke_registration_request = {"account": None, "operation": "Register"} + validate_spoke_request(spoke_registration_request) + + +def test_validate_spoke_request_invalid_operation() -> None: + with pytest.raises(ValidationException): + spoke_registration_request = { + "account": "111111111111", + "operation": "INVALID_OPERATION", + } + validate_spoke_request(spoke_registration_request) diff --git a/source/app/tests/integration/conftest.py b/source/app/tests/integration/conftest.py index 2be42954..02452194 100644 --- a/source/app/tests/integration/conftest.py +++ b/source/app/tests/integration/conftest.py @@ -8,7 +8,6 @@ from instance_scheduler.schedulers.instance_states import InstanceStates from instance_scheduler.util.app_env import AppEnv -from tests import ami from tests.logger import MockLogger if TYPE_CHECKING: @@ -34,7 +33,7 @@ def auto_setup_sns_error_reporting_topic(mock_sns_errors_topic: None) -> None: @fixture -def ec2_instance(moto_backend: None) -> Iterator[str]: +def ec2_instance(moto_backend: None, ami: str) -> Iterator[str]: ec2_client: EC2Client = boto3.client("ec2") instance_id: str = 
ec2_client.run_instances(ImageId=ami, MinCount=1, MaxCount=1)[ "Instances" diff --git a/source/app/tests/integration/helpers/asg_helpers.py b/source/app/tests/integration/helpers/asg_helpers.py new file mode 100644 index 00000000..d58aa69d --- /dev/null +++ b/source/app/tests/integration/helpers/asg_helpers.py @@ -0,0 +1,323 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import json +from collections.abc import Iterable, Iterator +from dataclasses import dataclass +from datetime import datetime, time, timedelta, timezone +from typing import TYPE_CHECKING, Any, Final, cast +from unittest.mock import MagicMock +from zoneinfo import ZoneInfo + +from boto3 import client +from botocore.exceptions import ClientError +from moto.core.models import DEFAULT_ACCOUNT_ID + +from instance_scheduler.handler.asg import ASG_SERVICE +from instance_scheduler.handler.scheduling_request import SchedulingRequest +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from instance_scheduler.ops_metrics.metric_type.asg_count_metric import AsgCountMetric +from instance_scheduler.service.asg import AsgSize, AsgTag +from tests import DEFAULT_REGION +from tests.conftest import get_ami + +if TYPE_CHECKING: + from mypy_boto3_autoscaling.client import AutoScalingClient + from mypy_boto3_autoscaling.type_defs import ScheduledUpdateGroupActionTypeDef + from mypy_boto3_ec2.client import EC2Client + from mypy_boto3_ecs.client import ECSClient +else: + AutoScalingClient = object + ScheduledUpdateGroupActionTypeDef = object + EC2Client = object + 
ECSClient = object + + +ASG_GROUP_NAME: Final = "my-group" +ASG_SCHEDULED_TAG_KEY: Final = "scheduled" +RULE_PREFIX: Final = "is-" +SCHEDULE_TAG_KEY: Final = "Schedule" +TEST_DATETIME: Final = datetime(year=2024, month=2, day=28, tzinfo=timezone.utc) + + +class AsgTestError(Exception): + pass + + +@dataclass(frozen=True) +class ScheduleHelper: + name: str + start_recurrence: str + end_recurrence: str + time_zone: ZoneInfo | None + + +def create_simple_schedule( + *, + config_table_name: str, + schedule_name: str, + begin_time: time, + end_time: time, + time_zone: ZoneInfo | None = None, + monthdays: set[str] | None = None, +) -> None: + schedule_store: Final = DynamoScheduleDefinitionStore(table_name=config_table_name) + period_store: Final = DynamoPeriodDefinitionStore(table_name=config_table_name) + + period_name: Final = f"{schedule_name}-period" + period: Final = PeriodDefinition( + name=period_name, + begintime=begin_time.strftime("%H:%M"), + endtime=end_time.strftime("%H:%M"), + monthdays=monthdays, + ) + period_store.put(period) + + schedule: Final = ScheduleDefinition( + name=schedule_name, + periods=[PeriodIdentifier.of(period_name)], + timezone=str(time_zone) if time_zone else None, + ) + schedule_store.put(schedule) + + +def get_or_create_launch_template() -> str: + ec2: Final[EC2Client] = client("ec2") + template_name: Final = "my-template" + try: + ec2.describe_launch_templates(LaunchTemplateNames=[template_name]) + except ClientError as err: + if ( + err.response["Error"]["Code"] + == "InvalidLaunchTemplateName.NotFoundException" + ): + ec2.create_launch_template( + LaunchTemplateName=template_name, + LaunchTemplateData={"ImageId": get_ami()}, + ) + else: + raise + + return template_name + + +def get_az_name() -> str: + ec2: Final[EC2Client] = client("ec2") + zones: Final = ec2.describe_availability_zones() + if len(zones["AvailabilityZones"]) == 0: + raise AsgTestError("No availability zones") + return zones["AvailabilityZones"][0]["ZoneName"] + + 
+def create_asg(group_name: str, running_size: AsgSize) -> None: + autoscaling: Final[AutoScalingClient] = client("autoscaling") + autoscaling.create_auto_scaling_group( + AutoScalingGroupName=group_name, + MinSize=running_size.min_size, + DesiredCapacity=running_size.desired_size, + MaxSize=running_size.max_size, + LaunchTemplate={"LaunchTemplateName": get_or_create_launch_template()}, + AvailabilityZones=[get_az_name()], + ) + + +def tag_group(*, group_name: str, tag_key: str, tag_value: str) -> None: + autoscaling: Final[AutoScalingClient] = client("autoscaling") + autoscaling.create_or_update_tags( + Tags=[ + { + "ResourceType": "auto-scaling-group", + "ResourceId": group_name, + "Key": tag_key, + "Value": tag_value, + "PropagateAtLaunch": False, + } + ] + ) + + +def get_actions(group_name: str) -> Iterator[ScheduledUpdateGroupActionTypeDef]: + autoscaling: Final[AutoScalingClient] = client("autoscaling") + paginator: Final = autoscaling.get_paginator("describe_scheduled_actions") + for page in paginator.paginate(AutoScalingGroupName=group_name): + yield from page["ScheduledUpdateGroupActions"] + + +def find_action( + actions: Iterable[ScheduledUpdateGroupActionTypeDef], + size: AsgSize, + recurrence: str, + time_zone: ZoneInfo, +) -> ScheduledUpdateGroupActionTypeDef: + matches: Final = list( + filter( + lambda action: action["MinSize"] == size.min_size + and action["DesiredCapacity"] == size.desired_size + and action["MaxSize"] == size.max_size + and action["Recurrence"] == recurrence + and action["TimeZone"] == str(time_zone), + actions, + ) + ) + if len(matches) == 0: + raise AsgTestError("No matching action") + return matches[0] + + +def add_actions(group_name: str, asg_size: AsgSize, schedule: ScheduleHelper) -> None: + autoscaling: Final[AutoScalingClient] = client("autoscaling") + autoscaling.batch_put_scheduled_update_group_action( + AutoScalingGroupName=group_name, + ScheduledUpdateGroupActions=[ + { + "ScheduledActionName": 
f"{RULE_PREFIX}{schedule.name}-periodStart", + "Recurrence": schedule.start_recurrence, + "MinSize": asg_size.min_size, + "MaxSize": asg_size.max_size, + "DesiredCapacity": asg_size.desired_size, + "TimeZone": str(schedule.time_zone), + }, + { + "ScheduledActionName": f"{RULE_PREFIX}{schedule.name}-periodStop", + "Recurrence": schedule.end_recurrence, + "MinSize": 0, + "MaxSize": 0, + "DesiredCapacity": 0, + "TimeZone": str(schedule.time_zone), + }, + ], + ) + + +def get_actions_for_schedule( + group_name: str, schedule: ScheduleHelper, running_size: AsgSize +) -> Iterator[ScheduledUpdateGroupActionTypeDef]: + if schedule.time_zone is None: + raise AsgTestError("Need time zone to find actions") + + actions: Final = list(get_actions(group_name)) + yield find_action( + actions, running_size, schedule.start_recurrence, schedule.time_zone + ) + yield find_action( + actions, AsgSize.stopped(), schedule.end_recurrence, schedule.time_zone + ) + + +def get_tag_value(*, group_name: str, tag_key: str) -> str: + autoscaling: Final[AutoScalingClient] = client("autoscaling") + desc: Final = autoscaling.describe_auto_scaling_groups( + AutoScalingGroupNames=[group_name] + ) + if len(desc["AutoScalingGroups"]) == 0: + raise AsgTestError("Group not found") + matches: Final = list( + filter(lambda tag: tag["Key"] == tag_key, desc["AutoScalingGroups"][0]["Tags"]) + ) + if len(matches) == 0: + raise KeyError("No tag found") + return matches[0]["Value"] + + +def delete_all_actions(group_name: str) -> None: + autoscaling: Final[AutoScalingClient] = client("autoscaling") + actions: Final = autoscaling.describe_scheduled_actions( + AutoScalingGroupName=group_name + ) + response: Final = autoscaling.batch_delete_scheduled_action( + AutoScalingGroupName=group_name, + ScheduledActionNames=list( + action["ScheduledActionName"] + for action in actions["ScheduledUpdateGroupActions"] + ), + ) + if len(response["FailedScheduledActions"]) > 0: + raise AsgTestError("Failed to delete some actions") 
+ + +def get_scheduled_tag(group_name: str) -> AsgTag: + tag_value: Final = json.loads( + get_tag_value(group_name=group_name, tag_key=ASG_SCHEDULED_TAG_KEY) + ) + + return AsgTag( + schedule=tag_value["schedule"], + ttl=tag_value["ttl"], + min_size=tag_value["min_size"], + max_size=tag_value["max_size"], + desired_size=tag_value["desired_size"], + ) + + +def create_ecs_cluster_with_auto_scaling( + ecs_asg_group_name: str, running_size: AsgSize +) -> None: + create_asg(group_name=ecs_asg_group_name, running_size=running_size) + ecs: Final[ECSClient] = client("ecs") + ecs.create_cluster(clusterName="MyCluster", capacityProviders=[ecs_asg_group_name]) + + +def verify_operational_metrics( + mock_collect_metric: MagicMock, should_send_metrics: bool +) -> None: + if should_send_metrics: + assert mock_collect_metric.call_count == 1 + instance_count_metric = mock_collect_metric.call_args[1].get("metric") + assert instance_count_metric == AsgCountMetric( + service=ASG_SERVICE, region=DEFAULT_REGION, num_instances=1, num_schedules=1 + ) + else: + assert mock_collect_metric.call_count == 0 + + mock_collect_metric.reset_mock() + + +def verify_scheduled_actions_and_tagged( + asg_group_name: str, schedule: ScheduleHelper, asg_size: AsgSize, dt: datetime +) -> None: + num_scheduled_actions: Final = len( + list(get_actions_for_schedule(asg_group_name, schedule, asg_size)) + ) + assert num_scheduled_actions == 2 + + tag: Final[AsgTag] = get_scheduled_tag(asg_group_name) + assert tag == AsgTag( + schedule=schedule.name, + ttl=(dt + timedelta(days=30)).isoformat(), + min_size=asg_size.min_size, + max_size=asg_size.max_size, + desired_size=asg_size.desired_size, + ) + + +def build_lambda_event( + dt: datetime, schedule_names: list[str] | None +) -> dict[str, Any]: + if schedule_names: + scheduling_request = SchedulingRequest( + action="scheduler:run", + account=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + service=ASG_SERVICE, + current_dt=dt.isoformat(), + 
dispatch_time=dt.isoformat(), + schedule_names=schedule_names, + ) + else: + scheduling_request = SchedulingRequest( + action="scheduler:run", + account=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + service=ASG_SERVICE, + current_dt=dt.isoformat(), + dispatch_time=dt.isoformat(), + ) + + return cast(dict[str, Any], scheduling_request) diff --git a/source/app/tests/integration/helpers/ec2_helpers.py b/source/app/tests/integration/helpers/ec2_helpers.py index 8fdfbed5..d81e1e0b 100644 --- a/source/app/tests/integration/helpers/ec2_helpers.py +++ b/source/app/tests/integration/helpers/ec2_helpers.py @@ -4,36 +4,60 @@ import boto3 from mypy_boto3_ec2.client import EC2Client +from mypy_boto3_ec2.literals import InstanceStateNameType, InstanceTypeType from mypy_boto3_ec2.type_defs import ( DescribeInstancesResultTypeDef, DescribeInstanceStatusResultTypeDef, TagTypeDef, ) -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from tests import ami +from tests.conftest import get_ami from tests.integration.helpers.boto_client_helpers import client_in_account_region def create_ec2_instances( count: int, - schedule: InstanceSchedule, + schedule_name: str, account: str = "123456789012", region: str = "us-east-1", + instance_type: InstanceTypeType = "t2.micro", ) -> tuple[str, ...]: ec2_client: EC2Client = client_in_account_region("ec2", account, region) create_response = ec2_client.run_instances( - ImageId=ami, MinCount=count, MaxCount=count + ImageId=get_ami(region), + MinCount=count, + MaxCount=count, + InstanceType=instance_type, ) instance_ids = [instance["InstanceId"] for instance in create_response["Instances"]] ec2_client.create_tags( - Resources=instance_ids, Tags=[{"Key": "Schedule", "Value": schedule.name}] + Resources=instance_ids, Tags=[{"Key": "Schedule", "Value": schedule_name}] ) return tuple(instance_ids) -def get_current_state(instance_id: str, ec2_client: Optional[EC2Client] = None) -> str: +def stop_ec2_instances( + 
*instance_ids: str, + account: str = "123456789012", + region: str = "us-east-1", +) -> None: + ec2_client: EC2Client = client_in_account_region("ec2", account, region) + ec2_client.stop_instances(InstanceIds=instance_ids) + + +def start_ec2_instances( + *instance_ids: str, + account: str = "123456789012", + region: str = "us-east-1", +) -> None: + ec2_client: EC2Client = client_in_account_region("ec2", account, region) + ec2_client.start_instances(InstanceIds=instance_ids) + + +def get_current_state( + instance_id: str, ec2_client: Optional[EC2Client] = None +) -> InstanceStateNameType: client: EC2Client if ec2_client: client = ec2_client @@ -48,6 +72,19 @@ def get_current_state(instance_id: str, ec2_client: Optional[EC2Client] = None) return describe_response["InstanceStatuses"][0]["InstanceState"]["Name"] +def get_current_instance_type( + instance_id: str, ec2_client: Optional[EC2Client] = None +) -> str: + client: EC2Client + if ec2_client: + client = ec2_client + else: + client = boto3.client("ec2") + return client.describe_instances(InstanceIds=[instance_id])["Reservations"][0][ + "Instances" + ][0]["InstanceType"] + + def get_tags(instance_id: str) -> list[TagTypeDef]: ec2_client: EC2Client = boto3.client("ec2") describe_response: DescribeInstancesResultTypeDef = ec2_client.describe_instances( diff --git a/source/app/tests/integration/helpers/global_config.py b/source/app/tests/integration/helpers/global_config.py deleted file mode 100644 index 23a71cb5..00000000 --- a/source/app/tests/integration/helpers/global_config.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -from typing import Optional -from zoneinfo import ZoneInfo - -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.scheduler_config import GlobalConfig - - -def build_global_config( - schedules: dict[str, InstanceSchedule], - remote_account_ids: Optional[list[str]] = None, - scheduled_services: Optional[list[str]] = None, - regions: Optional[list[str]] = None, - tag_name: str = "Schedule", - default_timezone: str = "UTC", - schedule_clusters: bool = False, - trace: bool = False, - enable_ssm_maintenance_windows: bool = False, - use_metrics: bool = False, - namespace: str = "namespace", - aws_partition: str = "aws", - scheduler_role_name: str = "scheduler_role", - organization_id: str = "", - schedule_lambda_account: bool = True, - create_rds_snapshot: bool = False, - started_tags: str = "", - stopped_tags: str = "", -) -> GlobalConfig: - if remote_account_ids is None: - remote_account_ids = [] - if scheduled_services is None: - scheduled_services = ["ec2"] - if regions is None: - regions = ["us-east-1"] - - return GlobalConfig( - schedules=schedules, - remote_account_ids=remote_account_ids, - scheduled_services=scheduled_services, - regions=regions, - tag_name=tag_name, - default_timezone=ZoneInfo(default_timezone), - schedule_clusters=schedule_clusters, - trace=trace, - enable_ssm_maintenance_windows=enable_ssm_maintenance_windows, - use_metrics=use_metrics, - namespace=namespace, - aws_partition=aws_partition, - scheduler_role_name=scheduler_role_name, - organization_id=organization_id, - schedule_lambda_account=schedule_lambda_account, - create_rds_snapshot=create_rds_snapshot, - started_tags=started_tags, - stopped_tags=stopped_tags, - ) diff --git a/source/app/tests/integration/helpers/rds_helpers.py b/source/app/tests/integration/helpers/rds_helpers.py index 993aefa4..4a2f1785 100644 --- a/source/app/tests/integration/helpers/rds_helpers.py +++ 
b/source/app/tests/integration/helpers/rds_helpers.py @@ -3,6 +3,9 @@ from typing import TYPE_CHECKING import boto3 +from mypy_boto3_rds.type_defs import CreateDBInstanceResultTypeDef + +from tests.integration.helpers.boto_client_helpers import client_in_account_region if TYPE_CHECKING: from mypy_boto3_rds import RDSClient @@ -20,3 +23,72 @@ def get_rds_cluster_state(cluster_id: str) -> str: rds: RDSClient = boto3.client("rds") response = rds.describe_db_clusters(DBClusterIdentifier=cluster_id) return response["DBClusters"][0]["Status"] + + +def stop_rds_instances( + *instance_ids: str, + account: str = "123456789012", + region: str = "us-east-1", +) -> None: + rds: RDSClient = client_in_account_region("rds", account, region) + for rds_instance in instance_ids: + rds.stop_db_instance(DBInstanceIdentifier=rds_instance) + + +def create_rds_instances( + count: int, + schedule_name: str = "test-schedule", + account: str = "123456789012", + region: str = "us-east-1", + instance_type: str = "db.m5.large", + engine: str = "postgres", + id_prefix: str = "test-rds-instance", +) -> tuple[str, ...]: + rds_client: RDSClient = client_in_account_region("rds", account, region) + + ids: list[str] = list() + for i in range(count): + instance_id = f"{id_prefix}-{i}" + result: CreateDBInstanceResultTypeDef = rds_client.create_db_instance( + DBInstanceIdentifier=instance_id, + DBInstanceClass=instance_type, + Engine=engine, + ) + instance_arn = result["DBInstance"]["DBInstanceArn"] + rds_client.add_tags_to_resource( + ResourceName=instance_arn, + Tags=[{"Key": "Schedule", "Value": schedule_name}], + ) + ids.append(instance_id) + + return tuple(ids) + + +def start_rds_instances( + *instance_ids: str, + account: str = "123456789012", + region: str = "us-east-1", +) -> None: + rds: RDSClient = client_in_account_region("rds", account, region) + for rds_instance in instance_ids: + rds.start_db_instance(DBInstanceIdentifier=rds_instance) + + +def stop_rds_clusters( + *cluster_ids: str, + 
account: str = "123456789012", + region: str = "us-east-1", +) -> None: + rds: RDSClient = client_in_account_region("rds", account, region) + for rds_cluster in cluster_ids: + rds.stop_db_cluster(DBClusterIdentifier=rds_cluster) + + +def start_rds_clusters( + *cluster_ids: str, + account: str = "123456789012", + region: str = "us-east-1", +) -> None: + rds: RDSClient = client_in_account_region("rds", account, region) + for rds_cluster in cluster_ids: + rds.start_db_cluster(DBClusterIdentifier=rds_cluster) diff --git a/source/app/tests/integration/helpers/run_handler.py b/source/app/tests/integration/helpers/run_handler.py new file mode 100644 index 00000000..563c8847 --- /dev/null +++ b/source/app/tests/integration/helpers/run_handler.py @@ -0,0 +1,205 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from contextlib import contextmanager +from dataclasses import dataclass, field +from datetime import datetime +from typing import Any, Iterator, Optional, Sequence, TypedDict + +from mypy_boto3_ec2.literals import InstanceTypeType + +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, +) +from instance_scheduler.handler.scheduling_request import ( + SchedulingRequest, + SchedulingRequestHandler, +) +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.in_memory_period_definition_store import ( + InMemoryPeriodDefinitionStore, +) +from instance_scheduler.model.store.in_memory_schedule_definition_store import ( + InMemoryScheduleDefinitionStore, +) +from instance_scheduler.util.scheduling_target import SchedulingTarget +from tests.context import MockLambdaContext +from tests.logger import MockLogger +from 
tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, +) + + +def _default_schedules() -> list[ScheduleDefinition]: + return [ + ScheduleDefinition( + name="test-schedule", + timezone="UTC", + periods=[PeriodIdentifier.of(period_name="test-period")], + ) + ] + + +def _default_periods() -> list[PeriodDefinition]: + return [PeriodDefinition(name="test-period", begintime="10:00", endtime="20:00")] + + +def target( + account: str = "123456789012", region: str = "us-east-1", service: str = "ec2" +) -> SchedulingTarget: + return SchedulingTarget(account=account, region=region, service=service) + + +@dataclass +class SchedulingTestContext: + schedules: list[ScheduleDefinition] = field(default_factory=_default_schedules) + periods: list[PeriodDefinition] = field(default_factory=_default_periods) + + def run_scheduling_request_handler( + self, + dt: datetime, + target: SchedulingTarget = target(), + environment: SchedulingRequestEnvironment = MockSchedulingRequestEnvironment(), + ) -> Any: + + schedule_store = InMemoryScheduleDefinitionStore() + period_store = InMemoryPeriodDefinitionStore() + for schedule in self.schedules: + schedule_store.put(schedule) + for period in self.periods: + period_store.put(period) + + event: SchedulingRequest = { + "action": "scheduler:run", + "account": target.account, + "region": target.region, + "service": target.service, + "current_dt": dt.isoformat(), + "schedules": schedule_store.serialize(), + "periods": period_store.serialize(), + "dispatch_time": "2023-05-12 14:55:10.600619", + } + + return SchedulingRequestHandler( + event, MockLambdaContext(), environment, MockLogger() + ).handle_request() + + +@contextmanager +def simple_schedule( + name: str = "test-schedule", + timezone: str = "UTC", + begintime: Optional[str] = None, + endtime: Optional[str] = None, + weekdays: Optional[set[str]] = None, + monthdays: Optional[set[str]] = None, + months: Optional[set[str]] = None, + override_status: 
Optional[str] = None, + description: Optional[str] = None, + stop_new_instances: bool = True, + ssm_maintenance_window: Optional[Sequence[str]] = None, + enforced: bool = False, + hibernate: bool = False, + retain_running: bool = False, +) -> Iterator[SchedulingTestContext]: + yield SchedulingTestContext( + schedules=[ + ScheduleDefinition( + name=name, + periods=[PeriodIdentifier.of(f"{name}-period")], + timezone=timezone, + override_status=override_status, + description=description, + stop_new_instances=stop_new_instances, + ssm_maintenance_window=ssm_maintenance_window, + enforced=enforced, + hibernate=hibernate, + retain_running=retain_running, + ) + ], + periods=[ + PeriodDefinition( + name=f"{name}-period", + begintime=begintime, + endtime=endtime, + weekdays=weekdays, + monthdays=monthdays, + months=months, + ) + ], + ) + + +@contextmanager +def multi_period_schedule( + name: str = "test-schedule", + timezone: str = "UTC", + period_definitions: list[PeriodDefinition] = [], + override_status: Optional[str] = None, + description: Optional[str] = None, + stop_new_instances: bool = False, + ssm_maintenance_window: Optional[str] = None, + enforced: bool = False, + hibernate: bool = False, + retain_running: bool = False, +) -> Iterator[SchedulingTestContext]: + yield SchedulingTestContext( + schedules=[ + ScheduleDefinition( + name=name, + periods=[ + PeriodIdentifier.of(period.name) for period in period_definitions + ], + timezone=timezone, + override_status=override_status, + description=description, + stop_new_instances=stop_new_instances, + ssm_maintenance_window=ssm_maintenance_window, + enforced=enforced, + hibernate=hibernate, + retain_running=retain_running, + ) + ], + periods=period_definitions, + ) + + +class PeriodWithDesiredType(TypedDict): + period: PeriodDefinition + desired_type: Optional[InstanceTypeType] + + +@contextmanager +def resizable_multi_period_schedule( + name: str = "test-schedule", + timezone: str = "UTC", + period_definitions: 
list[PeriodWithDesiredType] = [], + override_status: Optional[str] = None, + description: Optional[str] = None, + stop_new_instances: bool = False, + ssm_maintenance_window: Optional[str] = None, + enforced: bool = False, + hibernate: bool = False, + retain_running: bool = False, +) -> Iterator[SchedulingTestContext]: + yield SchedulingTestContext( + schedules=[ + ScheduleDefinition( + name=name, + periods=[ + PeriodIdentifier.of(period["period"].name, period["desired_type"]) + for period in period_definitions + ], + timezone=timezone, + override_status=override_status, + description=description, + stop_new_instances=stop_new_instances, + ssm_maintenance_window=ssm_maintenance_window, + enforced=enforced, + hibernate=hibernate, + retain_running=retain_running, + ) + ], + periods=[period_with_type["period"] for period_with_type in period_definitions], + ) diff --git a/source/app/tests/integration/helpers/schedule_helpers.py b/source/app/tests/integration/helpers/schedule_helpers.py index 7f5628e0..9b7bb806 100644 --- a/source/app/tests/integration/helpers/schedule_helpers.py +++ b/source/app/tests/integration/helpers/schedule_helpers.py @@ -9,5 +9,5 @@ def at_time( return datetime.datetime.combine(date, time, datetime.timezone.utc) -def quick_time(hrs: int, minutes: int, seconds: int) -> datetime.datetime: +def quick_time(hrs: int, minutes: int, seconds: int = 0) -> datetime.datetime: return at_time(datetime.time(hrs, minutes, seconds)) diff --git a/source/app/tests/integration/helpers/scheduling_context_builder.py b/source/app/tests/integration/helpers/scheduling_context_builder.py index e009eb0d..facfd15f 100644 --- a/source/app/tests/integration/helpers/scheduling_context_builder.py +++ b/source/app/tests/integration/helpers/scheduling_context_builder.py @@ -1,7 +1,6 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 from datetime import datetime, time -from typing import Optional from zoneinfo import ZoneInfo from instance_scheduler.configuration.instance_schedule import InstanceSchedule @@ -9,11 +8,7 @@ from instance_scheduler.configuration.running_period_dict_element import ( RunningPeriodDictElement, ) -from instance_scheduler.configuration.scheduling_context import ( - SchedulingContext, - TagTemplate, -) -from instance_scheduler.handler.scheduling_request import SchedulerRequest +from instance_scheduler.configuration.scheduling_context import SchedulingContext def default_test_schedules() -> dict[str, InstanceSchedule]: @@ -24,6 +19,7 @@ def default_test_schedules() -> dict[str, InstanceSchedule]: def default_test_schedule() -> InstanceSchedule: return InstanceSchedule( name="test-schedule", + timezone=ZoneInfo("UTC"), periods=default_test_periods(), ) @@ -40,66 +36,18 @@ def default_test_periods() -> list[RunningPeriodDictElement]: ] -def custom_schedule( - periods: list[RunningPeriodDictElement] = default_test_periods(), - # mutable warning is desired behavior here as an empty period list should NOT be resolved to the default - timezone: str = "UTC", - override_status: Optional[str] = None, - description: Optional[str] = None, - use_metrics: Optional[bool] = None, - stop_new_instances: Optional[bool] = None, - use_maintenance_window: Optional[bool] = False, - ssm_maintenance_window: Optional[str] = None, - enforced: Optional[bool] = False, - hibernate: Optional[bool] = False, - retain_running: Optional[bool] = False, -) -> dict[str, InstanceSchedule]: - return { - "test-schedule": InstanceSchedule( - name="test-schedule", - periods=periods, - timezone=timezone, - override_status=override_status, - description=description, - use_metrics=use_metrics, - stop_new_instances=stop_new_instances, - use_maintenance_window=use_maintenance_window, - ssm_maintenance_window=ssm_maintenance_window, - enforced=enforced, - hibernate=hibernate, - 
retain_running=retain_running, - ) - } - - -def build_context( +def build_scheduling_context( current_dt: datetime, schedules: dict[str, InstanceSchedule] = None, # type: ignore[assignment] - account_id: str = "", + account_id: str = "123456789012", service: str = "ec2", region: str = "us-east-1", - tag_name: str = "Schedule", default_timezone: str = "UTC", - schedule_clusters: bool = False, - trace: bool = False, - enable_ssm_maintenance_windows: bool = False, - use_metrics: bool = False, - namespace: str = "namespace", - aws_partition: str = "aws", - scheduler_role_name: str = "scheduler_role", - organization_id: str = "", - schedule_lambda_account: bool = True, - create_rds_snapshot: bool = False, - started_tags: Optional[list[TagTemplate]] = None, - stopped_tags: Optional[list[TagTemplate]] = None, + scheduling_interval_minutes: int = 5, ) -> SchedulingContext: """abstraction layer on SchedulingContextConstructor that provides testing defaults for most values""" if schedules is None: schedules = default_test_schedules() - if started_tags is None: - started_tags = [] - if stopped_tags is None: - stopped_tags = [] return SchedulingContext( current_dt=current_dt, @@ -107,26 +55,6 @@ def build_context( account_id=account_id, service=service, region=region, - tag_name=tag_name, default_timezone=ZoneInfo(default_timezone), - schedule_clusters=schedule_clusters, - trace=trace, - enable_ssm_maintenance_windows=enable_ssm_maintenance_windows, - use_metrics=use_metrics, - namespace=namespace, - aws_partition=aws_partition, - scheduler_role_name=scheduler_role_name, - organization_id=organization_id, - schedule_lambda_account=schedule_lambda_account, - create_rds_snapshot=create_rds_snapshot, - started_tags=started_tags, - stopped_tags=stopped_tags, + scheduling_interval_minutes=scheduling_interval_minutes, ) - - -def build_scheduling_event(scheduling_context: SchedulingContext) -> SchedulerRequest: - return { - "action": "scheduler:run", - "configuration": 
scheduling_context.to_dict(), - "dispatch_time": "2023-05-12 14:55:10.600619", - } diff --git a/source/app/tests/integration/ops_metrics/conftest.py b/source/app/tests/integration/ops_metrics/conftest.py index d7e65899..9e080d22 100644 --- a/source/app/tests/integration/ops_metrics/conftest.py +++ b/source/app/tests/integration/ops_metrics/conftest.py @@ -1,33 +1,28 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -import uuid +from contextlib import contextmanager from typing import Iterator from unittest.mock import MagicMock, patch from _pytest.fixtures import fixture -from requests import Response +from urllib3 import HTTPResponse -solution_metrics_uuid = uuid.UUID("3d522722-236f-4484-bbd7-e9b1da1bb347") - -@fixture -def metrics_enabled() -> Iterator[None]: +@contextmanager +def override_should_send_metric(return_value: bool) -> Iterator[None]: with patch( "instance_scheduler.ops_metrics.metrics.should_collect_metric" ) as should_collect_metrics_func: - with patch( - "instance_scheduler.ops_metrics.metrics._get_deployment_uuid" - ) as get_deployment_uuid_func: - should_collect_metrics_func.return_value = True - get_deployment_uuid_func.return_value = solution_metrics_uuid - yield + should_collect_metrics_func.return_value = return_value + yield @fixture -def mock_metrics_endpoint(metrics_enabled: None) -> Iterator[MagicMock]: - with patch("requests.post") as post_request_func: - post_response = Response() - post_response.status_code = 200 +def mock_metrics_endpoint() -> Iterator[MagicMock]: + with patch( + "instance_scheduler.ops_metrics.metrics.http.request" + ) as post_request_func: + post_response = HTTPResponse(status=200) post_request_func.return_value = post_response yield post_request_func diff --git a/source/app/tests/integration/ops_metrics/test_cli_metrics.py b/source/app/tests/integration/ops_metrics/test_cli_metrics.py index f46c33da..16dbd28e 100644 --- 
a/source/app/tests/integration/ops_metrics/test_cli_metrics.py +++ b/source/app/tests/integration/ops_metrics/test_cli_metrics.py @@ -7,35 +7,39 @@ from freezegun.api import freeze_time -from instance_scheduler.handler.cli import CliHandler -from instance_scheduler.util.app_env import AppEnv +from instance_scheduler import __version__ +from instance_scheduler.handler.cli.cli_request_handler import CliRequestHandler from tests.context import MockLambdaContext -from tests.integration.ops_metrics.conftest import solution_metrics_uuid from tests.logger import MockLogger +from tests.test_utils.mock_metrics_environment import MockMetricsEnviron @freeze_time(datetime.datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC"))) -def test_cli_handler_sends_expected_metric( - mock_metrics_endpoint: MagicMock, app_env: AppEnv -) -> None: - action = "my-action" - parameters = {"my_key": "my-value"} - handler = CliHandler( - {"action": action, "parameters": parameters}, - MockLambdaContext(), - ) - handler._logger = MockLogger() - handler.handle_request() +def test_cli_handler_sends_expected_metric(mock_metrics_endpoint: MagicMock) -> None: + with MockMetricsEnviron(send_anonymous_metrics=True) as metrics_environment: + action = "my-action" + parameters = {"my_key": "my-value"} + handler = CliRequestHandler( + { + "action": action, + "parameters": parameters, + "version": __version__, + }, + MockLambdaContext(), + ) + handler._logger = MockLogger() + handler.handle_request() - expected_metric = { - "TimeStamp": "2023-06-12 12:00:00", - "UUID": str(solution_metrics_uuid), - "Solution": app_env.solution_id, - "Version": app_env.solution_version, - "Event_Name": "cli_request", - "Context": {"command_used": action}, - } + expected_metric = { + "timestamp": "2023-06-12 12:00:00", + "uuid": str(metrics_environment.metrics_uuid), + "solution": metrics_environment.solution_id, + "version": metrics_environment.solution_version, + "event_name": "cli_request", + "context_version": 1, + 
"context": {"command_used": action}, + } - assert mock_metrics_endpoint.call_count == 1 - json_payload = mock_metrics_endpoint.call_args[1]["data"] - assert json.loads(json_payload) == expected_metric + assert mock_metrics_endpoint.call_count == 1 + json_payload = mock_metrics_endpoint.call_args[1]["body"] + assert json.loads(json_payload) == expected_metric diff --git a/source/app/tests/integration/ops_metrics/test_deployment_description_metrics.py b/source/app/tests/integration/ops_metrics/test_deployment_description_metrics.py index 5a3123ae..ffd02653 100644 --- a/source/app/tests/integration/ops_metrics/test_deployment_description_metrics.py +++ b/source/app/tests/integration/ops_metrics/test_deployment_description_metrics.py @@ -1,150 +1,178 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import json -from datetime import datetime, time -from os import environ +from datetime import datetime from unittest.mock import MagicMock, patch from zoneinfo import ZoneInfo from freezegun import freeze_time -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.configuration.running_period_dict_element import ( - RunningPeriodDictElement, -) -from instance_scheduler.configuration.scheduler_config import GlobalConfig from instance_scheduler.handler.scheduling_orchestrator import ( OrchestrationRequest, SchedulingOrchestratorHandler, ) -from instance_scheduler.util.app_env import AppEnv +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from 
instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleDefinitionStore, +) from tests.context import MockLambdaContext -from tests.integration.ops_metrics.conftest import solution_metrics_uuid +from tests.integration.ops_metrics.conftest import override_should_send_metric +from tests.logger import MockLogger +from tests.test_utils.mock_metrics_environment import MockMetricsEnviron +from tests.test_utils.mock_orchestrator_environment import MockOrchestratorEnvironment +from tests.test_utils.unordered_list import UnorderedList mockEvent: OrchestrationRequest = {"scheduled_action": "run_orchestrator"} -periods: list[RunningPeriodDictElement] = [ - RunningPeriodDictElement( - period=RunningPeriod( - name="test-period", - begintime=time(10, 0, 0), - endtime=time(20, 0, 0), + +@patch("instance_scheduler.handler.scheduling_orchestrator.should_collect_metric") +@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") +@freeze_time(datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC"))) +def test_orchestrator_sends_expected_metric( + run_lambda_func: MagicMock, + should_collect_metric_func: MagicMock, + mock_metrics_endpoint: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, + config_item_store: DdbConfigItemStore, +) -> None: + # account configuration + config_item_store.put( + DdbConfigItem( + organization_id="", remote_account_ids=["222233334444", "333344445555"] ) ) -] -global_config = GlobalConfig( - scheduled_services=["ec2", "rds"], - schedule_clusters=False, - tag_name="Schedule", - regions=["us-east-1", "us-east-2", "us-west-2"], - default_timezone=ZoneInfo("Asia/Hong_Kong"), - schedules={ - "all-flags": InstanceSchedule( + period_store.put( + PeriodDefinition(name="test-period", begintime="05:00", endtime="06:00") + ) + period_store.put( + PeriodDefinition(name="test-period-one-sided-begin", 
begintime="05:00") + ) + period_store.put( + PeriodDefinition(name="test-period-one-sided-end", endtime="06:00") + ) + + # test schedules + schedule_store.put( + ScheduleDefinition( name="all-flags", - periods=periods, + periods=[PeriodIdentifier.of("test-period")], override_status="running", - use_metrics=True, stop_new_instances=True, - use_maintenance_window=True, + ssm_maintenance_window=["test-window"], enforced=True, hibernate=True, retain_running=True, timezone="Asia/Hong_Kong", - ), - "no-flags-cfn": InstanceSchedule( + ) + ) + schedule_store.put( + ScheduleDefinition( name="no-flags-cfn", - periods=periods, + periods=[PeriodIdentifier.of("test-period-one-sided-begin")], configured_in_stack="some-stack-arn", + stop_new_instances=False, timezone="UTC", - ), - "non-default-tz-cfn": InstanceSchedule( + ) + ) + schedule_store.put( + ScheduleDefinition( name="non-default-tz-cfn", - periods=periods, + periods=[ + PeriodIdentifier.of("test-period"), + PeriodIdentifier.of("test-period-one-sided-begin"), + ], configured_in_stack="some-stack-arn", timezone="UTC", - ), - "explicit-tz-matches-default": InstanceSchedule( + ) + ) + schedule_store.put( + ScheduleDefinition( name="explicit-tz-matches-default", - periods=periods, + periods=[ + PeriodIdentifier.of("test-period-one-sided-begin"), + PeriodIdentifier.of("test-period-one-sided-end"), + ], # one sided schedule counted once timezone="Asia/Hong_Kong", # should not be counted by tz metric - ), - }, - trace=False, - enable_ssm_maintenance_windows=True, - use_metrics=True, - schedule_lambda_account=True, - create_rds_snapshot=False, - started_tags="non-null tag", - stopped_tags="", - scheduler_role_name="Scheduler-Role", - namespace="dev", - organization_id="", - aws_partition="aws", - remote_account_ids=["222233334444", "333344445555"], -) - -env = { - "START_TAGS": "non-null tag", - "STOP_TAGS": "", - "SCHEDULER_FREQUENCY": "10", - "ENABLE_AWS_ORGANIZATIONS": "False", - "ENABLE_EC2_SSM_MAINTENANCE_WINDOWS": "True", 
-} + ) + ) + orchestrator_env = MockOrchestratorEnvironment( + scheduler_frequency_minutes=10, + stop_tags=[], + start_tags=["non-null tag"], + enable_schedule_hub_account=True, + schedule_regions=["us-east-1", "us-west-2"], + enable_ec2_service=True, + enable_rds_service=True, + enable_rds_clusters=True, + enable_neptune_service=True, + enable_docdb_service=True, + enable_asg_service=True, + enable_aws_organizations=False, + enable_rds_snapshots=False, + enable_ec2_ssm_maintenance_windows=True, + ops_dashboard_enabled=True, + default_timezone=ZoneInfo("Asia/Hong_Kong"), + ) -@patch("instance_scheduler.handler.scheduling_orchestrator.should_collect_metric") -@patch.object(SchedulingOrchestratorHandler, "_run_scheduling_lambda") -@patch.dict(environ, env) -@freeze_time(datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC"))) -def test_orchestrator_sends_expected_metric( - run_lambda_func: MagicMock, - should_collect_metric_func: MagicMock, - mock_metrics_endpoint: MagicMock, - app_env: AppEnv, -) -> None: - should_collect_metric_func.return_value = True - with patch.object(SchedulingOrchestratorHandler, "configuration", global_config): + with MockMetricsEnviron( + send_anonymous_metrics=True + ) as metrics_environ, override_should_send_metric(True): + should_collect_metric_func.return_value = ( + True # override return as this metric is normally sent daily + ) handler = SchedulingOrchestratorHandler( - event=mockEvent, context=MockLambdaContext() + event=mockEvent, + context=MockLambdaContext(), + env=orchestrator_env, + logger=MockLogger(), ) handler.handle_request() expected_metric = { - "TimeStamp": "2023-06-12 12:00:00", - "UUID": str(solution_metrics_uuid), - "Solution": app_env.solution_id, - "Version": app_env.solution_version, - "Event_Name": "deployment_description", - "Context": { - "services": ["ec2", "rds"], - "regions": ["us-east-1", "us-east-2", "us-west-2"], + "timestamp": "2023-06-12 12:00:00", + "uuid": str(metrics_environ.metrics_uuid), + 
"solution": metrics_environ.solution_id, + "version": metrics_environ.solution_version, + "event_name": "deployment_description", + "context_version": 1, + "context": { + "services": UnorderedList( + ["ec2", "rds", "rds-clusters", "neptune", "docdb", "asg"] + ), + "regions": orchestrator_env.schedule_regions, "num_accounts": 3, # local account + 2 remote "num_schedules": 4, "num_cfn_schedules": 2, + "num_one_sided_schedules": 3, "default_timezone": "Asia/Hong_Kong", - "schedule_aurora_clusters": False, "create_rds_snapshots": False, "schedule_interval_minutes": 10, "memory_size_mb": 128, # memory size from MockLambdaContext "using_organizations": False, "enable_ec2_ssm_maintenance_windows": True, + "ops_dashboard_enabled": orchestrator_env.ops_dashboard_enabled, "num_started_tags": 1, "num_stopped_tags": 0, "schedule_flag_counts": { - "stop_new_instances": 1, + "stop_new_instances": 3, "enforced": 1, "retain_running": 1, "hibernate": 1, "override": 1, "use_ssm_maintenance_window": 1, - "use_metrics": 1, "non_default_timezone": 2, }, }, } assert mock_metrics_endpoint.call_count == 1 - json_payload = mock_metrics_endpoint.call_args[1]["data"] + json_payload = mock_metrics_endpoint.call_args[1]["body"] assert json.loads(json_payload) == expected_metric diff --git a/source/app/tests/integration/ops_metrics/test_instance_count_metrics.py b/source/app/tests/integration/ops_metrics/test_instance_count_metrics.py index 96b78aec..02fc6b69 100644 --- a/source/app/tests/integration/ops_metrics/test_instance_count_metrics.py +++ b/source/app/tests/integration/ops_metrics/test_instance_count_metrics.py @@ -1,75 +1,45 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 import json -from datetime import datetime, time +from datetime import datetime from typing import Any -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock from zoneinfo import ZoneInfo -import boto3 from freezegun import freeze_time -from mypy_boto3_ec2.client import EC2Client -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.configuration.running_period_dict_element import ( - RunningPeriodDictElement, -) -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition from instance_scheduler.ops_metrics import GatheringFrequency from instance_scheduler.ops_metrics.metric_type.instance_count_metric import ( InstanceCountMetric, ) from instance_scheduler.schedulers.instance_states import InstanceStates -from instance_scheduler.util.app_env import AppEnv -from tests import ami -from tests.context import MockLambdaContext +from tests.integration.helpers.ec2_helpers import create_ec2_instances +from tests.integration.helpers.run_handler import SchedulingTestContext from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, -) -from tests.integration.ops_metrics.conftest import solution_metrics_uuid +from tests.integration.ops_metrics.conftest import override_should_send_metric +from tests.test_utils.mock_metrics_environment import MockMetricsEnviron -schedule_1 = InstanceSchedule( - name="sched_1", - periods=[ - RunningPeriodDictElement( - period=RunningPeriod(name="stop", begintime=time(12, 0, 0)) - ) - ], +schedule_1 = 
ScheduleDefinition( + name="sched_1", periods=[PeriodIdentifier.of("period1")] ) -schedule_2 = InstanceSchedule( - name="sched_2", - periods=[ - RunningPeriodDictElement( - period=RunningPeriod(name="start", endtime=time(12, 0, 0)) - ) - ], +schedule_2 = ScheduleDefinition( + name="sched_2", periods=[PeriodIdentifier.of("period1")] ) - -def create_test_instances(count: int, schedule: InstanceSchedule) -> list[str]: - ec2_client: EC2Client = boto3.client("ec2") - create_response = ec2_client.run_instances( - ImageId=ami, MinCount=count, MaxCount=count - ) - instance_ids = [instance["InstanceId"] for instance in create_response["Instances"]] - ec2_client.create_tags( - Resources=instance_ids, Tags=[{"Key": "Schedule", "Value": schedule.name}] - ) - - return instance_ids +periods = [PeriodDefinition(name="period1", begintime="10:00", endtime="20:00")] def get_sent_instance_count_metric(metrics_endpoint: MagicMock) -> Any: desired_metric: dict[str, Any] = {} for call_args in metrics_endpoint.call_args_list: - json_payload = call_args[1]["data"] + json_payload = call_args[1]["body"] sent_metric = json.loads(json_payload) - if sent_metric["Event_Name"] == "instance_count": + if sent_metric["event_name"] == "instance_count": if desired_metric: raise Exception("metric sent multiple times") desired_metric = sent_metric @@ -83,37 +53,39 @@ def test_instance_count_metric_is_daily_metric() -> None: assert InstanceCountMetric.collection_frequency is GatheringFrequency.DAILY -@patch("instance_scheduler.schedulers.instance_scheduler.should_collect_metric") @freeze_time(datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC"))) def test_scheduling_execution_sends_expected_instance_count_metric( - should_collect_metrics_func: MagicMock, mock_metrics_endpoint: MagicMock, ec2_instance_states: InstanceStates, - app_env: AppEnv, ) -> None: - should_collect_metrics_func.return_value = True - - create_test_instances(5, schedule_1) - create_test_instances(10, schedule_2) - - context = 
build_context(current_dt=quick_time(10, 0, 0)) - event = build_scheduling_event(context) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - expected_metric = { - "TimeStamp": "2023-06-12 12:00:00", - "UUID": str(solution_metrics_uuid), - "Solution": app_env.solution_id, - "Version": app_env.solution_version, - "Event_Name": "instance_count", - "Context": { - "service": "ec2", - "region": "us-east-1", - "num_instances": 15, - "num_schedules": 2, - }, - } - - sent_metric = get_sent_instance_count_metric(mock_metrics_endpoint) - assert sent_metric == expected_metric + with MockMetricsEnviron( + send_anonymous_metrics=True + ) as metrics_environment, override_should_send_metric(True): + create_ec2_instances(5, schedule_1.name) + create_ec2_instances(10, schedule_2.name) + create_ec2_instances( + 3, "invalid" + ) # these should be ignored as their schedules are invalid/missing + + context = SchedulingTestContext( + schedules=[schedule_1, schedule_2], periods=periods + ) + context.run_scheduling_request_handler(dt=quick_time(10, 0)) + + expected_metric = { + "timestamp": "2023-06-12 12:00:00", + "uuid": str(metrics_environment.metrics_uuid), + "solution": metrics_environment.solution_id, + "version": metrics_environment.solution_version, + "event_name": "instance_count", + "context_version": 1, + "context": { + "service": "ec2", + "region": "us-east-1", + "num_instances": 15, + "num_schedules": 2, + }, + } + + sent_metric = get_sent_instance_count_metric(mock_metrics_endpoint) + assert sent_metric == expected_metric diff --git a/source/app/tests/integration/ops_metrics/test_metrics_handler.py b/source/app/tests/integration/ops_metrics/test_metrics_handler.py index b4cc95f1..090cbea7 100644 --- a/source/app/tests/integration/ops_metrics/test_metrics_handler.py +++ b/source/app/tests/integration/ops_metrics/test_metrics_handler.py @@ -1,92 +1,84 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 import json -import uuid from dataclasses import dataclass from datetime import datetime, timedelta -from functools import wraps from os import environ -from typing import Any, Callable, ClassVar, TypeVar, cast +from typing import Any, Callable, ClassVar, TypeVar from unittest.mock import MagicMock, patch from zoneinfo import ZoneInfo from freezegun import freeze_time -from instance_scheduler.ops_metrics import GatheringFrequency, metrics +from instance_scheduler.ops_metrics import GatheringFrequency from instance_scheduler.ops_metrics.metric_type.ops_metric import OpsMetric from instance_scheduler.ops_metrics.metrics import collect_metric, should_collect_metric -from instance_scheduler.util.app_env import AppEnv -from tests.integration.ops_metrics.conftest import solution_metrics_uuid from tests.logger import MockLogger +from tests.test_utils.mock_metrics_environment import MockMetricsEnviron @dataclass(frozen=True) class UnlimitedTestMetric(OpsMetric): collection_frequency: ClassVar[GatheringFrequency] = GatheringFrequency.UNLIMITED event_name: ClassVar[str] = "unlimited-metrics" + context_version: ClassVar[int] = 1 @dataclass(frozen=True) class DailyTestMetric(OpsMetric): collection_frequency: ClassVar[GatheringFrequency] = GatheringFrequency.DAILY event_name: ClassVar[str] = "daily-metrics" + context_version: ClassVar[int] = 1 FuncT = TypeVar("FuncT", bound=Callable[..., Any]) -def metrics_uuid(metric_uuid: uuid.UUID) -> Callable[[FuncT], FuncT]: - def decorator(func_to_decorate: FuncT) -> FuncT: - @wraps(func_to_decorate) - def wrapper(*args: Any, **kwargs: Any) -> Any: - print("metric uuid: {}".format(metric_uuid)) - with patch( - "instance_scheduler.ops_metrics.metrics._get_deployment_uuid" - ) as solution_id_func: - solution_id_func.return_value = metric_uuid - func_to_decorate(*args, **kwargs) +def test_metrics_disabled_when_environment_is_missing() -> None: + environ.clear() + assert not 
should_collect_metric(UnlimitedTestMetric()) - return cast(FuncT, wrapper) - return decorator +def test_attempting_to_collect_metrics_when_environ_is_missing_does_not_cause_error() -> ( + None +): + environ.clear() + collect_metric(UnlimitedTestMetric(), MockLogger()) + collect_metric(DailyTestMetric(), MockLogger()) -@patch.dict(environ, {"SEND_METRICS": "False"}) -@metrics_uuid(uuid.uuid4()) def test_metrics_not_collected_when_disabled() -> None: - assert not should_collect_metric(UnlimitedTestMetric(), MockLogger()) + with MockMetricsEnviron(send_anonymous_metrics=False): + assert not should_collect_metric(UnlimitedTestMetric()) -@patch.dict(environ, {"SEND_METRICS": "True"}) -@metrics_uuid(uuid.uuid4()) def test_unlimited_metric_should_be_sent() -> None: - assert should_collect_metric(UnlimitedTestMetric, MockLogger()) + with MockMetricsEnviron(send_anonymous_metrics=True): + assert should_collect_metric(UnlimitedTestMetric) -@patch.dict(environ, {"SEND_METRICS": "True"}) -@metrics_uuid(uuid.uuid4()) def test_daily_metric_should_only_be_sent_once_daily() -> None: - minutes_in_day = 1440 - start_of_day = datetime(2023, 6, 23, 0, 0, 0) - - for interval in [5, 30, 60]: # test intervals of 5, 30, and 60 minutes - num_metrics_gathered = 0 - for offset in range(0, minutes_in_day, interval): - sim_time = start_of_day + timedelta(minutes=offset) - with patch.dict( - environ, {"SCHEDULER_FREQUENCY": str(interval)} - ), freeze_time(sim_time): - if should_collect_metric(DailyTestMetric, MockLogger()): - num_metrics_gathered += 1 - - assert ( - num_metrics_gathered == 1 - ), "failed to gather specified number of metrics with interval:{} see output log for uuid used".format( - interval - ) + with MockMetricsEnviron(send_anonymous_metrics=True): + minutes_in_day = 1440 + start_of_day = datetime(2023, 6, 23, 0, 0, 0) + + for interval in [5, 30, 60]: # test intervals of 5, 30, and 60 minutes + num_metrics_gathered = 0 + for offset in range(0, minutes_in_day, interval): + 
sim_time = start_of_day + timedelta(minutes=offset) + with patch.dict( + environ, {"SCHEDULER_FREQUENCY": str(interval)} + ), freeze_time(sim_time): + if should_collect_metric(DailyTestMetric): + num_metrics_gathered += 1 + + assert ( + num_metrics_gathered == 1 + ), "failed to gather specified number of metrics with interval:{} see output log for uuid used".format( + interval + ) -@metrics_uuid(uuid.uuid4()) def test_collect_metric_does_nothing_when_should_collect_returns_false( mock_metrics_endpoint: MagicMock, ) -> None: @@ -100,9 +92,9 @@ def test_collect_metric_does_nothing_when_should_collect_returns_false( @freeze_time(datetime(2023, 6, 23, 10, 0, 5, tzinfo=ZoneInfo("UTC"))) def test_collect_metric_sends_metric_when_should_collect_returns_true( - mock_metrics_endpoint: MagicMock, app_env: AppEnv + mock_metrics_endpoint: MagicMock, ) -> None: - with patch( + with MockMetricsEnviron() as metrics_environ, patch( "instance_scheduler.ops_metrics.metrics.should_collect_metric" ) as should_collect_metric_func: should_collect_metric_func.return_value = True @@ -110,12 +102,13 @@ def test_collect_metric_sends_metric_when_should_collect_returns_true( expected_data = json.dumps( { - "TimeStamp": "2023-06-23 10:00:05", - "UUID": str(solution_metrics_uuid), - "Solution": app_env.solution_id, - "Version": app_env.solution_version, - "Event_Name": "unlimited-metrics", - "Context": {}, + "timestamp": "2023-06-23 10:00:05", + "uuid": str(metrics_environ.metrics_uuid), + "solution": metrics_environ.solution_id, + "version": metrics_environ.solution_version, + "event_name": "unlimited-metrics", + "context_version": 1, + "context": {}, }, indent=0, ) @@ -126,17 +119,8 @@ def test_collect_metric_sends_metric_when_should_collect_returns_true( } mock_metrics_endpoint.assert_called_once_with( - app_env.anonymous_metrics_url, - data=expected_data, + "POST", + metrics_environ.anonymous_metrics_url, + body=expected_data, headers=expected_headers, - timeout=300, ) - - 
-@patch("instance_scheduler.ops_metrics.metrics._deployment_uuid_from_ssm") -def test_deployment_uuid_is_cached(deployment_uuid_func: MagicMock) -> None: - logger = MockLogger() - metrics._get_deployment_uuid(logger) - metrics._get_deployment_uuid(logger) - - assert deployment_uuid_func.call_count == 1 diff --git a/source/app/tests/integration/ops_metrics/test_ops_insights_metrics.py b/source/app/tests/integration/ops_metrics/test_ops_insights_metrics.py new file mode 100644 index 00000000..1997fed6 --- /dev/null +++ b/source/app/tests/integration/ops_metrics/test_ops_insights_metrics.py @@ -0,0 +1,293 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import json +from datetime import datetime +from typing import Any, Iterator +from unittest.mock import MagicMock, patch +from zoneinfo import ZoneInfo + +import pytest +from _pytest.fixtures import fixture +from freezegun import freeze_time +from mypy_boto3_ec2.literals import InstanceTypeType + +from instance_scheduler.ops_monitoring.cw_ops_insights import ( + CloudWatchOperationalInsights, +) +from tests.integration.helpers.ec2_helpers import ( + create_ec2_instances, + start_ec2_instances, + stop_ec2_instances, +) +from tests.integration.helpers.run_handler import simple_schedule +from tests.integration.helpers.schedule_helpers import quick_time +from tests.integration.ops_metrics.conftest import override_should_send_metric +from tests.test_utils.mock_metrics_environment import MockMetricsEnviron +from tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, +) +from tests.test_utils.unordered_list import UnorderedList + +# ec2 instance types +small: InstanceTypeType = "t2.micro" +medium: InstanceTypeType = "t2.medium" +large: InstanceTypeType = "t2.large" + + +@fixture +def mocked_put_metric_data() -> Iterator[MagicMock]: + with patch.object( + CloudWatchOperationalInsights, "cloudwatch_client" + ) as 
cloudwatch_client: + with patch.object(cloudwatch_client, "put_metric_data") as put_metric_func: + yield put_metric_func + + +class MetricNotFound(Exception): + pass + + +def get_sent_ops_insight_metric_json(metrics_endpoint: MagicMock) -> Any: + desired_metric: dict[str, Any] = {} + for call_args in metrics_endpoint.call_args_list: + json_payload = call_args[1]["body"] + sent_metric = json.loads(json_payload) + + if sent_metric["event_name"] == "insights_metric": + if desired_metric: + raise Exception("metric sent multiple times") + desired_metric = sent_metric + + if not desired_metric: + raise MetricNotFound("metric not found") + return desired_metric + + +@freeze_time(datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC"))) +def test_op_metrics_sent_to_internal_metrics_endpoint( + state_table: str, + mock_metrics_endpoint: MagicMock, + mocked_put_metric_data: MagicMock, +) -> None: + stop_ec2_instances( + *create_ec2_instances(1, instance_type=small, schedule_name="test-schedule") + ) + start_ec2_instances( + *create_ec2_instances(5, instance_type=medium, schedule_name="test-schedule") + ) + + with ( + MockMetricsEnviron(send_anonymous_metrics=True) as metrics_environ, + override_should_send_metric(True), + simple_schedule(begintime="10:00", endtime="20:00") as context, + ): + context.run_scheduling_request_handler(dt=quick_time(12, 0)) + expected_metric = { + "timestamp": "2023-06-12 12:00:00", + "uuid": str(metrics_environ.metrics_uuid), + "solution": metrics_environ.solution_id, + "version": metrics_environ.solution_version, + "event_name": "insights_metric", + "context_version": 1, + "context": { + "metric_data": UnorderedList( + [ + { + "metric_name": "ManagedInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.micro"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 1, + "unit": "Count", + }, + { + "metric_name": "RunningInstances", + 
"dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.micro"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 0, + "unit": "Count", + }, + { + "metric_name": "StoppedInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.micro"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 1, + "unit": "Count", + }, + { + "metric_name": "ManagedInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.medium"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 5, + "unit": "Count", + }, + { + "metric_name": "RunningInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.medium"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 5, + "unit": "Count", + }, + { + "metric_name": "StoppedInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.medium"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 0, + "unit": "Count", + }, + ] + ), + }, + } + sent_metric = get_sent_ops_insight_metric_json(mock_metrics_endpoint) + assert sent_metric == expected_metric + + +@freeze_time(datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC"))) +def test_aws_metrics_still_sent_when_internal_metrics_are_disabled( + state_table: str, + mock_metrics_endpoint: MagicMock, + mocked_put_metric_data: MagicMock, +) -> None: + stop_ec2_instances( + *create_ec2_instances(1, instance_type=small, schedule_name="test-schedule") + ) + start_ec2_instances( + *create_ec2_instances(5, instance_type=medium, schedule_name="test-schedule") + ) + + with ( + 
MockMetricsEnviron(send_anonymous_metrics=True) as metrics_environ, + override_should_send_metric(True), + simple_schedule(begintime="10:00", endtime="20:00") as context, + ): + context.run_scheduling_request_handler( + dt=quick_time(12, 0), + environment=MockSchedulingRequestEnvironment(enable_ops_monitoring=False), + ) + expected_metric = { + "timestamp": "2023-06-12 12:00:00", + "uuid": str(metrics_environ.metrics_uuid), + "solution": metrics_environ.solution_id, + "version": metrics_environ.solution_version, + "event_name": "insights_metric", + "context_version": 1, + "context": { + "metric_data": UnorderedList( + [ + { + "metric_name": "ManagedInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.micro"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 1, + "unit": "Count", + }, + { + "metric_name": "RunningInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.micro"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 0, + "unit": "Count", + }, + { + "metric_name": "StoppedInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.micro"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 1, + "unit": "Count", + }, + { + "metric_name": "ManagedInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.medium"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 5, + "unit": "Count", + }, + { + "metric_name": "RunningInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.medium"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": 
"2023-06-12T12:00:00+00:00", + "value": 5, + "unit": "Count", + }, + { + "metric_name": "StoppedInstances", + "dimensions": [ + {"name": "Service", "value": "ec2"}, + {"name": "InstanceType", "value": "t2.medium"}, + {"name": "SchedulingInterval", "value": "5"}, + ], + "timestamp": "2023-06-12T12:00:00+00:00", + "value": 0, + "unit": "Count", + }, + ] + ), + }, + } + + sent_metric = get_sent_ops_insight_metric_json(mock_metrics_endpoint) + assert sent_metric == expected_metric + + +@freeze_time(datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC"))) +def test_aws_metrics_not_sent_when_aws_metrics_disabled( + state_table: str, + mock_metrics_endpoint: MagicMock, + mocked_put_metric_data: MagicMock, +) -> None: + stop_ec2_instances( + *create_ec2_instances(1, instance_type=small, schedule_name="test-schedule") + ) + + with simple_schedule(begintime="10:00", endtime="20:00") as context: + context.run_scheduling_request_handler( + dt=quick_time(12, 0), + environment=MockSchedulingRequestEnvironment(enable_ops_monitoring=False), + ) + + with pytest.raises(MetricNotFound): + get_sent_ops_insight_metric_json(mock_metrics_endpoint) diff --git a/source/app/tests/integration/ops_metrics/test_scheduling_action_metrics.py b/source/app/tests/integration/ops_metrics/test_scheduling_action_metrics.py index 82a11839..436be054 100644 --- a/source/app/tests/integration/ops_metrics/test_scheduling_action_metrics.py +++ b/source/app/tests/integration/ops_metrics/test_scheduling_action_metrics.py @@ -1,100 +1,66 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 import json -from datetime import datetime, time +from datetime import datetime +from itertools import chain from typing import Any -from unittest.mock import ANY, MagicMock, patch +from unittest.mock import ANY, MagicMock from zoneinfo import ZoneInfo -import boto3 from freezegun import freeze_time from mypy_boto3_ec2.client import EC2Client from mypy_boto3_ec2.literals import InstanceTypeType -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.configuration.running_period_dict_element import ( - RunningPeriodDictElement, -) -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition from instance_scheduler.schedulers.instance_states import InstanceStates -from instance_scheduler.util.app_env import AppEnv -from tests.context import MockLambdaContext -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, +from tests.conftest import get_ami +from tests.integration.helpers.ec2_helpers import ( + create_ec2_instances, + start_ec2_instances, + stop_ec2_instances, ) -from tests.integration.ops_metrics.conftest import solution_metrics_uuid +from tests.integration.helpers.run_handler import SchedulingTestContext +from tests.test_utils.mock_metrics_environment import MockMetricsEnviron medium: InstanceTypeType = "a1.medium" large: InstanceTypeType = "a1.large" -will_start = InstanceSchedule( - name="start_instances", - periods=[ - RunningPeriodDictElement( - period=RunningPeriod(name="stop", begintime=time(12, 0, 0)) - ) - ], -) - -will_stop = InstanceSchedule( - name="stop_instances", - periods=[ - 
RunningPeriodDictElement( - period=RunningPeriod(name="start", endtime=time(12, 0, 0)) - ) - ], - stop_new_instances=True, +will_start = ScheduleDefinition( + name="will_start", periods=[PeriodIdentifier.of("start-at-noon")] ) -will_resize_to_large = InstanceSchedule( - name="resize_instances", - periods=[ - RunningPeriodDictElement( - period=RunningPeriod( - name="medium_window", begintime=time(6, 0, 0), endtime=time(12, 0, 0) - ), - instancetype=medium, - ), - RunningPeriodDictElement( - period=RunningPeriod(name="large_window", begintime=time(12, 0, 0)), - instancetype=large, - ), - ], +will_stop = ScheduleDefinition( + name="will_stop", periods=[PeriodIdentifier.of("stop-at-noon")] ) -context = build_context( - current_dt=datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC")), - schedules={ - "start_instances": will_start, - "stop_instances": will_stop, - "resize_instances": will_resize_to_large, - }, +will_resize_to_large = ScheduleDefinition( + name="will_resize_to_large", + periods=[PeriodIdentifier.of("start-at-noon", instance_type=large)], ) +periods = [ + PeriodDefinition(name="start-at-noon", begintime="12:00"), + PeriodDefinition(name="stop-at-noon", endtime="12:00"), +] -def create_test_instances( - count: int, instance_type: InstanceTypeType, schedule: InstanceSchedule -) -> list[str]: - ec2_client: EC2Client = boto3.client("ec2") - instance_ids = new_ec2_instances_of_type(ec2_client, count, instance_type) - ec2_client.create_tags( - Resources=instance_ids, Tags=[{"Key": "Schedule", "Value": schedule.name}] - ) - - if schedule in [will_start, will_resize_to_large]: - ec2_client.stop_instances(InstanceIds=instance_ids) +context = SchedulingTestContext( + schedules=[will_start, will_stop, will_resize_to_large], + periods=periods, +) - return instance_ids +dt = datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC")) def new_ec2_instances_of_type( ec2_client: EC2Client, count: int, ec2_instance_type: InstanceTypeType ) -> list[str]: - ami = 
"ami-0889ff9188674a22a" create_response = ec2_client.run_instances( - ImageId=ami, MinCount=count, MaxCount=count, InstanceType=ec2_instance_type + ImageId=get_ami(), + MinCount=count, + MaxCount=count, + InstanceType=ec2_instance_type, ) instance_ids = [instance["InstanceId"] for instance in create_response["Instances"]] @@ -104,10 +70,10 @@ def new_ec2_instances_of_type( def get_sent_scheduling_action_metric(metrics_endpoint: MagicMock) -> Any: desired_metric: dict[str, Any] = {} for call_args in metrics_endpoint.call_args_list: - json_payload = call_args[1]["data"] + json_payload = call_args[1]["body"] sent_metric = json.loads(json_payload) - if sent_metric["Event_Name"] == "scheduling_action": + if sent_metric["event_name"] == "scheduling_action": if desired_metric: raise Exception("metric sent multiple times") desired_metric = sent_metric @@ -117,71 +83,77 @@ def get_sent_scheduling_action_metric(metrics_endpoint: MagicMock) -> Any: return desired_metric -@patch("instance_scheduler.schedulers.instance_scheduler.should_collect_metric") @freeze_time(datetime(2023, 6, 12, 12, 0, 0, tzinfo=ZoneInfo("UTC"))) def test_scheduling_execution_sends_expected_actions_metric( - should_collect_metrics_func: MagicMock, mock_metrics_endpoint: MagicMock, ec2_instance_states: InstanceStates, - app_env: AppEnv, ) -> None: - should_collect_metrics_func.return_value = True - - create_test_instances(8, medium, will_start) - create_test_instances(6, large, will_start) - create_test_instances(4, medium, will_stop) - create_test_instances(2, large, will_stop) - create_test_instances(5, medium, will_resize_to_large) # instances already stopped - - event = build_scheduling_event(context) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - expected_metric = { - "TimeStamp": "2023-06-12 12:00:00", - "UUID": str(solution_metrics_uuid), - "Solution": app_env.solution_id, - "Version": app_env.solution_version, - "Event_Name": "scheduling_action", - 
"Context": { - "duration_seconds": ANY, - "num_instances_scanned": 25, - "num_unique_schedules": 3, - "actions": [ - { - "action": "Started", - "instanceType": "a1.medium", - "instances": 8, - "service": "ec2", - }, - { - "action": "Started", - "instanceType": "a1.large", - "instances": 11, # the 5 instances resized to a1.large were also started - "service": "ec2", - }, - { - "action": "Stopped", - "instanceType": "a1.medium", - "instances": 4, - "service": "ec2", - }, - { - "action": "Stopped", - "instanceType": "a1.large", - "instances": 2, - "service": "ec2", - }, - { - "action": "Resized", - "instanceType": "a1.medium-a1.large", - "instances": 5, - "service": "ec2", - }, - ], - }, - } - - sent_metric = get_sent_scheduling_action_metric(mock_metrics_endpoint) - assert sent_metric == expected_metric - assert sent_metric["Context"]["duration_seconds"] is not None + with MockMetricsEnviron(send_anonymous_metrics=True) as metrics_env: + stopped_instances = chain( + create_ec2_instances( + 8, instance_type=medium, schedule_name=will_start.name + ), + create_ec2_instances(6, instance_type=large, schedule_name=will_start.name), + create_ec2_instances( + 5, instance_type=medium, schedule_name=will_resize_to_large.name + ), + ) + running_instances = chain( + create_ec2_instances(4, instance_type=medium, schedule_name=will_stop.name), + create_ec2_instances(2, instance_type=large, schedule_name=will_stop.name), + ) + + start_ec2_instances(*running_instances) + stop_ec2_instances(*stopped_instances) + + context.run_scheduling_request_handler(dt=dt) + + expected_metric = { + "timestamp": "2023-06-12 12:00:00", + "uuid": str(metrics_env.metrics_uuid), + "solution": metrics_env.solution_id, + "version": metrics_env.solution_version, + "event_name": "scheduling_action", + "context_version": 1, + "context": { + "duration_seconds": ANY, + "num_instances_scanned": 25, + "num_unique_schedules": 3, + "actions": [ + { + "action": "Started", + "instanceType": "a1.medium", + 
"instances": 8, + "service": "ec2", + }, + { + "action": "Started", + "instanceType": "a1.large", + "instances": 11, # the 5 instances resized to a1.large were also started + "service": "ec2", + }, + { + "action": "Stopped", + "instanceType": "a1.medium", + "instances": 4, + "service": "ec2", + }, + { + "action": "Stopped", + "instanceType": "a1.large", + "instances": 2, + "service": "ec2", + }, + { + "action": "Resized", + "instanceType": "a1.medium-a1.large", + "instances": 5, + "service": "ec2", + }, + ], + }, + } + + sent_metric = get_sent_scheduling_action_metric(mock_metrics_endpoint) + assert sent_metric == expected_metric + assert sent_metric["context"]["duration_seconds"] is not None diff --git a/source/app/tests/integration/test_1_sided_schedules.py b/source/app/tests/integration/test_1_sided_schedules.py index 9fd6aa75..b805a603 100644 --- a/source/app/tests/integration/test_1_sided_schedules.py +++ b/source/app/tests/integration/test_1_sided_schedules.py @@ -2,23 +2,18 @@ # SPDX-License-Identifier: Apache-2.0 import datetime -import boto3 -from mypy_boto3_ec2.client import EC2Client - from instance_scheduler.configuration.running_period import RunningPeriod from instance_scheduler.configuration.running_period_dict_element import ( RunningPeriodDictElement, ) -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext -from tests.integration.helpers.ec2_helpers import get_current_state -from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, - custom_schedule, +from tests.integration.helpers.ec2_helpers import ( + create_ec2_instances, + get_current_state, + stop_ec2_instances, ) +from tests.integration.helpers.run_handler import simple_schedule +from tests.integration.helpers.schedule_helpers 
import quick_time one_sided_start = RunningPeriodDictElement( period=RunningPeriod(name="one-sided-start", begintime=datetime.time(10, 0, 0)) @@ -29,101 +24,74 @@ ) -def test_enforced_1_sided_start_does_not_stop_instances_before_start_time( - ec2_instance: str, ec2_instance_states: InstanceStates +def test_enforced_1_sided_start_takes_no_action_before_start_time( + ec2_instance_states: InstanceStates, ) -> None: - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(5, 0, 0), - schedules=custom_schedule(periods=[one_sided_start], enforced=True), + running_instance, stopped_instance = create_ec2_instances( + count=2, schedule_name="test-schedule" ) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) + stop_ec2_instances(stopped_instance) - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "any") - ec2_instance_states.save() + with simple_schedule( + name="test-schedule", begintime="10:00", enforced=True + ) as context: + # first contact (populates state table) + context.run_scheduling_request_handler(dt=quick_time(9, 50)) + assert get_current_state(stopped_instance) == "stopped" + assert get_current_state(running_instance) == "running" - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # second contact + context.run_scheduling_request_handler(dt=quick_time(9, 55)) + assert get_current_state(stopped_instance) == "stopped" + assert get_current_state(running_instance) == "running" - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" - -def 
test_enforced_1_sided_stop_does_not_start_instances_before_stop_time( - ec2_instance: str, ec2_instance_states: InstanceStates +def test_enforced_1_sided_stop_takes_no_action_before_stop_time( + ec2_instance_states: InstanceStates, ) -> None: - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(5, 0, 0), - schedules=custom_schedule(periods=[one_sided_stop], enforced=True), + running_instance, stopped_instance = create_ec2_instances( + count=2, schedule_name="test-schedule" ) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "any") - ec2_instance_states.save() + stop_ec2_instances(stopped_instance) - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + with simple_schedule( + name="test-schedule", endtime="20:00", enforced=True + ) as context: + # first contact (populates state table) + context.run_scheduling_request_handler(dt=quick_time(9, 50)) + assert get_current_state(stopped_instance) == "stopped" + assert get_current_state(running_instance) == "running" - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "stopped" + # second contact + context.run_scheduling_request_handler(dt=quick_time(9, 55)) + assert get_current_state(stopped_instance) == "stopped" + assert get_current_state(running_instance) == "running" def test_1_sided_stop_stops_at_stop_time( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - # ----------------------------Event 
Definition--------------------------# - context = build_context( - current_dt=quick_time(20, 0, 0), - schedules=custom_schedule(periods=[one_sided_stop]), - ) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) + with simple_schedule(endtime="20:00") as context: + # before stop (populates state table) + context.run_scheduling_request_handler(dt=quick_time(19, 55)) + assert get_current_state(ec2_instance) == "running" - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "any") - ec2_instance_states.save() - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "stopped" + # at stop + context.run_scheduling_request_handler(dt=quick_time(20, 0)) + assert get_current_state(ec2_instance) == "stopped" def test_1_sided_start_starts_at_start_time( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(10, 0, 0), - schedules=custom_schedule(periods=[one_sided_start]), - ) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "any") - ec2_instance_states.save() - - # -------------------run handler------------------------# - handler = 
SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" + stop_ec2_instances(ec2_instance) + with simple_schedule(begintime="10:00") as context: + # before start (populates state table) + context.run_scheduling_request_handler(dt=quick_time(9, 55)) + assert get_current_state(ec2_instance) == "stopped" + + # at stop + context.run_scheduling_request_handler(dt=quick_time(10, 0)) + assert get_current_state(ec2_instance) == "running" diff --git a/source/app/tests/integration/test_asg.py b/source/app/tests/integration/test_asg.py new file mode 100644 index 00000000..849d7411 --- /dev/null +++ b/source/app/tests/integration/test_asg.py @@ -0,0 +1,935 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from collections.abc import Iterator +from dataclasses import replace +from datetime import datetime, time, timedelta, timezone +from os import environ +from typing import TYPE_CHECKING, Any, Final +from unittest.mock import MagicMock, patch +from uuid import UUID +from zoneinfo import ZoneInfo + +from boto3 import client +from freezegun import freeze_time +from moto.core.models import DEFAULT_ACCOUNT_ID +from pytest import fixture, raises + +from instance_scheduler.handler.asg import lambda_handler, schedule_auto_scaling_groups +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.service.asg import AsgSize, AsgTag +from tests import DEFAULT_REGION +from tests.context import MockLambdaContext +from tests.integration.helpers.asg_helpers import ( + ASG_GROUP_NAME, + ASG_SCHEDULED_TAG_KEY, + RULE_PREFIX, + SCHEDULE_TAG_KEY, + TEST_DATETIME, + ScheduleHelper, + add_actions, + build_lambda_event, + create_asg, + 
create_ecs_cluster_with_auto_scaling, + create_simple_schedule, + delete_all_actions, + get_actions, + get_scheduled_tag, + get_tag_value, + tag_group, + verify_operational_metrics, + verify_scheduled_actions_and_tagged, +) +from tests.test_utils.mock_asg_environment import MockAsgEnvironment +from tests.test_utils.mock_metrics_environment import MockMetricsEnviron + +if TYPE_CHECKING: + from mypy_boto3_autoscaling.client import AutoScalingClient +else: + AutoScalingClient = object + + +@fixture +def schedule_a(config_table: str) -> Iterator[ScheduleHelper]: + schedule_name: Final = "my-schedule" + begin_hour: Final = 9 + end_hour: Final = 17 + time_zone: Final = ZoneInfo("America/New_York") + create_simple_schedule( + config_table_name=config_table, + schedule_name=schedule_name, + begin_time=time(hour=begin_hour), + end_time=time(hour=end_hour), + time_zone=time_zone, + ) + + yield ScheduleHelper( + name=schedule_name, + start_recurrence=f"0 {begin_hour} * * *", + end_recurrence=f"0 {end_hour} * * *", + time_zone=time_zone, + ) + + +@fixture +def schedule_b(config_table: str) -> Iterator[ScheduleHelper]: + schedule_name: Final = "my-other-schedule" + begin_hour: Final = 10 + end_hour: Final = 14 + time_zone: Final = ZoneInfo("Asia/Taipei") + create_simple_schedule( + config_table_name=config_table, + schedule_name=schedule_name, + begin_time=time(hour=begin_hour), + end_time=time(hour=end_hour), + time_zone=time_zone, + ) + + yield ScheduleHelper( + name=schedule_name, + start_recurrence=f"0 {begin_hour} * * *", + end_recurrence=f"0 {end_hour} * * *", + time_zone=time_zone, + ) + + +@fixture +def schedule_no_timezone(config_table: str) -> Iterator[ScheduleHelper]: + schedule_name: Final = "my-no-tz-schedule" + begin_hour: Final = 17 + end_hour: Final = 19 + create_simple_schedule( + config_table_name=config_table, + schedule_name=schedule_name, + begin_time=time(hour=begin_hour), + end_time=time(hour=end_hour), + ) + + yield ScheduleHelper( + name=schedule_name, 
+ start_recurrence=f"0 {begin_hour} * * *", + end_recurrence=f"0 {end_hour} * * *", + time_zone=None, + ) + + +@fixture +def schedule_invalid(config_table: str) -> Iterator[ScheduleHelper]: + schedule_name: Final = "my-invalid-schedule" + begin_hour: Final = 0 + end_hour: Final = 5 + create_simple_schedule( + config_table_name=config_table, + schedule_name=schedule_name, + begin_time=time(hour=begin_hour), + end_time=time(hour=end_hour), + monthdays={"15W"}, + ) + + yield ScheduleHelper( + name=schedule_name, + start_recurrence="Not valid", + end_recurrence="Not valid", + time_zone=None, + ) + + +@patch("instance_scheduler.handler.asg.collect_metric") +def test_handler_without_schedule_names_should_send_operational_metrics_when_time( + mock_collect_metric: MagicMock, + schedule_a: ScheduleHelper, +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + event: Final[dict[str, Any]] = build_lambda_event(TEST_DATETIME, None) + + # Call + with freeze_time(TEST_DATETIME): + with MockAsgEnvironment(): + with MockMetricsEnviron(metrics_uuid=UUID(int=TEST_DATETIME.hour)): + lambda_handler(event, MockLambdaContext()) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + verify_operational_metrics(mock_collect_metric, True) + + +@patch("instance_scheduler.handler.asg.collect_metric") +def test_handler_without_schedule_names_should_not_send_operational_metrics_when_not_time( + mock_collect_metric: MagicMock, + schedule_a: ScheduleHelper, +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + event: 
Final[dict[str, Any]] = build_lambda_event(TEST_DATETIME, None) + + # Call + with freeze_time(TEST_DATETIME): + with MockAsgEnvironment(): + with MockMetricsEnviron(metrics_uuid=UUID(int=TEST_DATETIME.hour + 1)): + lambda_handler(event, MockLambdaContext()) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + verify_operational_metrics(mock_collect_metric, False) + + +@patch("instance_scheduler.handler.asg.collect_metric") +def test_handler_with_schedule_names_should_not_send_operational_metrics( + mock_collect_metric: MagicMock, + schedule_a: ScheduleHelper, +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + event: Final[dict[str, Any]] = build_lambda_event(TEST_DATETIME, ["my-schedule"]) + + # Call + with freeze_time(TEST_DATETIME): + with MockAsgEnvironment(): + with MockMetricsEnviron(metrics_uuid=UUID(int=TEST_DATETIME.hour + 1)): + lambda_handler(event, MockLambdaContext()) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + verify_operational_metrics(mock_collect_metric, False) + + +def test_asg_configured_with_schedule( + config_table: str, + schedule_a: ScheduleHelper, +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + 
asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + +def test_asg_with_other_tag_not_configured( + config_table: str, schedule_a: ScheduleHelper +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key="SomethingElse", tag_value=schedule_a.name + ) + + # Call + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + assert len(list(get_actions(ASG_GROUP_NAME))) == 0 + + with raises(KeyError): + get_tag_value(group_name=ASG_GROUP_NAME, tag_key=ASG_SCHEDULED_TAG_KEY) + + +def test_asg_not_reconfigured_if_tag_remains( + config_table: str, schedule_a: ScheduleHelper +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + # Prepare + delete_all_actions(ASG_GROUP_NAME) + + # Call + with freeze_time(TEST_DATETIME + 
timedelta(days=1)): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + assert len(list(get_actions(ASG_GROUP_NAME))) == 0 + + new_tag: Final[AsgTag] = get_scheduled_tag(ASG_GROUP_NAME) + assert new_tag == AsgTag( + schedule=schedule_a.name, + ttl=(TEST_DATETIME + timedelta(days=30)).isoformat(), + min_size=running_size.min_size, + max_size=running_size.max_size, + desired_size=running_size.desired_size, + ) + + +def test_asg_reconfigured_if_tag_removed( + config_table: str, schedule_a: ScheduleHelper +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + # Prepare + delete_all_actions(ASG_GROUP_NAME) + + autoscaling: Final[AutoScalingClient] = client("autoscaling") + autoscaling.delete_tags( + Tags=[ + { + "Key": ASG_SCHEDULED_TAG_KEY, + "ResourceId": ASG_GROUP_NAME, + "ResourceType": "auto-scaling-group", + } + ] + ) + new_dt: Final = TEST_DATETIME + timedelta(days=1) + + # Call + with freeze_time(new_dt): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + 
account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=new_dt, + ) + + +def test_asg_reconfigured_if_schedule_changed( + config_table: str, + schedule_a: ScheduleHelper, + schedule_b: ScheduleHelper, +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + # Prepare + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_b.name + ) + new_dt: Final = TEST_DATETIME + timedelta(days=1) + + # Call + with freeze_time(new_dt): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_b, + asg_size=running_size, + dt=new_dt, + ) + assert len(list(get_actions(ASG_GROUP_NAME))) == 2 + + +def test_asg_reconfigured_if_tag_expired( + config_table: str, schedule_a: 
ScheduleHelper +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + # Prepare + delete_all_actions(ASG_GROUP_NAME) + new_dt: Final = TEST_DATETIME + timedelta(days=31) + + # Call + with freeze_time(new_dt): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=new_dt, + ) + + +def test_stopped_asg_not_configured( + config_table: str, schedule_a: ScheduleHelper +) -> None: + # Prepare + running_size: Final = AsgSize.stopped() + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + 
assert len(list(get_actions(ASG_GROUP_NAME))) == 0 + + with raises(KeyError): + get_scheduled_tag(ASG_GROUP_NAME) + + +def test_asg_configured_with_default_timezone_if_not_specified( + config_table: str, + schedule_no_timezone: ScheduleHelper, +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, + tag_key=SCHEDULE_TAG_KEY, + tag_value=schedule_no_timezone.name, + ) + expected_tz: Final = ZoneInfo("Europe/Helsinki") + + # Call + with freeze_time(TEST_DATETIME), patch.dict( + environ, {"DEFAULT_TIMEZONE": str(expected_tz)} + ): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + expected_schedule: Final = replace(schedule_no_timezone, time_zone=expected_tz) + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=expected_schedule, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + +def test_asg_not_configured_if_schedule_invalid( + config_table: str, schedule_invalid: ScheduleHelper +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, + tag_key=SCHEDULE_TAG_KEY, + tag_value=schedule_invalid.name, + ) + + # Call + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + assert len(list(get_actions(ASG_GROUP_NAME))) == 0 + + with raises(KeyError): + 
get_tag_value(group_name=ASG_GROUP_NAME, tag_key=ASG_SCHEDULED_TAG_KEY) + + +def test_preexisting_rules_not_removed( + config_table: str, schedule_a: ScheduleHelper +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + + action_name: Final = "my-action" + autoscaling: Final[AutoScalingClient] = client("autoscaling") + autoscaling.put_scheduled_update_group_action( + AutoScalingGroupName=ASG_GROUP_NAME, + ScheduledActionName=action_name, + MinSize=1, + DesiredCapacity=2, + MaxSize=3, + StartTime=datetime(year=2024, month=3, day=1, tzinfo=timezone.utc), + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + actions: Final = list(get_actions(ASG_GROUP_NAME)) + assert len(actions) == 3 + assert ( + len( + list( + filter( + lambda action: action["ScheduledActionName"] == action_name, actions + ) + ) + ) + == 1 + ) + + +def test_asg_reconfigured_if_schedule_name_specified( + config_table: str, schedule_a: ScheduleHelper +) -> None: + # Prepare + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_a.name + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + 
region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + # Prepare + period_store: Final = DynamoPeriodDefinitionStore(table_name=config_table) + begin_hour: Final = 20 + end_hour: Final = 22 + begin_time: Final = time(hour=begin_hour) + end_time: Final = time(hour=22) + period_name: Final = f"{schedule_a.name}-period" + period: Final = PeriodDefinition( + name=period_name, + begintime=begin_time.strftime("%H:%M"), + endtime=end_time.strftime("%H:%M"), + ) + period_store.put(period, overwrite=True) + + schedule_a_updated: Final = replace( + schedule_a, + start_recurrence=f"0 {begin_hour} * * *", + end_recurrence=f"0 {end_hour} * * *", + ) + new_dt: Final = TEST_DATETIME + timedelta(days=1) + + # Call + with freeze_time(new_dt): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=[schedule_a_updated.name], + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a_updated, + asg_size=running_size, + dt=new_dt, + ) + assert len(list(get_actions(ASG_GROUP_NAME))) == 2 + + +def test_update_schedule_when_schedule_tag_value_is_updated( + config_table: str, + schedule_a: ScheduleHelper, + schedule_b: ScheduleHelper, +) -> None: + """ + Scenario: + An auto scaling group is scheduled and tagged with `schedule_a` and `initial_tag_size` initially. + Then, a user updates the auto scaling group tag to `schedule_b` and update the schedules. + The new schedule should be based on the current auto scaling size, `running_size` and `schedule_b`. 
+ """ + + # Prepare + initial_tag_size: Final = AsgSize(min_size=1, desired_size=2, max_size=3) + running_size: Final = AsgSize(min_size=3, desired_size=5, max_size=20) + create_asg(ASG_GROUP_NAME, running_size) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_b.name + ) + tag_group( + group_name=ASG_GROUP_NAME, + tag_key=ASG_SCHEDULED_TAG_KEY, + tag_value=str( + AsgTag( + schedule=schedule_a.name, + ttl=(TEST_DATETIME + timedelta(days=30)).isoformat(), + min_size=initial_tag_size.min_size, + max_size=initial_tag_size.max_size, + desired_size=initial_tag_size.desired_size, + ) + ), + ) + add_actions( + group_name=ASG_GROUP_NAME, asg_size=initial_tag_size, schedule=schedule_a + ) + + # Initial verification for the preparation + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=initial_tag_size, + dt=TEST_DATETIME, + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_b, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + +def test_update_schedule_when_tag_is_updated_and_asg_stopped( + config_table: str, + schedule_a: ScheduleHelper, + schedule_b: ScheduleHelper, +) -> None: + """ + Scenario: + An auto scaling group is scheduled and tagged with `schedule_a` and `initial_tag_size` initially. + Then, a user updates the auto scaling group tag to `schedule_b` and update the schedules. + However, as the auto scaling group is currently stopped, + the new schedule should be based on the existing tag size, `initial_tag_size`, and `schedule_b`. 
+ """ + + # Prepare + initial_tag_size: Final = AsgSize(min_size=1, desired_size=2, max_size=3) + create_asg(ASG_GROUP_NAME, AsgSize.stopped()) + tag_group( + group_name=ASG_GROUP_NAME, tag_key=SCHEDULE_TAG_KEY, tag_value=schedule_b.name + ) + tag_group( + group_name=ASG_GROUP_NAME, + tag_key=ASG_SCHEDULED_TAG_KEY, + tag_value=str( + AsgTag( + schedule=schedule_a.name, + ttl=(TEST_DATETIME + timedelta(days=30)).isoformat(), + min_size=initial_tag_size.min_size, + max_size=initial_tag_size.max_size, + desired_size=initial_tag_size.desired_size, + ) + ), + ) + add_actions( + group_name=ASG_GROUP_NAME, asg_size=initial_tag_size, schedule=schedule_a + ) + + # Initial verification for the preparation + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_a, + asg_size=initial_tag_size, + dt=TEST_DATETIME, + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ASG_GROUP_NAME, + schedule=schedule_b, + asg_size=initial_tag_size, + dt=TEST_DATETIME, + ) + + +def test_schedule_ecs_autoscaling_group( + config_table: str, schedule_a: ScheduleHelper +) -> None: + # Prepare + ecs_asg_group_name: Final = "ecs-asg" + running_size: Final = AsgSize(min_size=1, max_size=10, desired_size=5) + create_ecs_cluster_with_auto_scaling( + ecs_asg_group_name=ecs_asg_group_name, running_size=running_size + ) + tag_group( + group_name=ecs_asg_group_name, + tag_key=SCHEDULE_TAG_KEY, + tag_value=schedule_a.name, + ) + + # Call + with freeze_time(TEST_DATETIME): + schedule_auto_scaling_groups( + schedule_tag_key=SCHEDULE_TAG_KEY, + config_table_name=config_table, + account_id=DEFAULT_ACCOUNT_ID, + 
region=DEFAULT_REGION, + scheduling_role_name="my-role", + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + rule_prefix=RULE_PREFIX, + schedule_names=None, + ) + + # Verify + verify_scheduled_actions_and_tagged( + asg_group_name=ecs_asg_group_name, + schedule=schedule_a, + asg_size=running_size, + dt=TEST_DATETIME, + ) + + +# - configure fails, rules are rolled back (???), existing rules are left in place diff --git a/source/app/tests/integration/test_basic_ec2_scheduling.py b/source/app/tests/integration/test_basic_ec2_scheduling.py index 9bc50bd0..44300811 100644 --- a/source/app/tests/integration/test_basic_ec2_scheduling.py +++ b/source/app/tests/integration/test_basic_ec2_scheduling.py @@ -1,276 +1,110 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -import datetime - -import boto3 -from mypy_boto3_ec2.client import EC2Client - -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext -from tests.integration.helpers.ec2_helpers import get_current_state -from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, +from tests.integration.helpers.ec2_helpers import ( + get_current_state, + start_ec2_instances, + stop_ec2_instances, ) +from tests.integration.helpers.run_handler import simple_schedule +from tests.integration.helpers.schedule_helpers import quick_time def test_ec2_starts_at_beginning_of_period( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - """ - ----inputs---- - schedule: - begintime = 10:00 - 
endtime = 20:00 - current time: 10:00 - instance: stopped - last_desired_state: stopped - - ----expect---- - instance: running - """ - - # ----------------------------Event Definition--------------------------# - context = build_context(current_dt=quick_time(10, 0, 0)) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "stopped") - ec2_instance_states.save() - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" + # instance is already stopped + stop_ec2_instances(ec2_instance) + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # before start of period (populates state table) + context.run_scheduling_request_handler(dt=quick_time(9, 55)) + assert get_current_state(ec2_instance) == "stopped" + # start of period + context.run_scheduling_request_handler(dt=quick_time(10, 0)) + assert get_current_state(ec2_instance) == "running" def test_ec2_stops_at_end_of_period( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - """ - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 20:00 - instance: running - last_desired_state: running - - ----expect---- - instance: stopped - """ - # ----------------------------Event Definition--------------------------# - context = build_context(current_dt=quick_time(20, 0, 0)) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = 
boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "running") - ec2_instance_states.save() - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "stopped" - - -def test_ec2_does_not_start_in_middle_of_period( - ec2_instance: str, ec2_instance_states: InstanceStates -) -> None: # default behavior should be to only start/stop at period boundaries - """ - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 15:00 - instance: stopped - last_desired_state: running - - ----expect---- - instance: stopped - """ - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=datetime.datetime.fromisoformat("2023-05-16T15:00:00+00:00"), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, - ) - - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "running") - ec2_instance_states.save() + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # run before end of period (populates state table) + context.run_scheduling_request_handler(dt=quick_time(19, 55, 0)) + assert get_current_state(ec2_instance) == "running" + # run at end of period + 
context.run_scheduling_request_handler(dt=quick_time(20, 0, 0)) + assert get_current_state(ec2_instance) == "stopped" + + +def test_instance_is_not_restarted_if_stopped_during_running_period( + ec2_instance: str, + ec2_instance_states: InstanceStates, +) -> None: + # instance running in period + with simple_schedule(begintime="10:00", endtime="20:00") as context: + context.run_scheduling_request_handler(dt=quick_time(15, 0, 0)) + assert get_current_state(ec2_instance) == "running" - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # cx stops instance manually + stop_ec2_instances(ec2_instance) - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "stopped" + # instance is not restarted + context.run_scheduling_request_handler(dt=quick_time(15, 5, 0)) + assert get_current_state(ec2_instance) == "stopped" -def test_ec2_starts_when_inside_enforced_period( - ec2_instance: str, ec2_instance_states: InstanceStates +def test_instance_is_restarted_if_schedule_is_enforced( + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - """ - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - enforced = true - current time: 15:00 - instance: stopped - last_desired_state: running - - ----expect---- - instance: running - """ - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(15, 0, 0), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - enforced=True, - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, - ) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - 
ec2_client.stop_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "running") - ec2_instance_states.save() + with simple_schedule(begintime="10:00", endtime="20:00", enforced=True) as context: + # instance running in period + context.run_scheduling_request_handler(dt=quick_time(15, 0, 0)) + assert get_current_state(ec2_instance) == "running" - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # cx stops instance manually + stop_ec2_instances(ec2_instance) - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" + # instance is restarted + context.run_scheduling_request_handler(dt=quick_time(15, 5, 0)) + assert get_current_state(ec2_instance) == "running" -def test_ec2_does_not_stop_outside_of_period( - ec2_instance: str, ec2_instance_states: InstanceStates -) -> None: # default behavior should be to only start/stop at period boundaries - """ - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 22:00 - instance: running - last_desired_state: stopped - - ----expect---- - instance: running - """ - # ----------------------------Event Definition--------------------------# - context = build_context(current_dt=quick_time(22, 0, 0)) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) +def test_instance_is_not_stopped_if_started_outside_running_period( + ec2_instance: str, + ec2_instance_states: InstanceStates, +) -> None: + stop_ec2_instances(ec2_instance) - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "stopped") - 
ec2_instance_states.save() + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # instance stopped outside period + context.run_scheduling_request_handler(dt=quick_time(22, 0, 0)) + assert get_current_state(ec2_instance) == "stopped" - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # cx starts instance manually + start_ec2_instances(ec2_instance) - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" + # instance is not stopped + context.run_scheduling_request_handler(dt=quick_time(22, 5, 0)) + assert get_current_state(ec2_instance) == "running" -def test_ec2_stops_when_outside_enforced_period( - ec2_instance: str, ec2_instance_states: InstanceStates +def test_instance_is_stopped_if_started_outside_running_period_when_schedule_is_enforced( + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - """ - inputs: - schedule: - begintime = 10:00 - endtime = 20:00 - enforced = true - current time: 22:00 - instance: running - last_desired_state: stopped - - output: - instance: stopped - """ - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(22, 0, 0), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - enforced=True, - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, - ) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) + stop_ec2_instances(ec2_instance) - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "stopped") - 
ec2_instance_states.save() + with simple_schedule(begintime="10:00", endtime="20:00", enforced=True) as context: + # instance stopped outside period + context.run_scheduling_request_handler(dt=quick_time(22, 0, 0)) + assert get_current_state(ec2_instance) == "stopped" - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # cx starts instance manually + start_ec2_instances(ec2_instance) - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "stopped" + # instance is not stopped + context.run_scheduling_request_handler(dt=quick_time(22, 5, 0)) + assert get_current_state(ec2_instance) == "stopped" diff --git a/source/app/tests/integration/test_basic_rds_scheduling.py b/source/app/tests/integration/test_basic_rds_scheduling.py index 3ef54780..5056239a 100644 --- a/source/app/tests/integration/test_basic_rds_scheduling.py +++ b/source/app/tests/integration/test_basic_rds_scheduling.py @@ -1,84 +1,46 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import boto3 -from mypy_boto3_rds import RDSClient - -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext -from tests.integration.helpers.rds_helpers import get_rds_instance_state -from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, +from tests.integration.helpers.rds_helpers import ( + get_rds_instance_state, + stop_rds_instances, ) +from tests.integration.helpers.run_handler import simple_schedule, target +from tests.integration.helpers.schedule_helpers import quick_time def test_rds_starts_at_beginning_of_period( - rds_instance: str, rds_instance_states: InstanceStates + rds_instance: str, + rds_instance_states: InstanceStates, ) -> None: - """ - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 10:00 - instance: stopped - last_desired_state: stopped - - ----expect---- - instance: available - """ - - # ----------------------------Event Definition--------------------------# - context = build_context(current_dt=quick_time(10, 0, 0), service="rds") - event = build_scheduling_event(context) - # ----------------------------RDS Instance-------------------------# - rds_client: RDSClient = boto3.client("rds") - rds_client.stop_db_instance(DBInstanceIdentifier=rds_instance) - - # ------------------------Last Desired State------------------------# - rds_instance_states.set_instance_state(rds_instance, "stopped") - rds_instance_states.save() - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_rds_instance_state(rds_instance) == "available" + # instance is 
already stopped + stop_rds_instances(rds_instance) + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # before start of period (populates state table) + context.run_scheduling_request_handler( + dt=quick_time(9, 55), target=target(service="rds") + ) + assert get_rds_instance_state(rds_instance) == "stopped" + # start of period + context.run_scheduling_request_handler( + dt=quick_time(10, 0), target=target(service="rds") + ) + assert get_rds_instance_state(rds_instance) == "available" def test_rds_stops_at_end_of_period( - rds_instance: str, rds_instance_states: InstanceStates + rds_instance: str, + rds_instance_states: InstanceStates, ) -> None: - """ - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 10:00 - instance: stopped - last_desired_state: stopped - - ----expect---- - instance: available - """ - - # ----------------------------Event Definition--------------------------# - context = build_context(current_dt=quick_time(20, 0, 0), service="rds") - event = build_scheduling_event(context) - # ----------------------------RDS Instance-------------------------# - assert get_rds_instance_state(rds_instance) == "available" # defaults to available - - # ------------------------Last Desired State------------------------# - rds_instance_states.set_instance_state(rds_instance, "running") - rds_instance_states.save() - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_rds_instance_state(rds_instance) == "stopped" + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # before start of period (populates state table) + context.run_scheduling_request_handler( + dt=quick_time(19, 55), target=target(service="rds") + ) + assert get_rds_instance_state(rds_instance) == "available" + # start of period + 
context.run_scheduling_request_handler( + dt=quick_time(20, 0), target=target(service="rds") + ) + assert get_rds_instance_state(rds_instance) == "stopped" diff --git a/source/app/tests/integration/test_basic_timezone_handling.py b/source/app/tests/integration/test_basic_timezone_handling.py index 7311a506..acb7b947 100644 --- a/source/app/tests/integration/test_basic_timezone_handling.py +++ b/source/app/tests/integration/test_basic_timezone_handling.py @@ -1,23 +1,33 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 from datetime import datetime, time, timezone +from zoneinfo import ZoneInfo -import boto3 import pytest -from mypy_boto3_ec2.client import EC2Client +from freezegun import freeze_time -from instance_scheduler.configuration.instance_schedule import InstanceSchedule from instance_scheduler.configuration.running_period import RunningPeriod from instance_scheduler.configuration.running_period_dict_element import ( RunningPeriodDictElement, ) -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler +from instance_scheduler.handler.scheduling_request import ( + SchedulingRequest, + SchedulingRequestHandler, +) +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore from instance_scheduler.schedulers.instance_states import InstanceStates +from instance_scheduler.schedulers.states import InstanceState from tests.context import MockLambdaContext -from tests.integration.helpers.ec2_helpers import get_current_state -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, +from tests.integration.helpers.ec2_helpers import ( + create_ec2_instances, + get_current_state, + stop_ec2_instances, +) +from tests.integration.helpers.run_handler import multi_period_schedule, simple_schedule +from tests.logger import MockLogger +from 
tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, ) stop_after_noon_period: RunningPeriodDictElement = { @@ -29,81 +39,91 @@ } -def tz_instance(tz_name: str) -> tuple[InstanceSchedule, str]: - ec2_client: EC2Client = boto3.client("ec2") - instance_id = new_ec2_instance(ec2_client) - ec2_client.create_tags( - Resources=[instance_id], Tags=[{"Key": "Schedule", "Value": tz_name}] - ) - - schedule = InstanceSchedule( - name=tz_name, - timezone=tz_name, - periods=[stop_after_noon_period], - enforced=True, - stop_new_instances=True, - ) - - return schedule, instance_id - - -def new_ec2_instance(ec2_client: EC2Client) -> str: - ami = "ami-0889ff9188674a22a" - instance_id: str = ec2_client.run_instances(ImageId=ami, MinCount=1, MaxCount=1)[ - "Instances" - ][0]["InstanceId"] - return instance_id - - -def test_passing_tz_unaware_dt_to_scheduling_request_handler_throws_error() -> None: - context = build_context(current_dt=datetime(2023, 6, 19, 12, 0, 0)) - event = build_scheduling_event(context) +def test_passing_tz_unaware_dt_to_scheduling_request_handler_throws_error( + config_item_store: DdbConfigItemStore, +) -> None: + # testing if raised by handler, not just scheduling_context constructor + event: SchedulingRequest = { + "action": "scheduler:run", + "account": "123456789012", + "region": "us-east-1", + "service": "ec2", + "current_dt": datetime(2023, 6, 19, 12, 0, 0).isoformat(), + "dispatch_time": "2023-05-12 14:55:10.600619", + } with pytest.raises(ValueError): - handler = SchedulingRequestHandler(event, MockLambdaContext()) + handler = SchedulingRequestHandler( + event, MockLambdaContext(), MockSchedulingRequestEnvironment(), MockLogger() + ) handler.handle_request() -def test_time_zones(moto_backend: None, ec2_instance_states: InstanceStates) -> None: - # ----------------------------EC2 Instances-------------------------# - schedules: dict[str, InstanceSchedule] = {} - instances: dict[str, str] = {} - - # all times 
related to 12:00 UTC - for tz in [ - "Etc/GMT+12", # local time: 00:00 - "Etc/GMT+10", # local time: 02:00 - "Etc/GMT+5", # local time: 07:00 - "Etc/GMT", # local time: 12:00 - "Etc/GMT-5", # local time: 17:00 - "Etc/GMT-10", # local time: 22:00 - "Etc/GMT-13", # local time: 01:00 - ]: - schedule, instance_id = tz_instance(tz) - schedules[tz] = schedule - instances[tz] = instance_id - ec2_instance_states.set_instance_state(instance_id, "running") - +@freeze_time(datetime(2023, 8, 13, 0, 0, 0, tzinfo=ZoneInfo("Australia/Sydney"))) +def test_weekday_boundaries_respect_schedule_timezone( + ec2_instance_states: InstanceStates, +) -> None: + """ + This is a real-world scenario that used to fail in 1.3.0: + The instance would not be started on sunday morning Australia time because the weekdays + expression was being compared using UTC meaning that the sunday check failed at 0:00 sunday morning because + UTC was still in Saturday + """ + with multi_period_schedule( + name="test-schedule", + timezone="Australia/Sydney", + period_definitions=[ + PeriodDefinition( + name="weekdays", + begintime="07:00", + endtime="21:00", + weekdays={"mon-fri"}, + ), + PeriodDefinition( + name="sun2", begintime="00:00", endtime="11:00", weekdays={"sun#2"} + ), + PeriodDefinition( + name="sun3", begintime="00:00", endtime="11:00", weekdays={"sun#3"} + ), + ], + ) as context: + (instance,) = create_ec2_instances(1, schedule_name="test-schedule") + # instance is stopped and last desired state is stopped + stop_ec2_instances(instance) + ec2_instance_states.set_instance_state(instance, InstanceState.STOPPED) + ec2_instance_states.save() + + aus_time = datetime(2023, 8, 13, 0, 0, 0, tzinfo=ZoneInfo("Australia/Sydney")) + context.run_scheduling_request_handler(dt=aus_time) + + assert get_current_state(instance) == "running" + + +@pytest.mark.parametrize( + "tz,expected", + [ + # all instances between 0-12 should be running, all instances between 12-24 should be stopped + ("Etc/GMT+12", "running"), 
# local time: 00:00 + ("Etc/GMT+10", "running"), # local time: 02:00 + ("Etc/GMT+5", "running"), # local time: 07:00 + ("Etc/GMT", "stopped"), # local time: 12:00 + ("Etc/GMT-5", "stopped"), # local time: 17:00 + ("Etc/GMT-10", "stopped"), # local time: 22:00 + ("Etc/GMT-13", "running"), # local time: 01:00 + ], +) +def test_time_zones( + ec2_instance: str, + ec2_instance_states: InstanceStates, + tz: str, + expected: str, +) -> None: + ec2_instance_states.set_instance_state(ec2_instance, InstanceState.RUNNING) ec2_instance_states.save() - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=datetime(2023, 6, 9, 12, 0, 0, tzinfo=timezone.utc), - schedules=schedules, - ) - event = build_scheduling_event(context) - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - - # all instances between 0-12 should be running, all instances between 12-24 should be stopped - assert get_current_state(instances["Etc/GMT+12"]) == "running" # local time: 00:00 - assert get_current_state(instances["Etc/GMT+10"]) == "running" # local time: 02:00 - assert get_current_state(instances["Etc/GMT+5"]) == "running" # local time: 07:00 - assert get_current_state(instances["Etc/GMT"]) == "stopped" # local time: 12:00 - assert get_current_state(instances["Etc/GMT-5"]) == "stopped" # local time: 17:00 - assert get_current_state(instances["Etc/GMT-10"]) == "stopped" # local time: 22:00 - assert get_current_state(instances["Etc/GMT-13"]) == "running" # local time: 01:00 + with simple_schedule(begintime="0:00", endtime="12:00", timezone=tz) as context: + # run at 12:00 utc (should be translated to schedule time) + context.run_scheduling_request_handler( + dt=datetime(2023, 6, 9, 12, 0, 0, tzinfo=timezone.utc) + ) + assert get_current_state(ec2_instance) == expected diff --git 
a/source/app/tests/integration/test_create_rds_snapshot_flag.py b/source/app/tests/integration/test_create_rds_snapshot_flag.py index 0997bea7..0b9fe422 100644 --- a/source/app/tests/integration/test_create_rds_snapshot_flag.py +++ b/source/app/tests/integration/test_create_rds_snapshot_flag.py @@ -5,70 +5,66 @@ from mypy_boto3_rds import RDSClient from mypy_boto3_rds.type_defs import DBSnapshotMessageTypeDef -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext from tests.integration.helpers.rds_helpers import get_rds_instance_state +from tests.integration.helpers.run_handler import simple_schedule, target from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, +from tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, ) def test_rds_creates_snapshot_when_flag_enabled( - rds_instance: str, rds_instance_states: InstanceStates + rds_instance: str, + rds_instance_states: InstanceStates, ) -> None: - # ----------------------------Event Definition--------------------------# + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # before end of period (populates state table) + context.run_scheduling_request_handler( + dt=quick_time(19, 55, 0), target=target(service="rds") + ) + assert get_rds_instance_state(rds_instance) == "available" - context = build_context( - current_dt=quick_time(20, 0, 0), service="rds", create_rds_snapshot=True - ) - event = build_scheduling_event(context) + # test if snapshot is created + context.run_scheduling_request_handler( + dt=quick_time(20, 0, 0), + environment=MockSchedulingRequestEnvironment(enable_rds_snapshots=True), + target=target(service="rds"), + ) - # ----------------------------RDS 
Instance-------------------------# - # already setup and running - # ------------------------Last Desired State------------------------# - rds_instance_states.set_instance_state(rds_instance, "running") - rds_instance_states.save() - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert ( - get_rds_instance_state(rds_instance) == "stopped" - ) # ensure instance actually stopped - rds_client: RDSClient = boto3.client("rds") - result: DBSnapshotMessageTypeDef = rds_client.describe_db_snapshots( - DBInstanceIdentifier=rds_instance - ) - assert len(result["DBSnapshots"]) == 1 + assert ( + get_rds_instance_state(rds_instance) == "stopped" + ) # ensure instance actually stopped + rds_client: RDSClient = boto3.client("rds") + result: DBSnapshotMessageTypeDef = rds_client.describe_db_snapshots( + DBInstanceIdentifier=rds_instance + ) + assert len(result["DBSnapshots"]) == 1 def test_rds_does_not_create_snapshot_when_flag_disabled( - rds_instance: str, rds_instance_states: InstanceStates + rds_instance: str, + rds_instance_states: InstanceStates, ) -> None: - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(20, 0, 0), service="rds", create_rds_snapshot=False - ) - event = build_scheduling_event(context) - # ----------------------------RDS Instance-------------------------# - # already setup and running - # ------------------------Last Desired State------------------------# - rds_instance_states.set_instance_state(rds_instance, "running") - rds_instance_states.save() - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # before end of period (populates state table) 
+ context.run_scheduling_request_handler( + dt=quick_time(19, 55, 0), target=target(service="rds") + ) + assert get_rds_instance_state(rds_instance) == "available" + + # test if snapshot is not created + context.run_scheduling_request_handler( + dt=quick_time(20, 0, 0), + environment=MockSchedulingRequestEnvironment(enable_rds_snapshots=False), + target=target(service="rds"), + ) - # ---------------------validate result---------------------# - assert ( - get_rds_instance_state(rds_instance) == "stopped" - ) # ensure instance actually stopped - rds_client: RDSClient = boto3.client("rds") - result: DBSnapshotMessageTypeDef = rds_client.describe_db_snapshots( - DBInstanceIdentifier=rds_instance - ) - assert len(result["DBSnapshots"]) == 0 + assert ( + get_rds_instance_state(rds_instance) == "stopped" + ) # ensure instance actually stopped + rds_client: RDSClient = boto3.client("rds") + result: DBSnapshotMessageTypeDef = rds_client.describe_db_snapshots( + DBInstanceIdentifier=rds_instance + ) + assert len(result["DBSnapshots"]) == 0 diff --git a/source/app/tests/integration/test_cross_account_scheduling.py b/source/app/tests/integration/test_cross_account_scheduling.py index 24881698..f1b19f22 100644 --- a/source/app/tests/integration/test_cross_account_scheduling.py +++ b/source/app/tests/integration/test_cross_account_scheduling.py @@ -1,115 +1,102 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import dataclasses - from mypy_boto3_ec2 import EC2Client -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates from instance_scheduler.util.app_env import AppEnv -from tests.context import MockLambdaContext from tests.integration.helpers.boto_client_helpers import client_in_account_region from tests.integration.helpers.ec2_helpers import ( create_ec2_instances, get_current_state, ) -from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, - default_test_schedule, +from tests.integration.helpers.run_handler import ( + multi_period_schedule, + simple_schedule, + target, ) +from tests.integration.helpers.schedule_helpers import quick_time class LocalizedTestInstance: def __init__( self, - schedule: InstanceSchedule, + schedule_name: str, account: str = "123456789012", region: str = "us-east-1", ) -> None: self.client: EC2Client = client_in_account_region("ec2", account, region) - (self.instance_id,) = create_ec2_instances(1, schedule, account, region) + (self.instance_id,) = create_ec2_instances(1, schedule_name, account, region) def current_state(self) -> str: return get_current_state(self.instance_id, self.client) def test_cross_account_cross_region_ec2_scheduling_starts_and_stops_instance( - ec2_instance_states: InstanceStates, app_env: AppEnv + ec2_instance_states: InstanceStates, + app_env: AppEnv, ) -> None: - account = "222233334444" - region = "us-west-2" - (ec2_instance,) = create_ec2_instances(1, default_test_schedule(), account, region) - - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(10, 0, 0), - region=region, - 
schedule_lambda_account=False, - account_id=account, - ) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = client_in_account_region("ec2", account, region) - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.load(account, region) - ec2_instance_states.set_instance_state(ec2_instance, "stopped") - ec2_instance_states.save() - - # ---------------------start---------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - assert get_current_state(ec2_instance, ec2_client) == "running" - - # ---------------------stop---------------------# - context = dataclasses.replace(context, current_dt=quick_time(20, 0, 0)) - event = build_scheduling_event(context) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - assert get_current_state(ec2_instance, ec2_client) == "stopped" + spoke_account = "222233334444" + spoke_region = "us-west-2" + ec2_client: EC2Client = client_in_account_region("ec2", spoke_account, spoke_region) + + with simple_schedule( + name="test-schedule", begintime="10:00", endtime="20:00" + ) as context: + (ec2_instance,) = create_ec2_instances( + 1, "test-schedule", spoke_account, spoke_region + ) + ec2_client.stop_instances(InstanceIds=[ec2_instance]) + + # before start of period (populates state table) + context.run_scheduling_request_handler( + dt=quick_time(9, 55, 0), + target=target(account=spoke_account, region=spoke_region), + ) + assert get_current_state(ec2_instance, ec2_client) == "stopped" + + # start instance + context.run_scheduling_request_handler( + dt=quick_time(10, 0, 0), + target=target(account=spoke_account, region=spoke_region), + ) + assert get_current_state(ec2_instance, ec2_client) == "running" + + # stop instance + context.run_scheduling_request_handler( + 
dt=quick_time(20, 0, 0), + target=target(account=spoke_account, region=spoke_region), + ) + assert get_current_state(ec2_instance, ec2_client) == "stopped" def test_cross_account_scheduling_does_not_schedule_other_instances( - ec2_instance_states: InstanceStates, app_env: AppEnv + ec2_instance_states: InstanceStates, + app_env: AppEnv, ) -> None: - target_account = "222233334444" - target_region = "us-west-2" - - schedule = InstanceSchedule( + spoke_account = "222233334444" + spoke_region = "us-west-2" + with multi_period_schedule( name="force-stop", + period_definitions=[], # no periods defined enforced=True, override_status="stopped", stop_new_instances=True, - ) - - context = build_context( - current_dt=quick_time(10, 0, 0), - region=target_region, - schedule_lambda_account=False, - account_id=target_account, - schedules={schedule.name: schedule}, - ) - - event = build_scheduling_event(context) - - target_instance = LocalizedTestInstance( - schedule, account=target_account, region=target_region - ) - bad_instances = [ - LocalizedTestInstance(schedule, account=target_account), - LocalizedTestInstance(schedule, region=target_region), - LocalizedTestInstance(schedule), - ] - - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - assert target_instance.current_state() == "stopped" - for bad_instance in bad_instances: - assert bad_instance.current_state() == "running" + ) as context: + target_instance = LocalizedTestInstance( + "force-stop", account=spoke_account, region=spoke_region + ) + bad_instances = [ + # omitted account/region values use hub account/region + LocalizedTestInstance("force-stop", account=spoke_account), + LocalizedTestInstance("force-stop", region=spoke_region), + LocalizedTestInstance("force-stop"), + ] + + context.run_scheduling_request_handler( + dt=quick_time(10, 0, 0), + target=target(account=spoke_account, region=spoke_region), + ) + + assert target_instance.current_state() == "stopped" + for 
bad_instance in bad_instances: + assert bad_instance.current_state() == "running" diff --git a/source/app/tests/integration/test_ec2_instance_tagging.py b/source/app/tests/integration/test_ec2_instance_tagging.py index f5b72472..0bf99fcf 100644 --- a/source/app/tests/integration/test_ec2_instance_tagging.py +++ b/source/app/tests/integration/test_ec2_instance_tagging.py @@ -1,113 +1,107 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -import dataclasses -from datetime import datetime, timezone -import boto3 from freezegun.api import freeze_time -from mypy_boto3_ec2.client import EC2Client from instance_scheduler.configuration.scheduling_context import TagTemplate -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from instance_scheduler.util.app_env import AppEnv -from tests.context import MockLambdaContext -from tests.integration.helpers.ec2_helpers import get_tags +from tests.integration.helpers.ec2_helpers import ( + get_current_state, + get_tags, + stop_ec2_instances, +) +from tests.integration.helpers.run_handler import simple_schedule from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, +from tests.test_utils.app_env_utils import with_mock_app_env +from tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, ) @freeze_time("2023-06-12 5:00:00") def test_ec2_start_tag_is_applied_on_start( - app_env: AppEnv, ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: """ test that the start tag is applied with the correct substitutions note - the correct time substitution is actual time (from freeze_time) NOT schedule time (current_dt) """ - context = build_context( - 
current_dt=datetime(2023, 6, 12, 10, 0, 0, tzinfo=timezone.utc), - started_tags=[ + + # instance is already stopped + stop_ec2_instances(ec2_instance) + mock_environ = MockSchedulingRequestEnvironment( + start_tags=[ + "action=started by {scheduler} on {year}/{month}/{day} at {hour}:{minute} {timezone}" + ] + ) + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # before start of period (populates state table) + context.run_scheduling_request_handler(dt=quick_time(9, 55)) + assert get_current_state(ec2_instance) == "stopped" + # start of period + context.run_scheduling_request_handler( + dt=quick_time(10, 0), environment=mock_environ + ) + + assert get_current_state(ec2_instance) == "running" + tags = get_tags(ec2_instance) + + assert tags.__contains__( { "Key": "action", - "Value": "started by {scheduler} on {year}/{month}/{day} at {hour}:{minute} {timezone}", + "Value": f"started by {mock_environ.stack_name} on 2023/06/12 at 05:00 UTC", } - ], - ) - event = build_scheduling_event(context) - - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "stopped") - ec2_instance_states.save() - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - - tags = get_tags(ec2_instance) - - assert tags.__contains__( - { - "Key": "action", - "Value": f"started by {app_env.stack_name} on 2023/06/12 at 05:00 UTC", - } - ) + ) @freeze_time("2023-06-12 15:00:00") +@with_mock_app_env( + stop_tags=[ + "action=stopped by {scheduler} on {year}/{month}/{day} at {hour}:{minute} {timezone}" + ] +) def test_ec2_stop_tag_is_applied_on_stop( - app_env: AppEnv, 
ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: """ test that the stop tag is applied with the correct substitutions note - the correct time substitution is actual time (from freeze_time) NOT schedule time (current_dt) """ - context = build_context( - current_dt=datetime(2023, 6, 12, 20, 0, 0, tzinfo=timezone.utc), - stopped_tags=[ - { - "Key": "action", - "Value": "stopped by {scheduler} on {year}/{month}/{day} at {hour}:{minute} {timezone}", - } - ], - ) - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "running") - ec2_instance_states.save() + # instance is already stopped + stop_ec2_instances(ec2_instance) + mock_environ = MockSchedulingRequestEnvironment( + stop_tags=[ + "action=stopped by {scheduler} on {year}/{month}/{day} at {hour}:{minute} {timezone}" + ] + ) - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # before end of period (populates state table) + context.run_scheduling_request_handler(dt=quick_time(19, 55)) + assert get_current_state(ec2_instance) == "running" + # end of period - # ---------------------validate result---------------------# + context.run_scheduling_request_handler( + dt=quick_time(20, 0), environment=mock_environ + ) - tags = get_tags(ec2_instance) + assert get_current_state(ec2_instance) == "stopped" + tags = get_tags(ec2_instance) - assert tags.__contains__( - { - "Key": "action", - "Value": f"stopped by {app_env.stack_name} on 2023/06/12 at 15:00 UTC", - } - ) + assert tags.__contains__( + 
{ + "Key": "action", + "Value": f"stopped by {mock_environ.stack_name} on 2023/06/12 at 15:00 UTC", + } + ) def test_ec2_start_stop_tags_are_mutually_exclusive( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: """when start tags are applied, stop tags should be removed and vice-versa""" started_tag1: TagTemplate = {"Key": "started1", "Value": "start1"} @@ -115,49 +109,49 @@ def test_ec2_start_stop_tags_are_mutually_exclusive( stopped_tag1: TagTemplate = {"Key": "stopped1", "Value": "stop1"} stopped_tag2: TagTemplate = {"Key": "stopped2", "Value": "stop2"} - context = build_context( - current_dt=quick_time(10, 0, 0), - started_tags=[started_tag1, started_tag2], - stopped_tags=[stopped_tag1, stopped_tag2], + mock_environ = MockSchedulingRequestEnvironment( + start_tags=[ + f"{started_tag1['Key']}={started_tag1['Value']}", + f"{started_tag2['Key']}={started_tag2['Value']}", + ], + stop_tags=[ + f"{stopped_tag1['Key']}={stopped_tag1['Value']}", + f"{stopped_tag2['Key']}={stopped_tag2['Value']}", + ], ) - event = build_scheduling_event(context) - - # ----------------------------initial setup-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - ec2_instance_states.set_instance_state(ec2_instance, "stopped") - ec2_instance_states.save() - - # ------------------------first start------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - instance_tags = get_tags(ec2_instance) - assert started_tag1 in instance_tags - assert started_tag2 in instance_tags - assert stopped_tag1 not in instance_tags - assert stopped_tag2 not in instance_tags - - # -------------------stop------------------------# - context = dataclasses.replace(context, current_dt=quick_time(20, 0, 0)) - event = build_scheduling_event(context) - handler = SchedulingRequestHandler(event, 
MockLambdaContext()) - handler.handle_request() - - instance_tags = get_tags(ec2_instance) - assert started_tag1 not in instance_tags - assert started_tag2 not in instance_tags - assert stopped_tag1 in instance_tags - assert stopped_tag2 in instance_tags - - # ---------------------start again---------------------# - context = dataclasses.replace(context, current_dt=quick_time(10, 0, 0)) - event = build_scheduling_event(context) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - instance_tags = get_tags(ec2_instance) - assert started_tag1 in instance_tags - assert started_tag2 in instance_tags - assert stopped_tag1 not in instance_tags - assert stopped_tag2 not in instance_tags + + with simple_schedule(begintime="10:00", endtime="20:00") as context: + stop_ec2_instances(ec2_instance) + # before period start (populates state table) + context.run_scheduling_request_handler(dt=quick_time(9, 55)) + + # first start + context.run_scheduling_request_handler( + dt=quick_time(10, 0), environment=mock_environ + ) + + instance_tags = get_tags(ec2_instance) + assert started_tag1 in instance_tags + assert started_tag2 in instance_tags + assert stopped_tag1 not in instance_tags + assert stopped_tag2 not in instance_tags + + # stop + context.run_scheduling_request_handler( + dt=quick_time(20, 0), environment=mock_environ + ) + instance_tags = get_tags(ec2_instance) + assert started_tag1 not in instance_tags + assert started_tag2 not in instance_tags + assert stopped_tag1 in instance_tags + assert stopped_tag2 in instance_tags + + # second start + context.run_scheduling_request_handler( + dt=quick_time(10, 0), environment=mock_environ + ) + instance_tags = get_tags(ec2_instance) + assert started_tag1 in instance_tags + assert started_tag2 in instance_tags + assert stopped_tag1 not in instance_tags + assert stopped_tag2 not in instance_tags diff --git a/source/app/tests/integration/test_lambda_schedule_encoding_limits.py 
b/source/app/tests/integration/test_lambda_schedule_encoding_limits.py index eba97b25..9c6975cd 100644 --- a/source/app/tests/integration/test_lambda_schedule_encoding_limits.py +++ b/source/app/tests/integration/test_lambda_schedule_encoding_limits.py @@ -8,13 +8,12 @@ In this scenario the schedules will be omitted from the event, and instead need to be refetched from dynamodb """ -import datetime import json +from typing import Iterator from unittest.mock import MagicMock, patch -from instance_scheduler import configuration -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod +from _pytest.fixtures import fixture + from instance_scheduler.configuration.scheduling_context import SchedulingContext from instance_scheduler.handler import scheduling_orchestrator from instance_scheduler.handler.scheduling_orchestrator import ( @@ -22,29 +21,35 @@ SchedulingOrchestratorHandler, ) from instance_scheduler.handler.scheduling_request import ( - SchedulerRequest, + SchedulingRequest, SchedulingRequestHandler, + validate_scheduler_request, +) +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from instance_scheduler.model.store.in_memory_period_definition_store import ( + InMemoryPeriodDefinitionStore, +) +from instance_scheduler.model.store.in_memory_schedule_definition_store import ( + InMemoryScheduleDefinitionStore, +) +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleDefinitionStore, ) from tests.context import MockLambdaContext -from tests.integration.helpers.global_config import 
build_global_config +from tests.handler.test_scheduling_orchestration_handler import ( + orchestrator_env_overrides, + scheduling_request_from_lambda_invoke, +) from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import build_context - -global_config = build_global_config( - schedules={ - "global-schedule": InstanceSchedule( - name="global-schedule", - periods=[ - { - "period": RunningPeriod( - name="global-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, +from tests.logger import MockLogger +from tests.test_utils.app_env_utils import with_mock_app_env +from tests.test_utils.mock_orchestrator_environment import MockOrchestratorEnvironment +from tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, ) mock_event_bridge_event: OrchestrationRequest = { @@ -52,49 +57,136 @@ } -def build_stripped_event(context: SchedulingContext) -> SchedulerRequest: - payload = context.to_dict() - scheduling_orchestrator.strip_schedules_and_periods(payload) - return { - "action": "scheduler:run", - "configuration": payload, - "dispatch_time": "dispatchTime", - } +@fixture +def mocked_lambda_invoke() -> Iterator[MagicMock]: + with patch.object(SchedulingOrchestratorHandler, "lambda_client") as lambda_client: + with patch.object(lambda_client, "invoke") as invoke_func: + yield invoke_func + + +def test_schedules_and_periods_are_encoded_into_payload( + mocked_lambda_invoke: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, + config_item_store: DdbConfigItemStore, +) -> None: + schedule_store.put( + ScheduleDefinition( + name="my_schedule", periods=[PeriodIdentifier.of("my_period")] + ) + ) + period_store.put( + PeriodDefinition( + name="my_period", + begintime="10:00", + endtime="20:00", + monthdays={"1"}, # sets can cause json to fail if not handled correctly + weekdays={"mon"}, 
+ months={"jan"}, + ) + ) + + SchedulingOrchestratorHandler( + event=mock_event_bridge_event, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=[], + enable_ec2_service=True, + enable_schedule_hub_account=True, + ), + logger=MockLogger(), + ).handle_request() + + assert mocked_lambda_invoke.call_count == 1 + scheduling_request: SchedulingRequest = scheduling_request_from_lambda_invoke( + mocked_lambda_invoke.call_args + ) + + assert "schedules" in scheduling_request + assert "periods" in scheduling_request + + # validate expected serial data + InMemoryScheduleDefinitionStore.validate_serial_data( + scheduling_request["schedules"] + ) + InMemoryPeriodDefinitionStore.validate_serial_data(scheduling_request["periods"]) + + encoded_schedules = InMemoryScheduleDefinitionStore.deserialize( + scheduling_request["schedules"] + ) + encoded_periods = InMemoryPeriodDefinitionStore.deserialize( + scheduling_request["periods"] + ) + + assert schedule_store.find_all() == encoded_schedules.find_all() + assert period_store.find_all() == encoded_periods.find_all() -@patch.object(SchedulingOrchestratorHandler, "configuration", global_config) @patch.object(SchedulingOrchestratorHandler, "lambda_client") -def test_strips_schedules_when_payload_is_too_large(lambda_client: MagicMock) -> None: +@with_mock_app_env( + **orchestrator_env_overrides( + schedule_regions=[], enable_ec2_service=True, enable_schedule_hub_account=True + ) +) +def test_strips_schedules_when_payload_is_too_large( + lambda_client: MagicMock, config_item_store: DdbConfigItemStore +) -> None: scheduling_orchestrator.LAMBDA_PAYLOAD_CAPACITY_BYTES = 0 with patch.object(lambda_client, "invoke") as invoke_func: cloudwatch_handler = SchedulingOrchestratorHandler( - event=mock_event_bridge_event, context=MockLambdaContext() + event=mock_event_bridge_event, + context=MockLambdaContext(), + env=MockOrchestratorEnvironment( + schedule_regions=[], + enable_ec2_service=True, + 
enable_schedule_hub_account=True, + ), + logger=MockLogger(), ) cloudwatch_handler.handle_request() assert invoke_func.call_count == 1 payload = invoke_func.call_args[1]["Payload"] - content = json.loads(payload) - assert content["configuration"]["schedules"] == {} - assert content["configuration"]["periods"] == {} + + scheduling_request: SchedulingRequest = json.loads(payload) + validate_scheduler_request(scheduling_request) + assert "schedules" not in scheduling_request + assert "periods" not in scheduling_request -@patch.object(configuration, "get_global_configuration") @patch("instance_scheduler.handler.scheduling_request.InstanceScheduler") def test_scheduling_request_handler_reloads_schedules_when_not_provided( - mock_scheduler: MagicMock, fetch_global_config_func: MagicMock + mock_scheduler: MagicMock, + schedule_store: ScheduleDefinitionStore, + period_store: PeriodDefinitionStore, ) -> None: # setup - fetch_global_config_func.return_value = global_config - context = build_context(current_dt=quick_time(10, 0, 0)) - event = build_stripped_event(context) + schedule_store.put( + ScheduleDefinition( + name="fetched_schedule", periods=[PeriodIdentifier.of("my_period")] + ) + ) + + period_store.put(PeriodDefinition(name="my_period", begintime="10:00")) + + request = SchedulingRequest( + action="scheduler:run", + account="123456789012", + region="us-east-1", + service="ec2", + current_dt=quick_time(10, 0, 0).isoformat(), + # schedules explicitly omitted, + # periods explicitly omitted, + dispatch_time=quick_time(10, 0, 0).isoformat(), + ) # run handler - scheduling_handler = SchedulingRequestHandler(event, MockLambdaContext()) + scheduling_handler = SchedulingRequestHandler( + request, MockLambdaContext(), MockSchedulingRequestEnvironment(), MockLogger() + ) scheduling_handler.handle_request() - # assert that the schedule that gets passed to instance_scheduler is global-schedule, not stripped-schedule + # assert that when the schedules are not provided in the 
request they are still loaded from dynamo assert mock_scheduler.call_count == 1 - schedules_passed_to_scheduler = mock_scheduler.call_args.args[1].schedules - assert "global-schedule" in schedules_passed_to_scheduler - assert "stripped-schedule" not in schedules_passed_to_scheduler + context_passed_to_scheduler: SchedulingContext = mock_scheduler.call_args.args[1] + assert context_passed_to_scheduler.get_schedule("fetched_schedule") is not None diff --git a/source/app/tests/integration/test_maint_window_scheduling.py b/source/app/tests/integration/test_maint_window_scheduling.py new file mode 100644 index 00000000..4382377f --- /dev/null +++ b/source/app/tests/integration/test_maint_window_scheduling.py @@ -0,0 +1,229 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime, timezone +from typing import Sequence +from unittest.mock import patch +from zoneinfo import ZoneInfo + +import pytest + +from instance_scheduler.maint_win.ssm_mw_client import SSMMWClient +from instance_scheduler.model import ( + EC2SSMMaintenanceWindow, + EC2SSMMaintenanceWindowStore, + MWStore, +) +from instance_scheduler.schedulers.instance_states import InstanceStates +from tests.integration.helpers.ec2_helpers import get_current_state, stop_ec2_instances +from tests.integration.helpers.run_handler import simple_schedule +from tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, +) + + +def invoke_scheduling_request_handler_with_maintenance_windows( + ssm_maintenance_windows: Sequence[str], +) -> None: + with simple_schedule( + begintime="20:00", + endtime="22:00", + ssm_maintenance_window=ssm_maintenance_windows, + ) as context: + context.run_scheduling_request_handler( + dt=datetime(year=2024, month=1, day=25, hour=14, tzinfo=timezone.utc), + environment=MockSchedulingRequestEnvironment( + enable_ec2_ssm_maintenance_windows=True + ), + ) + + +def 
test_maint_window_is_enforced( + ec2_instance: str, + ec2_instance_states: InstanceStates, + maint_win_store: EC2SSMMaintenanceWindowStore, +) -> None: + # maintenance window is active (exists in maint window table) + # instance should start because window is enforced + + maint_win_store.put_window_dynamodb( + EC2SSMMaintenanceWindow( + account_id="123456789012", + region="us-east-1", + window_id="mw-00000000000000000", + window_name="my-active-window", + schedule_timezone=ZoneInfo("UTC"), + next_execution_time=datetime( + year=2024, month=1, day=25, hour=14, tzinfo=timezone.utc + ), + duration_hours=1, + ) + ) + + with simple_schedule( + begintime="20:00", + endtime="22:00", + ssm_maintenance_window=["my-active-window"], + ) as context: + # start with instance stopped + stop_ec2_instances(ec2_instance) + assert get_current_state(ec2_instance) == "stopped" + + context.run_scheduling_request_handler( + dt=datetime(year=2024, month=1, day=25, hour=14, tzinfo=timezone.utc), + environment=MockSchedulingRequestEnvironment( + enable_ec2_ssm_maintenance_windows=True + ), + ) + + assert get_current_state(ec2_instance) == "running" + + +def test_inactive_maintenance_windows_have_no_effect( + ec2_instance: str, + ec2_instance_states: InstanceStates, + mw_store: MWStore, +) -> None: + # multiple maintenance windows (none are active) + # instance should stay stopped since no maintenance windows are active + with simple_schedule( + begintime="20:00", + endtime="22:00", + ssm_maintenance_window=["my-inactive-window"], + ) as context: + # start with instance stopped + stop_ec2_instances(ec2_instance) + assert get_current_state(ec2_instance) == "stopped" + + context.run_scheduling_request_handler( + dt=datetime(year=2024, month=1, day=25, hour=14, tzinfo=timezone.utc), + environment=MockSchedulingRequestEnvironment( + enable_ec2_ssm_maintenance_windows=True + ), + ) + + assert get_current_state(ec2_instance) == "stopped" + + +@pytest.mark.parametrize( + "active_window_name", + [ + 
"my-window-a", + "my-window-b", + ], +) +def test_multiple_maintenance_windows_one_active( + active_window_name: str, + ec2_instance: str, + ec2_instance_states: InstanceStates, + maint_win_store: EC2SSMMaintenanceWindowStore, +) -> None: + # maintenance window is active (exists in maint window table) + # instance should start if any maintenance window is active + + maint_win_store.put_window_dynamodb( + EC2SSMMaintenanceWindow( + account_id="123456789012", + region="us-east-1", + window_id="mw-00000000000000000", + window_name=active_window_name, + schedule_timezone=ZoneInfo("UTC"), + next_execution_time=datetime( + year=2024, month=1, day=25, hour=14, tzinfo=timezone.utc + ), + duration_hours=1, + ) + ) + + with simple_schedule( + begintime="20:00", + endtime="22:00", + ssm_maintenance_window=[ + "my-window-a", + "my-window-b", + ], + ) as context: + # start with instance stopped + stop_ec2_instances(ec2_instance) + assert get_current_state(ec2_instance) == "stopped" + + context.run_scheduling_request_handler( + dt=datetime(year=2024, month=1, day=25, hour=14, tzinfo=timezone.utc), + environment=MockSchedulingRequestEnvironment( + enable_ec2_ssm_maintenance_windows=True + ), + ) + + assert get_current_state(ec2_instance) == "running" + + +def test_all_maintenance_windows_sharing_non_unique_names_are_used( + ec2_instance: str, + ec2_instance_states: InstanceStates, + mw_store: MWStore, +) -> None: + non_unique_window_name = "non-unique-window-name" + # windows share a name but have unique id + # both should be used when the shared name is provided to the schedule + windows = [ + EC2SSMMaintenanceWindow( + account_id="123456789012", + region="us-east-1", + window_id="mw-00000000000000000", + window_name=non_unique_window_name, + schedule_timezone=ZoneInfo("UTC"), + next_execution_time=datetime( + year=2024, month=1, day=25, hour=14, tzinfo=timezone.utc + ), + duration_hours=1, + ), + EC2SSMMaintenanceWindow( + account_id="123456789012", + region="us-east-1", + 
window_id="mw-00000000000000001", + window_name=non_unique_window_name, + schedule_timezone=ZoneInfo("UTC"), + next_execution_time=datetime( + year=2024, month=1, day=25, hour=18, tzinfo=timezone.utc + ), + duration_hours=1, + ), + ] + + with ( + simple_schedule( + begintime="20:00", + endtime="22:00", + ssm_maintenance_window=[non_unique_window_name], # only one name provided + ) as context, + patch.object(SSMMWClient, "get_mws_from_ssm", return_value=windows), + ): + # start with instance stopped + stop_ec2_instances(ec2_instance) + assert get_current_state(ec2_instance) == "stopped" + + # running during first maintenance window + context.run_scheduling_request_handler( + dt=datetime(year=2024, month=1, day=25, hour=14, tzinfo=timezone.utc), + environment=MockSchedulingRequestEnvironment( + enable_ec2_ssm_maintenance_windows=True, + ), + ) + assert get_current_state(ec2_instance) == "running" + + # stopped in between windows + context.run_scheduling_request_handler( + dt=datetime(year=2024, month=1, day=25, hour=16, tzinfo=timezone.utc), + environment=MockSchedulingRequestEnvironment( + enable_ec2_ssm_maintenance_windows=True + ), + ) + assert get_current_state(ec2_instance) == "stopped" + + # running during second maintenance window + context.run_scheduling_request_handler( + dt=datetime(year=2024, month=1, day=25, hour=18, tzinfo=timezone.utc), + environment=MockSchedulingRequestEnvironment( + enable_ec2_ssm_maintenance_windows=True + ), + ) + assert get_current_state(ec2_instance) == "running" diff --git a/source/app/tests/integration/test_multi_period_schedules.py b/source/app/tests/integration/test_multi_period_schedules.py new file mode 100644 index 00000000..25068703 --- /dev/null +++ b/source/app/tests/integration/test_multi_period_schedules.py @@ -0,0 +1,93 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime, timezone + +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.schedulers.instance_states import InstanceStates +from tests.integration.helpers.ec2_helpers import get_current_state +from tests.integration.helpers.run_handler import multi_period_schedule +from tests.integration.helpers.schedule_helpers import quick_time + + +def test_adjacent_periods_do_not_get_stopped_in_the_middle( + ec2_instance: str, + ec2_instance_states: InstanceStates, +) -> None: + with multi_period_schedule( + period_definitions=[ + PeriodDefinition(name="period1", begintime="5:00", endtime="10:00"), + PeriodDefinition(name="period2", begintime="10:01", endtime="15:00"), + ] + ) as context: + # in period 1 (populates state table) + context.run_scheduling_request_handler(dt=quick_time(9, 30)) + assert get_current_state(ec2_instance) == "running" + + # time between periods + context.run_scheduling_request_handler(dt=quick_time(10, 0, 30)) + assert get_current_state(ec2_instance) == "running" + + +def test_adjacent_periods_across_midnight_do_not_get_stopped_in_the_middle( + ec2_instance: str, + ec2_instance_states: InstanceStates, +) -> None: + + with multi_period_schedule( + period_definitions=[ + PeriodDefinition(name="period1", begintime="5:00", endtime="23:59"), + PeriodDefinition(name="period2", begintime="0:00", endtime="3:00"), + ] + ) as context: + # in period 1 (populates state table) + context.run_scheduling_request_handler(dt=quick_time(23, 30)) + assert get_current_state(ec2_instance) == "running" + + # just before midnight + context.run_scheduling_request_handler(dt=quick_time(23, 59, 59)) + assert get_current_state(ec2_instance) == "running" + + # midnight + context.run_scheduling_request_handler(dt=quick_time(0, 0, 0)) + assert get_current_state(ec2_instance) == "running" + + +def test_not_running_period( + ec2_instance: str, + ec2_instance_states: InstanceStates, 
+) -> None: + # example schedule that turns an instance off from 6pm to 7pm on tuesdays + with multi_period_schedule( + period_definitions=[ + PeriodDefinition( + name="period1", begintime="0:00", endtime="18:00", weekdays={"1"} + ), + PeriodDefinition( + name="period2", begintime="19:00", endtime="23:59", weekdays={"1"} + ), + PeriodDefinition(name="period3", weekdays={"2-0"}), + ] + ) as context: + # initial run (populates state table + context.run_scheduling_request_handler( + dt=datetime(2024, 1, 2, 17, 55, tzinfo=timezone.utc) + ) + assert get_current_state(instance_id=ec2_instance) == "running" + + # stop at 6pm on Tuesday + context.run_scheduling_request_handler( + dt=datetime(2024, 1, 2, 18, 0, tzinfo=timezone.utc) + ) + assert get_current_state(instance_id=ec2_instance) == "stopped" + + # start again at 7pm + context.run_scheduling_request_handler( + dt=datetime(2024, 1, 2, 19, 0, tzinfo=timezone.utc) + ) + assert get_current_state(instance_id=ec2_instance) == "running" + + # don't stop at 6pm on Wednesday + context.run_scheduling_request_handler( + dt=datetime(2024, 1, 3, 18, 0, tzinfo=timezone.utc) + ) + assert get_current_state(instance_id=ec2_instance) == "running" diff --git a/source/app/tests/integration/test_op_metrics_level.py b/source/app/tests/integration/test_op_metrics_level.py new file mode 100644 index 00000000..262d394e --- /dev/null +++ b/source/app/tests/integration/test_op_metrics_level.py @@ -0,0 +1,243 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Iterator +from unittest.mock import MagicMock, patch + +from _pytest.fixtures import fixture +from freezegun import freeze_time +from mypy_boto3_ec2 import EC2Client +from mypy_boto3_ec2.literals import InstanceTypeType +from mypy_boto3_rds import RDSClient + +from instance_scheduler.ops_monitoring.cw_ops_insights import ( + CloudWatchOperationalInsights, +) +from tests.conftest import get_ami +from tests.integration.helpers.boto_client_helpers import client_in_account_region +from tests.integration.helpers.ec2_helpers import ( + create_ec2_instances, + start_ec2_instances, + stop_ec2_instances, +) +from tests.integration.helpers.run_handler import simple_schedule +from tests.integration.helpers.schedule_helpers import quick_time +from tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, +) +from tests.test_utils.unordered_list import UnorderedList + +if TYPE_CHECKING: + from mypy_boto3_rds.type_defs import CreateDBInstanceResultTypeDef +else: + CreateDBInstanceResultTypeDef = object + +# ec2 instance types +small: InstanceTypeType = "t2.micro" +medium: InstanceTypeType = "t2.medium" +large: InstanceTypeType = "t2.large" + + +def rds_instance( + account: str, region: str, instance_type: str = "db.m5.large", stop: bool = False +) -> str: + instance_id = "rds-test-instance" + rds_client: RDSClient = client_in_account_region("rds", account, region) + result = new_rds_instances_of_type(rds_client, instance_type, instance_id) + instance_arn = result["DBInstance"]["DBInstanceArn"] + rds_client.add_tags_to_resource( + ResourceName=instance_arn, Tags=[{"Key": "Schedule", "Value": "test-schedule"}] + ) + if stop: + rds_client.stop_db_instance(DBInstanceIdentifier=instance_id) + + return instance_id + + +def new_rds_instances_of_type( + rds_client: RDSClient, rds_instance_type: str, instance_id: str +) -> 
CreateDBInstanceResultTypeDef: + result: CreateDBInstanceResultTypeDef = rds_client.create_db_instance( + DBInstanceIdentifier=instance_id, + DBInstanceClass=rds_instance_type, + Engine="postgres", + ) + + return result + + +def create_test_instances( + count: int, + instance_type: InstanceTypeType, + account: str, + region: str, + stop: bool = False, +) -> list[str]: + ec2_client: EC2Client = client_in_account_region("ec2", account, region) + instance_ids = new_ec2_instances_of_type(ec2_client, count, instance_type) + ec2_client.create_tags( + Resources=instance_ids, Tags=[{"Key": "Schedule", "Value": "test-schedule"}] + ) + + if stop: + ec2_client.stop_instances(InstanceIds=instance_ids) + + return instance_ids + + +def new_ec2_instances_of_type( + ec2_client: EC2Client, count: int, ec2_instance_type: InstanceTypeType +) -> list[str]: + create_response = ec2_client.run_instances( + ImageId=get_ami(), + MinCount=count, + MaxCount=count, + InstanceType=ec2_instance_type, + ) + instance_ids = [instance["InstanceId"] for instance in create_response["Instances"]] + + return instance_ids + + +@fixture +def mocked_put_metric_data() -> Iterator[MagicMock]: + with patch.object( + CloudWatchOperationalInsights, "cloudwatch_client" + ) as cloudwatch_client: + with patch.object(cloudwatch_client, "put_metric_data") as put_metric_func: + yield put_metric_func + + +def test_metrics_not_sent_when_disabled( + state_table: str, + mocked_put_metric_data: MagicMock, +) -> None: + with simple_schedule(begintime="10:00", endtime="20:00") as context: + context.run_scheduling_request_handler( + dt=quick_time(12, 0), + environment=MockSchedulingRequestEnvironment(enable_ops_monitoring=False), + ) + + mocked_put_metric_data.assert_not_called() + + +@freeze_time("2023-12-28 20:23:37") +def test_ops_monitoring_metrics_sent_to_cw_when_enabled( + state_table: str, + mocked_put_metric_data: MagicMock, +) -> None: + stop_ec2_instances( + *create_ec2_instances(1, instance_type=small, 
schedule_name="test-schedule") + ) + start_ec2_instances( + *create_ec2_instances(5, instance_type=medium, schedule_name="test-schedule") + ) + + with simple_schedule(begintime="10:00", endtime="20:00") as context: + context.run_scheduling_request_handler( + dt=quick_time(12, 0), + environment=MockSchedulingRequestEnvironment(enable_ops_monitoring=True), + ) + + fixed_time = datetime(2023, 12, 28, 20, 23, 37, tzinfo=timezone.utc) + expected_metric_data = { + "Namespace": "my-stack-name:InstanceScheduler", + "MetricData": UnorderedList( + [ + # per schedule data + { + "MetricName": "ManagedInstances", + "Dimensions": [ + {"Name": "Service", "Value": "ec2"}, + {"Name": "Schedule", "Value": "test-schedule"}, + {"Name": "SchedulingInterval", "Value": "5"}, + ], + "Timestamp": fixed_time, + "Value": 6, + "Unit": "Count", + }, + { + "MetricName": "RunningInstances", + "Dimensions": [ + {"Name": "Service", "Value": "ec2"}, + {"Name": "Schedule", "Value": "test-schedule"}, + {"Name": "SchedulingInterval", "Value": "5"}, + ], + "Timestamp": fixed_time, + "Value": 5, + "Unit": "Count", + }, + # per instance_type data + { + "MetricName": "ManagedInstances", + "Dimensions": [ + {"Name": "Service", "Value": "ec2"}, + {"Name": "InstanceType", "Value": "t2.micro"}, + {"Name": "SchedulingInterval", "Value": "5"}, + ], + "Timestamp": fixed_time, + "Value": 1, + "Unit": "Count", + }, + { + "MetricName": "RunningInstances", + "Dimensions": [ + {"Name": "Service", "Value": "ec2"}, + {"Name": "InstanceType", "Value": "t2.micro"}, + {"Name": "SchedulingInterval", "Value": "5"}, + ], + "Timestamp": fixed_time, + "Value": 0, + "Unit": "Count", + }, + { + "MetricName": "StoppedInstances", + "Dimensions": [ + {"Name": "Service", "Value": "ec2"}, + {"Name": "InstanceType", "Value": "t2.micro"}, + {"Name": "SchedulingInterval", "Value": "5"}, + ], + "Timestamp": fixed_time, + "Value": 1, + "Unit": "Count", + }, + { + "MetricName": "ManagedInstances", + "Dimensions": [ + {"Name": "Service", 
"Value": "ec2"}, + {"Name": "InstanceType", "Value": "t2.medium"}, + {"Name": "SchedulingInterval", "Value": "5"}, + ], + "Timestamp": fixed_time, + "Value": 5, + "Unit": "Count", + }, + { + "MetricName": "RunningInstances", + "Dimensions": [ + {"Name": "Service", "Value": "ec2"}, + {"Name": "InstanceType", "Value": "t2.medium"}, + {"Name": "SchedulingInterval", "Value": "5"}, + ], + "Timestamp": fixed_time, + "Value": 5, + "Unit": "Count", + }, + { + "MetricName": "StoppedInstances", + "Dimensions": [ + {"Name": "Service", "Value": "ec2"}, + {"Name": "InstanceType", "Value": "t2.medium"}, + {"Name": "SchedulingInterval", "Value": "5"}, + ], + "Timestamp": fixed_time, + "Value": 0, + "Unit": "Count", + }, + ] + ), + } + + args, kwargs = mocked_put_metric_data.call_args + assert kwargs == expected_metric_data + mocked_put_metric_data.assert_called_once() diff --git a/source/app/tests/integration/test_rds_cluster_instance.py b/source/app/tests/integration/test_rds_cluster_instance.py index f33af900..75d89f90 100644 --- a/source/app/tests/integration/test_rds_cluster_instance.py +++ b/source/app/tests/integration/test_rds_cluster_instance.py @@ -3,19 +3,15 @@ from typing import TYPE_CHECKING import boto3 +from mypy_boto3_rds.type_defs import DBClusterMemberTypeDef -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext from tests.integration.helpers.rds_helpers import ( get_rds_cluster_state, get_rds_instance_state, ) +from tests.integration.helpers.run_handler import simple_schedule, target from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, -) if TYPE_CHECKING: from mypy_boto3_rds.client import RDSClient @@ -23,41 +19,50 @@ RDSClient = object -def test_rds_cluster_instance_not_scheduled( - 
rds_cluster: str, rds_instance_states: InstanceStates +def tag_rds_instance( + instance: DBClusterMemberTypeDef, schedule_name: str, rds_client: RDSClient ) -> None: - """Instances part of an aurora cluster should not be scheduled, even if tagged""" - context = build_context( - current_dt=quick_time(10, 0, 0), schedule_clusters=False, service="rds" + instance_description = rds_client.describe_db_instances( + DBInstanceIdentifier=instance["DBInstanceIdentifier"] + ) + arn = instance_description["DBInstances"][0]["DBInstanceArn"] + rds_client.add_tags_to_resource( + ResourceName=arn, Tags=[{"Key": "Schedule", "Value": schedule_name}] ) - event = build_scheduling_event(context) + +def test_rds_cluster_instances_are_not_scheduled_individually( + rds_cluster: str, + rds_instance_states: InstanceStates, +) -> None: rds_client: RDSClient = boto3.client("rds") cluster = rds_client.describe_db_clusters(DBClusterIdentifier=rds_cluster) instances = [instance for instance in cluster["DBClusters"][0]["DBClusterMembers"]] - assert instances - for instance in instances: - instance_description = rds_client.describe_db_instances( - DBInstanceIdentifier=instance["DBInstanceIdentifier"] - ) - arn = instance_description["DBInstances"][0]["DBInstanceArn"] - rds_client.add_tags_to_resource( - ResourceName=arn, Tags=[{"Key": "Schedule", "Value": "test-schedule"}] - ) - rds_client.stop_db_instance( - DBInstanceIdentifier=instance["DBInstanceIdentifier"] - ) + assert len(instances) > 0 # test would be invalid if there were no instances - rds_client.stop_db_cluster(DBClusterIdentifier=rds_cluster) + # customer incorrectly tags instances that are members of a cluster + for instance in instances: + tag_rds_instance(instance, "some-other-schedule", rds_client) - rds_instance_states.set_instance_state(rds_cluster, "stopped") - rds_instance_states.save() + with simple_schedule( + name="some-other-schedule", begintime="10:00", endtime="20:00" + ) as context: + # within period (populate state 
table) + context.run_scheduling_request_handler( + dt=quick_time(19, 55), target=target(service="rds") + ) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # period end (would normally stop) + context.run_scheduling_request_handler( + dt=quick_time(20, 0), target=target(service="rds") + ) - assert get_rds_cluster_state(rds_cluster) == "stopped" - for instance in instances: - assert get_rds_instance_state(instance["DBInstanceIdentifier"]) == "stopped" + # neither the instances nor the cluster should have been stopped + # (the cluster is tagged with a different schedule) + assert get_rds_cluster_state(rds_cluster) == "available" + for instance in instances: + assert ( + get_rds_instance_state(instance["DBInstanceIdentifier"]) == "available" + ) diff --git a/source/app/tests/integration/test_rds_cluster_scheduling.py b/source/app/tests/integration/test_rds_cluster_scheduling.py index da8829b3..e543c574 100644 --- a/source/app/tests/integration/test_rds_cluster_scheduling.py +++ b/source/app/tests/integration/test_rds_cluster_scheduling.py @@ -1,53 +1,50 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import boto3 -from mypy_boto3_rds import RDSClient -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext -from tests.integration.helpers.rds_helpers import get_rds_cluster_state -from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, +from instance_scheduler.util.app_env import AppEnv +from tests.integration.helpers.rds_helpers import ( + get_rds_cluster_state, + stop_rds_clusters, ) +from tests.integration.helpers.run_handler import simple_schedule, target +from tests.integration.helpers.schedule_helpers import quick_time def test_rds_cluster_starts_at_beginning_of_period( - rds_cluster: str, rds_instance_states: InstanceStates + rds_cluster: str, + rds_instance_states: InstanceStates, ) -> None: - context = build_context( - current_dt=quick_time(10, 0, 0), schedule_clusters=True, service="rds" - ) - event = build_scheduling_event(context) - - rds_client: RDSClient = boto3.client("rds") - rds_client.stop_db_cluster(DBClusterIdentifier=rds_cluster) - - rds_instance_states.set_instance_state(rds_cluster, "stopped") - rds_instance_states.save() + with simple_schedule(begintime="10:00", endtime="20:00") as context: + stop_rds_clusters(rds_cluster) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # before start of period (populates state table) + context.run_scheduling_request_handler( + dt=quick_time(9, 55), target=target(service="rds") + ) + assert get_rds_cluster_state(rds_cluster) == "stopped" - assert get_rds_cluster_state(rds_cluster) == "available" + # start of period + context.run_scheduling_request_handler( + dt=quick_time(10, 0), target=target(service="rds") + ) + assert get_rds_cluster_state(rds_cluster) == 
"available" def test_rds_cluster_stops_at_end_of_period( - rds_cluster: str, rds_instance_states: InstanceStates + rds_cluster: str, + rds_instance_states: InstanceStates, + app_env: AppEnv, ) -> None: - context = build_context( - current_dt=quick_time(20, 0, 0), schedule_clusters=True, service="rds" - ) - event = build_scheduling_event(context) - - assert get_rds_cluster_state(rds_cluster) == "available" - - rds_instance_states.set_instance_state(rds_cluster, "running") - rds_instance_states.save() - - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - assert get_rds_cluster_state(rds_cluster) == "stopped" + with simple_schedule(begintime="10:00", endtime="20:00") as context: + # before end of period (populates state table) + context.run_scheduling_request_handler( + dt=quick_time(19, 55), target=target(service="rds") + ) + assert get_rds_cluster_state(rds_cluster) == "available" + + # end of period + context.run_scheduling_request_handler( + dt=quick_time(20, 0), target=target(service="rds") + ) + assert get_rds_cluster_state(rds_cluster) == "stopped" diff --git a/source/app/tests/integration/test_resize.py b/source/app/tests/integration/test_resize.py index 16cc69fb..5d69d903 100644 --- a/source/app/tests/integration/test_resize.py +++ b/source/app/tests/integration/test_resize.py @@ -1,119 +1,126 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import datetime +from mypy_boto3_ec2.literals import InstanceTypeType -import boto3 -from mypy_boto3_ec2.client import EC2Client - -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler +from instance_scheduler.model.period_definition import PeriodDefinition from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext -from tests.integration.helpers.ec2_helpers import get_current_state -from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, +from tests.integration.helpers.ec2_helpers import ( + create_ec2_instances, + get_current_instance_type, + get_current_state, + stop_ec2_instances, ) +from tests.integration.helpers.run_handler import resizable_multi_period_schedule +from tests.integration.helpers.schedule_helpers import quick_time def test_stopped_instance_resized( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - ec2_client.modify_instance_attribute( - InstanceId=ec2_instance, InstanceType={"Value": "c6g.medium"} - ) - - desired_instance_type = "c6g.2xlarge" - - context = build_context( - current_dt=quick_time(10, 0, 0), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ), - "instancetype": desired_instance_type, - } - ], - ) - }, - ) - - event = build_scheduling_event(context) - - 
ec2_instance_states.set_instance_state(ec2_instance, "stopped") - ec2_instance_states.save() - - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - assert get_current_state(ec2_instance) == "running" - assert ( - ec2_client.describe_instances(InstanceIds=[ec2_instance])["Reservations"][0][ - "Instances" - ][0]["InstanceType"] - == desired_instance_type + (ec2_instance,) = create_ec2_instances( + 1, "test-schedule", instance_type="c6g.medium" ) + desired_instance_type: InstanceTypeType = "c6g.2xlarge" + + stop_ec2_instances(ec2_instance) + + with resizable_multi_period_schedule( + name="test-schedule", + period_definitions=[ + { + "period": PeriodDefinition( + name="test-period", begintime="10:00", endtime="20:00" + ), + "desired_type": desired_instance_type, + } + ], + ) as context: + # start and resize + context.run_scheduling_request_handler(dt=quick_time(10, 0)) + assert get_current_state(ec2_instance) == "running" + assert get_current_instance_type(ec2_instance) == desired_instance_type def test_running_instance_is_stopped_for_resize( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.stop_instances(InstanceIds=[ec2_instance]) - ec2_client.modify_instance_attribute( - InstanceId=ec2_instance, InstanceType={"Value": "c6g.medium"} - ) - ec2_client.start_instances(InstanceIds=[ec2_instance]) - - desired_instance_type = "c6g.2xlarge" - - context = build_context( - current_dt=quick_time(15, 0, 0), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ), - "instancetype": desired_instance_type, - } - ], - ) - }, + (ec2_instance,) = create_ec2_instances( + 1, "test-schedule", instance_type="c6g.medium" ) - - event = 
build_scheduling_event(context) - - ec2_instance_states.set_instance_state(ec2_instance, "running") - ec2_instance_states.save() - - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - ec2_instance_states.load(account="123456789012", region="us-east-1") - assert get_current_state(ec2_instance) == "stopped" - - # rerun handler to confirm the resize would be finished next interval - - handler.handle_request() - assert get_current_state(ec2_instance) == "running" - assert ( - ec2_client.describe_instances(InstanceIds=[ec2_instance])["Reservations"][0][ - "Instances" - ][0]["InstanceType"] - == desired_instance_type + desired_instance_type: InstanceTypeType = "c6g.2xlarge" + + with resizable_multi_period_schedule( + name="test-schedule", + period_definitions=[ + { + "period": PeriodDefinition( + name="test-period", begintime="10:00", endtime="20:00" + ), + "desired_type": desired_instance_type, + } + ], + ) as context: + # should stop instance so it can be resized + context.run_scheduling_request_handler(dt=quick_time(15, 0)) + assert get_current_state(ec2_instance) == "stopped" + + # should restart instance as correct size + context.run_scheduling_request_handler(dt=quick_time(15, 5)) + assert get_current_state(ec2_instance) == "running" + assert get_current_instance_type(ec2_instance) == desired_instance_type + + +def test_resizing_with_multi_period_schedule( + ec2_instance: str, + ec2_instance_states: InstanceStates, +) -> None: + (ec2_instance,) = create_ec2_instances( + 1, "test-schedule", instance_type="c6g.medium" ) + outer_period_instance_type: InstanceTypeType = "c6g.medium" + inner_period_instance_type: InstanceTypeType = "c6g.2xlarge" + + with resizable_multi_period_schedule( + name="test-schedule", + period_definitions=[ + { + "period": PeriodDefinition( + name="outer-period", begintime="5:00", endtime="20:00" + ), + "desired_type": outer_period_instance_type, + }, + { + "period": PeriodDefinition( + 
name="inner-period", begintime="12:00", endtime="14:00" + ), + "desired_type": inner_period_instance_type, + }, + ], + ) as context: + # in outer period, no change should occur + context.run_scheduling_request_handler(dt=quick_time(7, 0, 0)) + assert get_current_state(ec2_instance) == "running" + assert get_current_instance_type(ec2_instance) == outer_period_instance_type + + # enter inner period, should resize + context.run_scheduling_request_handler(dt=quick_time(12, 0, 0)) + assert get_current_state(ec2_instance) == "stopped" + + context.run_scheduling_request_handler(dt=quick_time(12, 5, 0)) + assert get_current_state(ec2_instance) == "running" + assert get_current_instance_type(ec2_instance) == inner_period_instance_type + + # within inner period, no action (should not thrash with outer period) + context.run_scheduling_request_handler(dt=quick_time(13, 0, 0)) + assert get_current_state(ec2_instance) == "running" + assert get_current_instance_type(ec2_instance) == inner_period_instance_type + + # exit inner period, should resize to outer period + context.run_scheduling_request_handler(dt=quick_time(14, 0, 0)) + assert get_current_state(ec2_instance) == "stopped" + + context.run_scheduling_request_handler(dt=quick_time(14, 5, 0)) + assert get_current_state(ec2_instance) == "running" + assert get_current_instance_type(ec2_instance) == outer_period_instance_type diff --git a/source/app/tests/integration/test_retain_running_flag.py b/source/app/tests/integration/test_retain_running_flag.py index 6dc4cef5..d5faf688 100644 --- a/source/app/tests/integration/test_retain_running_flag.py +++ b/source/app/tests/integration/test_retain_running_flag.py @@ -1,61 +1,13 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import datetime -from typing import Any - -import boto3 -from mypy_boto3_ec2.client import EC2Client - -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext -from tests.integration.helpers.ec2_helpers import get_current_state -from tests.integration.helpers.schedule_helpers import at_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, -) - - -def schedule_event_at_time( - time: datetime.time, retain_running_flag: bool = True -) -> Any: - """ - helper method for quickly building a scheduling event with the following: - begintime: 10:00 - endtime: 20:00 - retain_running: true - """ - context = build_context( - current_dt=at_time(time), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - retain_running=retain_running_flag, - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, - ) - - event = build_scheduling_event(context) - - return event +from tests.integration.helpers.ec2_helpers import get_current_state, start_ec2_instances +from tests.integration.helpers.run_handler import simple_schedule +from tests.integration.helpers.schedule_helpers import quick_time def setup_retain_running_scenario( ec2_instance: str, - ec2_instance_states: InstanceStates, - retain_running_flag: bool = True, ) -> None: """ The retain_running flag comes into effect when an instance is manually started by @@ -72,99 +24,93 @@ def setup_retain_running_scenario( this should cause the instance to be identified as having been started manually and will tag it with the 
retain_running flag """ - # ----------------------------Event Definition--------------------------# - event = schedule_event_at_time( - datetime.time(10, 0, 0), retain_running_flag=retain_running_flag - ) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) + with simple_schedule( + begintime="10:00", endtime="20:00", retain_running=True + ) as context: + # stopped under normal conditions (populates state table) + context.run_scheduling_request_handler(dt=quick_time(20, 0)) + assert get_current_state(ec2_instance) == "stopped" - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "stopped") - ec2_instance_states.save() + # customer manually starts instance + start_ec2_instances(ec2_instance) - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # already running instance is identified when it should normally be started + context.run_scheduling_request_handler(dt=quick_time(10, 0)) def test_instance_is_stopped_at_end_of_period_when_flag_is_not_set( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - # ----------------------------Setup--------------------------# setup_retain_running_scenario( - ec2_instance, ec2_instance_states, retain_running_flag=False + ec2_instance, ) - # Ec2 instance and instance states should already be setup now - - # ----------------------------Event Definition--------------------------# - event = schedule_event_at_time(datetime.time(20, 0, 0), retain_running_flag=False) - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate 
result---------------------# - assert get_current_state(ec2_instance) == "stopped" + with simple_schedule( + begintime="10:00", endtime="20:00", retain_running=False + ) as context: + context.run_scheduling_request_handler(quick_time(20, 0)) + assert get_current_state(ec2_instance) == "stopped" def test_instance_is_not_stopped_at_end_of_period_when_flag_is_set( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - # ----------------------------Setup--------------------------# setup_retain_running_scenario( - ec2_instance, ec2_instance_states, retain_running_flag=True + ec2_instance, ) - # Ec2 instance and instance states should already be setup now - - # ----------------------------Event Definition--------------------------# - event = schedule_event_at_time(datetime.time(20, 0, 0)) - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" + with simple_schedule( + begintime="10:00", endtime="20:00", retain_running=True + ) as context: + context.run_scheduling_request_handler(quick_time(20, 0)) + assert get_current_state(ec2_instance) == "running" def test_retain_running_behavior_over_multiple_scheduling_cycles( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - # ----------------------------Setup--------------------------# setup_retain_running_scenario( - ec2_instance, ec2_instance_states, retain_running_flag=True + ec2_instance, ) - for i in range(1, 3): + with simple_schedule( + begintime="10:00", endtime="20:00", retain_running=True + ) as context: # ----------------------------Period Start--------------------------# - event = schedule_event_at_time(datetime.time(10, 0, 0)) - handler = 
SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + context.run_scheduling_request_handler(quick_time(10, 0)) + assert get_current_state(ec2_instance) == "running" + # ----------------------------Period end--------------------------# + context.run_scheduling_request_handler(quick_time(20, 0)) assert get_current_state(ec2_instance) == "running" - # ----------------------------Period End----------------------------# - event = schedule_event_at_time(datetime.time(20, 0, 0)) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # ----------------------------Period Start--------------------------# + context.run_scheduling_request_handler(quick_time(10, 0)) + assert get_current_state(ec2_instance) == "running" + # ----------------------------Period end--------------------------# + context.run_scheduling_request_handler(quick_time(20, 0)) assert get_current_state(ec2_instance) == "running" # disable retain-running flag to confirm running behavior was actually because of the flag - # ----------------------------Period Start--------------------------# - event = schedule_event_at_time(datetime.time(10, 0, 0), retain_running_flag=False) - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + with simple_schedule( + begintime="10:00", endtime="20:00", retain_running=False + ) as context: + # ----------------------------Period Start--------------------------# + context.run_scheduling_request_handler(quick_time(10, 0)) + assert get_current_state(ec2_instance) == "running" - assert get_current_state(ec2_instance) == "running" + # ----------------------------Period end--------------------------# + context.run_scheduling_request_handler(quick_time(20, 0)) + assert get_current_state(ec2_instance) == "stopped" - # ----------------------------Period End----------------------------# - event = schedule_event_at_time(datetime.time(20, 0, 0), retain_running_flag=False) - handler = 
SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + # ----------------------------Period Start--------------------------# + context.run_scheduling_request_handler(quick_time(10, 0)) + assert get_current_state(ec2_instance) == "running" - assert get_current_state(ec2_instance) == "stopped" + # ----------------------------Period end--------------------------# + context.run_scheduling_request_handler(quick_time(20, 0)) + assert get_current_state(ec2_instance) == "stopped" diff --git a/source/app/tests/integration/test_stack_update.py b/source/app/tests/integration/test_stack_update.py deleted file mode 100644 index 57987c57..00000000 --- a/source/app/tests/integration/test_stack_update.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -import json -from typing import TYPE_CHECKING -from unittest.mock import MagicMock, patch - -import boto3 -from aws_lambda_powertools.utilities.typing import LambdaContext - -from instance_scheduler.handler.config_resource import ( - SchedulerSetupHandler, - ServiceSetupResourceProperties, -) -from instance_scheduler.util.app_env import AppEnv -from instance_scheduler.util.custom_resource import CustomResourceRequest - -if TYPE_CHECKING: - from mypy_boto3_dynamodb.client import DynamoDBClient -else: - DynamoDBClient = object - - -@patch("custom_resource.requests") -class CustomResourceLambdaContext(LambdaContext): - def __init__(self) -> None: - LambdaContext.__init__(self) - self._log_group_name = "my-log-group" - - @staticmethod - def get_remaining_time_in_millis() -> int: - return 1000 * 60 * 15 - - -def resource_properties( - org_id: str, config_table_name: str -) -> ServiceSetupResourceProperties: - return ServiceSetupResourceProperties( - ServiceToken="lambda-arn", - timeout=120, - config_table=config_table_name, - tagname="Schedule", - default_timezone="UTC", - use_metrics="False", - 
scheduled_services=["ec2"], - schedule_clusters="False", - create_rds_snapshot="False", - regions=["us-east-1"], - remote_account_ids=[org_id], - namespace="test", - aws_partition="aws", - scheduler_role_name="scheduler-role", - schedule_lambda_account="False", - trace="False", - enable_ssm_maintenance_windows="False", - log_retention_days=30, - started_tags="", - stopped_tags="", - stack_version="v9.9.9", - use_aws_organizations="True", - ) - - -@patch("requests.put") -def test_remote_account_ids_retained( - mock_requests: MagicMock, - app_env: AppEnv, - config_table: None, -) -> None: - accounts = ["111111111111", "222222222222"] - - config_table_name = app_env.config_table_name - org_id = "o-0000000000" - ddb: DynamoDBClient = boto3.client("dynamodb") - ddb.put_item( - TableName=config_table_name, - Item={ - "type": {"S": "config"}, - "name": {"S": "scheduler"}, - "aws_partition": {"S": "aws"}, - "create_rds_snapshot": {"BOOL": False}, - "default_timezone": {"S": "UTC"}, - "enable_ssm_maintenance_windows": {"BOOL": False}, - "namespace": {"S": "test"}, - "organization_id": {"S": org_id}, - "regions": {"SS": ["us-east-1"]}, - "remote_account_ids": {"SS": accounts}, - "scheduled_services": {"SS": ["ec2"]}, - "scheduler_role_name": {"S": "scheduler-role"}, - "schedule_clusters": {"BOOL": False}, - "schedule_lambda_account": {"BOOL": True}, - "started_tags": {"S": ""}, - "tagname": {"S": "Schedule"}, - "trace": {"BOOL": False}, - "use_metrics": {"BOOL": False}, - }, - ) - - event = CustomResourceRequest[ServiceSetupResourceProperties]( - ServiceToken="", - RequestType="Update", - ResponseURL="", - StackId="arn:aws:cloudformation:us-east-1:111111111111:stack/my-stack/00000000-0000-0000-0000-000000000000", - RequestId="", - ResourceType="Custom::ServiceSetup", - LogicalResourceId="", - PhysicalResourceId="", - ResourceProperties=resource_properties(org_id, config_table_name), - OldResourceProperties=resource_properties(org_id, config_table_name), - ) - - 
SchedulerSetupHandler(event, CustomResourceLambdaContext()).handle_request() - - mock_requests.assert_called_once() - assert json.loads(mock_requests.call_args.kwargs["data"])["Status"] == "SUCCESS" - - config = ddb.get_item( - TableName=config_table_name, - Key={"type": {"S": "config"}, "name": {"S": "scheduler"}}, - )["Item"] - - assert set(config["remote_account_ids"]["SS"]) == set(accounts) diff --git a/source/app/tests/integration/test_stop_new_instances_flag.py b/source/app/tests/integration/test_stop_new_instances_flag.py index ac02d297..3950d934 100644 --- a/source/app/tests/integration/test_stop_new_instances_flag.py +++ b/source/app/tests/integration/test_stop_new_instances_flag.py @@ -1,225 +1,59 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -import datetime - -import boto3 -from mypy_boto3_ec2.client import EC2Client - -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.handler.scheduling_request import SchedulingRequestHandler from instance_scheduler.schedulers.instance_states import InstanceStates -from tests.context import MockLambdaContext +from instance_scheduler.schedulers.states import InstanceState from tests.integration.helpers.ec2_helpers import get_current_state +from tests.integration.helpers.run_handler import simple_schedule from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import ( - build_context, - build_scheduling_event, -) def test_new_instance_stops_when_outside_period_and_flag_is_set( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - """ - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 5:00 - instance: running - last_desired_state: none (not seen before) - - 
----expect---- - instance: stopped - """ - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(5, 0, 0), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - stop_new_instances=True, - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, - ) - - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - # none set (never been seen before) - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "stopped" + with simple_schedule( + begintime="10:00", endtime="20:00", stop_new_instances=True + ) as context: + assert get_current_state(ec2_instance) == "running" + context.run_scheduling_request_handler(dt=quick_time(5, 0)) + assert get_current_state(ec2_instance) == "stopped" def test_instance_does_not_stop_when_it_is_not_new( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - """ - the stop new instances flag should only affect instances that are "new" to the scheduler - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 5:00 - instance: running - last_desired_state: stopped (normal expected from being outside a period) - - ----expect---- - instance: running - """ - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(5, 0, 0), - schedules={ - "test-schedule": 
InstanceSchedule( - name="test-schedule", - stop_new_instances=True, - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, - ) - - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - ec2_instance_states.set_instance_state(ec2_instance, "stopped") - ec2_instance_states.save() - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() + with simple_schedule( + begintime="10:00", endtime="20:00", stop_new_instances=True + ) as context: + # already registered in state table + ec2_instance_states.set_instance_state(ec2_instance, InstanceState.STOPPED) + ec2_instance_states.save() - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" + assert get_current_state(ec2_instance) == "running" + context.run_scheduling_request_handler(dt=quick_time(5, 0)) + assert get_current_state(ec2_instance) == "running" def test_new_instance_does_not_stop_when_outside_period_and_flag_is_not_set( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - """ - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 5:00 - instance: running - last_desired_state: none (not seen before) - - ----expect---- - instance: stopped - """ - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(5, 0, 0), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - stop_new_instances=False, - periods=[ - { - "period": RunningPeriod( 
- name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, - ) - - event = build_scheduling_event(context) - # ----------------------------EC2 Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - # none set (never been seen before) - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" + with simple_schedule( + begintime="10:00", endtime="20:00", stop_new_instances=False + ) as context: + assert get_current_state(ec2_instance) == "running" + context.run_scheduling_request_handler(dt=quick_time(5, 0)) + assert get_current_state(ec2_instance) == "running" def test_new_instance_does_not_stop_when_inside_period_and_flag_is_set( - ec2_instance: str, ec2_instance_states: InstanceStates + ec2_instance: str, + ec2_instance_states: InstanceStates, ) -> None: - """ - flag should only stop instances that are newly detected when outside a valid running period - ----inputs---- - schedule: - begintime = 10:00 - endtime = 20:00 - current time: 15:00 - instance: running - last_desired_state: none (not seen before) - - ----expect---- - instance: running - """ - # ----------------------------Event Definition--------------------------# - context = build_context( - current_dt=quick_time(15, 0, 0), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - stop_new_instances=True, - periods=[ - { - "period": RunningPeriod( - name="test-period", - begintime=datetime.time(10, 0, 0), - endtime=datetime.time(20, 0, 0), - ) - } - ], - ) - }, - ) - - event = build_scheduling_event(context) - # ----------------------------EC2 
Instance-------------------------# - ec2_client: EC2Client = boto3.client("ec2") - ec2_client.start_instances(InstanceIds=[ec2_instance]) - - # ------------------------Last Desired State------------------------# - # none set (never been seen before) - - # -------------------run handler------------------------# - handler = SchedulingRequestHandler(event, MockLambdaContext()) - handler.handle_request() - - # ---------------------validate result---------------------# - assert get_current_state(ec2_instance) == "running" + with simple_schedule( + begintime="10:00", endtime="20:00", stop_new_instances=True + ) as context: + assert get_current_state(ec2_instance) == "running" + context.run_scheduling_request_handler(dt=quick_time(15, 0)) + assert get_current_state(ec2_instance) == "running" diff --git a/source/app/tests/logger.py b/source/app/tests/logger.py index 8ea01f36..4baf43a1 100644 --- a/source/app/tests/logger.py +++ b/source/app/tests/logger.py @@ -6,20 +6,30 @@ class MockLogger(Logger): - def __init__(self) -> None: + def __init__( + self, + log_group: str = "", + log_stream: str = "", + topic_arn: str = "", + debug: bool = False, + ) -> None: Logger.__init__(self, log_group="", log_stream="", topic_arn="") - def info(self, _: str, *__: Any) -> None: - """noop""" + def info(self, msg: str, *args: Any) -> None: + s = msg if len(args) == 0 else msg.format(*args) + print(f"info: {s}") - def error(self, _: str, *__: Any) -> None: - """noop""" + def error(self, msg: str, *args: Any) -> None: + s = msg if len(args) == 0 else msg.format(*args) + print(f"error: {s}") - def warning(self, _: str, *__: Any) -> None: - """noop""" + def warning(self, msg: str, *args: Any) -> None: + s = msg if len(args) == 0 else msg.format(*args) + print(f"warning: {s}") - def debug(self, _: str, *__: Any) -> None: - """noop""" + def debug(self, msg: str, *args: Any) -> None: + s = msg if len(args) == 0 else msg.format(*args) + print(f"debug: {s}") def flush(self) -> None: """noop""" diff 
--git a/source/app/tests/maint_win/test_ec2_ssm.py b/source/app/tests/maint_win/test_ec2_ssm.py deleted file mode 100644 index da51e644..00000000 --- a/source/app/tests/maint_win/test_ec2_ssm.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -from datetime import time -from typing import Any, Final -from unittest.mock import MagicMock, patch - -from boto3.session import Session -from freezegun import freeze_time -from pytest_mock import MockFixture - -from instance_scheduler.maint_win import EC2SSMMaintenanceWindows -from tests.logger import MockLogger - -next_execution_time: Final = "2020-04-09T19:00Z" - - -def test_ssm_maintenance_windows_1(mocker: MockFixture) -> None: - window_list = [ - { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-1", - "Enabled": True, - "Duration": 1, - "Cutoff": 0, - "Schedule": "cron(0 0 19 ? * * *)", - "NextExecutionTime": next_execution_time, - } - ] - maint_win: Any = EC2SSMMaintenanceWindows( - hub_session=Session(), - spoke_session=Session(), - spoke_account_id="111111111111", - table_name="maint-win-table", - scheduler_interval=10, - logger=MockLogger(), - ) - mocker.patch.object(maint_win, "get_ssm_windows") - maint_win.get_ssm_windows.return_value = window_list - session = "" - account = "1111" - region = "us-east-1" - - response = maint_win.ssm_maintenance_windows(session, account, region) - - assert response["mon-1"].periods[0]["period"].name == "mon-1-period" - assert response["mon-1"].periods[0]["period"].begintime == time(18, 40) - assert response["mon-1"].periods[0]["period"].endtime == time(20, 0) - assert response["mon-1"].periods[0]["period"].months == {4} - assert response["mon-1"].periods[0]["period"].monthdays == {9} - - -def test_ssm_maintenance_windows_2(mocker: MockFixture) -> None: - window_list = [ - { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-1", - "Enabled": True, - "Duration": 1, - "Cutoff": 0, - 
"Schedule": "cron(0 0 19 ? * * *)", - "NextExecutionTime": "2020-05-10T15:00Z", - } - ] - maint_win: Any = EC2SSMMaintenanceWindows( - hub_session=Session(), - spoke_session=Session(), - spoke_account_id="111111111111", - table_name="maint-win-table", - scheduler_interval=10, - logger=MockLogger(), - ) - mocker.patch.object(maint_win, "get_ssm_windows") - maint_win.get_ssm_windows.return_value = window_list - session = "" - account = "1111" - region = "us-east-1" - response = maint_win.ssm_maintenance_windows(session, account, region) - - assert response["mon-1"].periods[0]["period"].name == "mon-1-period" - assert response["mon-1"].periods[0]["period"].begintime == time(14, 40) - assert response["mon-1"].periods[0]["period"].endtime == time(16, 0) - assert response["mon-1"].periods[0]["period"].months == {5} - assert response["mon-1"].periods[0]["period"].monthdays == {10} - - -@freeze_time("2020-05-10 15:30:34") -def test_check_window_running_1() -> None: - window = { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-1", - "Enabled": True, - "Duration": 1, - "Cutoff": 0, - "Schedule": "cron(0 10 19 ? * * *)", - "NextExecutionTime": "2020-05-10T15:00Z", - } - mw = EC2SSMMaintenanceWindows( - hub_session=Session(), - spoke_session=Session(), - spoke_account_id="", - table_name="", - scheduler_interval=5, - logger=MockLogger(), - ) - assert mw.check_window_running(window) - - -@freeze_time("2020-05-11 15:30:34") -def test_check_window_running_2() -> None: - window = { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-1", - "Enabled": True, - "Duration": 1, - "Cutoff": 0, - "Schedule": "cron(0 10 19 ? 
* * *)", - "NextExecutionTime": "2020-05-10T15:00Z", - } - mw = EC2SSMMaintenanceWindows( - hub_session=Session(), - spoke_session=Session(), - spoke_account_id="", - table_name="", - scheduler_interval=5, - logger=MockLogger(), - ) - assert not mw.check_window_running(window) - - -@patch("instance_scheduler.maint_win.ec2_ssm.EC2SSMMaintenanceWindowStore") -def test_get_ssm_windows(mock_store: MagicMock) -> None: - window_list = [ - { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-1", - "Duration": 1, - "NextExecutionTime": next_execution_time, - } - ] - maint_win: Any = EC2SSMMaintenanceWindows( - hub_session=Session(), - spoke_session=Session(), - spoke_account_id="111111111111", - table_name="maint-win-table", - scheduler_interval=10, - logger=MockLogger(), - ) - mock_store.return_value.get_ssm_windows_db.return_value = window_list - - session = "" - account = "1111" - region = "us-east-1" - with patch.object(maint_win, "get_ssm_windows_service"), patch.object( - maint_win, "process_ssm_window" - ): - response = maint_win.get_ssm_windows(session, account, region) - - assert response == window_list - - -@patch("instance_scheduler.maint_win.ec2_ssm.EC2SSMMaintenanceWindowStore") -def test_process_ssm_window_1(mock_store: MagicMock) -> None: - ssm_windows_db = [ - { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-1", - "Duration": 1, - "NextExecutionTime": next_execution_time, - }, - { - "WindowId": "mw-018e7137c74304wb5", - "Name": "mon-2", - "Duration": 1, - "NextExecutionTime": "2020-04-10T19:00Z", - }, - ] - window = { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-1", - "Duration": 1, - "NextExecutionTime": next_execution_time, - } - maint_win: Any = EC2SSMMaintenanceWindows( - hub_session=Session(), - spoke_session=Session(), - spoke_account_id="111111111111", - table_name="maint-win-table", - scheduler_interval=10, - logger=MockLogger(), - ) - account = "1111" - region = "us-east-1" - maint_win.process_ssm_window( - window=window, 
ssm_windows_db=ssm_windows_db, account=account, region=region - ) - mock_store.return_value.put_window_dynamodb.assert_called_with( - window=window, account=account, region=region - ) - - -@patch("instance_scheduler.maint_win.ec2_ssm.EC2SSMMaintenanceWindowStore") -def test_process_ssm_window_2(mock_store: MagicMock) -> None: - ssm_windows_db = [ - { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-1", - "Enabled": True, - "Duration": 1, - "Cutoff": 0, - "Schedule": "cron(0 10 19 ? * * *)", - "NextExecutionTime": next_execution_time, - }, - { - "WindowId": "mw-018e7137c74304wb5", - "Name": "mon-2", - "Enabled": False, - "Duration": 1, - "Cutoff": 0, - "Schedule": "cron(0 10 19 ? * * *)", - "NextExecutionTime": "2020-04-10T19:00Z", - }, - ] - window = { - "WindowId": "mw-018e7137c74304cb5", - "Name": "mon-3", - "Enabled": True, - "Duration": 1, - "Cutoff": 0, - "Schedule": "cron(0 10 19 ? * * *)", - "NextExecutionTime": next_execution_time, - } - maint_win: Any = EC2SSMMaintenanceWindows( - hub_session=Session(), - spoke_session=Session(), - spoke_account_id="111111111111", - table_name="maint-win-table", - scheduler_interval=10, - logger=MockLogger(), - ) - account = "1111" - region = "us-east-1" - maint_win.process_ssm_window(window, ssm_windows_db, account, region) - mock_store.return_value.put_window_dynamodb.assert_called_with( - window=window, account=account, region=region - ) diff --git a/source/app/tests/maint_win/test_maintenance_window_context.py b/source/app/tests/maint_win/test_maintenance_window_context.py new file mode 100644 index 00000000..e845e4c9 --- /dev/null +++ b/source/app/tests/maint_win/test_maintenance_window_context.py @@ -0,0 +1,341 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from contextlib import contextmanager +from datetime import datetime, timezone +from typing import Iterable, Iterator, Optional +from unittest.mock import patch +from zoneinfo import ZoneInfo + +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.maint_win.maintenance_window_context import ( + MaintenanceWindowContext, +) +from instance_scheduler.maint_win.ssm_mw_client import SSMMWClient +from instance_scheduler.model import EC2SSMMaintenanceWindow, MWStore +from instance_scheduler.util.session_manager import AssumedRole +from tests.integration.helpers.schedule_helpers import quick_time +from tests.integration.helpers.scheduling_context_builder import ( + build_scheduling_context, +) +from tests.logger import MockLogger +from tests.model.store.test_mw_store import new_mw + + +def test_find_by_name_returns_all_windows_with_same_name( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + a_windows = [ + new_mw("mw-00000000000000000", window_name="aaa"), + new_mw("mw-00000000000000001", window_name="aaa"), + new_mw("mw-00000000000000002", window_name="aaa"), + ] + + b_windows = [ + new_mw("mw-00000000000000003", window_name="bbb"), + new_mw("mw-00000000000000004", window_name="bbb"), + ] + + with ssm_returning(a_windows + b_windows): + mw_context = new_mw_context( + mw_store, hub_role, mws_referenced_by_schedules=a_windows + b_windows + ) + + fetched_a_windows = mw_context.find_by_name("aaa") + fetched_b_windows = mw_context.find_by_name("bbb") + + assert all(a_window in fetched_a_windows for a_window in a_windows) + assert all(b_window not in fetched_a_windows for b_window in b_windows) + assert all(a_window not in fetched_b_windows for a_window in a_windows) + assert all(b_window in fetched_b_windows for b_window in b_windows) + + +def test_contains_returns_false_on_empty_context( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + mw_context = 
new_mw_context(mw_store, hub_role) + assert "some_value" not in mw_context + + +def test_contains_returns_true_when_name_has_at_least_1_associated_mw( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + windows = [ + new_mw("mw-00000000000000001", window_name="named_window"), + new_mw("mw-00000000000000002", window_name="shared_name"), + new_mw("mw-00000000000000003", window_name="shared_name"), + ] + with ssm_returning(windows): + mw_context = new_mw_context( + mw_store, hub_role, mws_referenced_by_schedules=windows + ) + + assert "shared_name" in mw_context + assert "named_window" in mw_context + assert "missing" not in mw_context + + +def test_find_by_name_returns_empty_list_when_no_windows_found( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + mw_context = new_mw_context(mw_store, hub_role) + + assert list(mw_context.find_by_name("unknown")) == [] + + +def test_reconcile_adds_new_mws_to_db(mw_store: MWStore, hub_role: AssumedRole) -> None: + with ssm_returning([new_mw("mw-00000000000000000")]): + new_mw_context(mw_store, hub_role).reconcile_ssm_with_dynamodb() + + db_mws = list( + mw_store.find_by_account_region(hub_role.account, hub_role.region) + ) + assert len(db_mws) == 1 + + +def test_reconcile_filters_out_windows_not_referenced_by_at_least_one_schedule( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + referenced_window = new_mw("mw-00000000000000000", window_name="referenced") + not_referenced_window = new_mw("mw-00000000000000001", window_name="not-referenced") + with ssm_returning([referenced_window, not_referenced_window]): + new_mw_context( + mw_store, hub_role, mws_referenced_by_schedules=[referenced_window] + ).reconcile_ssm_with_dynamodb() + + db_mws = list( + mw_store.find_by_account_region(hub_role.account, hub_role.region) + ) + assert referenced_window in db_mws + assert not_referenced_window not in db_mws + + +def test_reconcile_deletes_windows_no_longer_referenced_by_at_least_one_schedule( + mw_store: MWStore, hub_role: 
AssumedRole +) -> None: + referenced_window = new_mw("mw-00000000000000000", window_name="referenced") + not_referenced_window = new_mw("mw-00000000000000001", window_name="not-referenced") + mw_store.put(not_referenced_window) # put window into db + with ssm_returning([referenced_window, not_referenced_window]): + new_mw_context( + mw_store, hub_role, mws_referenced_by_schedules=[referenced_window] + ).reconcile_ssm_with_dynamodb() + + db_mws = list( + mw_store.find_by_account_region(hub_role.account, hub_role.region) + ) + assert ( + referenced_window in db_mws + ) # validate that delete was only on the non-referenced window + assert not_referenced_window not in db_mws + + +def test_reconcile_updates_mws_in_db_when_existing_window_is_not_active( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + old_window = new_mw( + "mw-00000000000000001", + next_execution_time=quick_time(10, 0, 0), + duration_hours=2, + ) + next_window = new_mw( + "mw-00000000000000001", + next_execution_time=quick_time(20, 0, 0), + duration_hours=2, + ) + mw_store.put(old_window) + + with ssm_returning([next_window]): + new_mw_context( + mw_store, hub_role, current_dt=quick_time(15, 0, 0) + ).reconcile_ssm_with_dynamodb() + + db_mws = list( + mw_store.find_by_account_region(hub_role.account, hub_role.region) + ) + assert len(db_mws) == 1 + assert old_window not in db_mws + assert next_window in db_mws + + +def test_reconcile_removes_windows_from_db_that_are_no_longer_in_ssm( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + window_to_be_preserved = new_mw("mw-00000000000000001") + window_to_be_deleted = new_mw("mw-00000000000000002") + + mw_store.put(window_to_be_deleted) + mw_store.put(window_to_be_preserved) + + with ssm_returning([window_to_be_preserved]): + new_mw_context(mw_store, hub_role).reconcile_ssm_with_dynamodb() + + db_mws = list( + mw_store.find_by_account_region(hub_role.account, hub_role.region) + ) + assert len(db_mws) == 1 + assert window_to_be_preserved in 
db_mws + assert window_to_be_deleted not in db_mws + + +def test_reconcile_does_not_delete_windows_that_are_still_running_until_they_stop( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + running_window = new_mw( + "mw-00000000000000001", + next_execution_time=quick_time(10, 0, 0), + duration_hours=5, + ) + mw_store.put(running_window) + + with ssm_returning([]): + # window running + new_mw_context( + mw_store, hub_role, current_dt=quick_time(11, 0, 0) + ).reconcile_ssm_with_dynamodb() + + assert running_window in mw_store.find_by_account_region( + hub_role.account, hub_role.region + ) + + # window stopped + new_mw_context( + mw_store, hub_role, current_dt=quick_time(15, 0, 0) + ).reconcile_ssm_with_dynamodb() + + assert running_window not in mw_store.find_by_account_region( + hub_role.account, hub_role.region + ) + + +def test_reconcile_does_not_overwrite_windows_that_are_still_running_until_they_stop( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + running_window = new_mw( + "mw-00000000000000001", + next_execution_time=datetime(2024, 5, 15, 10, 0, 0, tzinfo=timezone.utc), + duration_hours=5, + ) + next_window = new_mw( + "mw-00000000000000001", + next_execution_time=datetime(2024, 5, 16, 10, 0, 0, tzinfo=timezone.utc), + duration_hours=5, + ) + mw_store.put(running_window) + + with ssm_returning([next_window]): + # window running + new_mw_context( + mw_store, + hub_role, + current_dt=datetime(2024, 5, 15, 11, 0, 0, tzinfo=timezone.utc), + ).reconcile_ssm_with_dynamodb() + + assert running_window in mw_store.find_by_account_region( + hub_role.account, hub_role.region + ) + assert next_window not in mw_store.find_by_account_region( + hub_role.account, hub_role.region + ) + + # window stopped + new_mw_context( + mw_store, + hub_role, + current_dt=datetime(2024, 5, 15, 15, 0, 0, tzinfo=timezone.utc), + ).reconcile_ssm_with_dynamodb() + + db_mws = list( + mw_store.find_by_account_region(hub_role.account, hub_role.region) + ) + assert len(db_mws) == 1 
+ assert running_window not in db_mws + assert next_window in db_mws + + +def test_find_windows_after_reconcile_matches_contents_of_db( + mw_store: MWStore, hub_role: AssumedRole +) -> None: + window_to_be_added = new_mw("mw-00000000000000000") + window_to_be_preserved = new_mw("mw-00000000000000001") + window_to_be_deleted = new_mw("mw-00000000000000002") + window_to_be_overwritten = new_mw("mw-00000000000000003", duration_hours=2) + window_to_be_updated = new_mw("mw-00000000000000003", duration_hours=5) + window_not_referenced_by_schedule = new_mw( + "mw-00000000000000004", window_name="not-referenced" + ) + + mw_store.put(window_to_be_deleted) + mw_store.put(window_to_be_preserved) + mw_store.put(window_to_be_overwritten) + + with ssm_returning( + [window_to_be_added, window_to_be_preserved, window_to_be_updated] + ): + mw_context = new_mw_context(mw_store, hub_role) + mw_context.reconcile_ssm_with_dynamodb() + + db_mws = list( + mw_store.find_by_account_region(hub_role.account, hub_role.region) + ) + + # check assert for each action + assert window_to_be_added in db_mws + assert window_to_be_added in mw_context.find_by_name( + window_to_be_added.window_name + ) + + assert window_to_be_preserved in db_mws + assert window_to_be_preserved in mw_context.find_by_name( + window_to_be_preserved.window_name + ) + + assert window_to_be_updated in db_mws + assert window_to_be_updated in mw_context.find_by_name( + window_to_be_updated.window_name + ) + + assert window_to_be_overwritten not in db_mws + assert window_to_be_overwritten not in mw_context.find_by_name( + window_to_be_overwritten.window_name + ) + + assert window_to_be_deleted not in db_mws + assert window_to_be_deleted not in mw_context.find_by_name( + window_to_be_deleted.window_name + ) + + assert window_not_referenced_by_schedule not in db_mws + assert window_not_referenced_by_schedule not in mw_context.find_by_name( + window_not_referenced_by_schedule.window_name + ) + + +def new_mw_context( + mw_store: 
MWStore, + hub_role: AssumedRole, + current_dt: datetime = quick_time(10, 0, 0), + mws_referenced_by_schedules: Optional[list[EC2SSMMaintenanceWindow]] = None, +) -> MaintenanceWindowContext: + if mws_referenced_by_schedules is None: + # make sure the default mw name is referenced by at least one schedule (simplifies tests) + mws_referenced_by_schedules = [new_mw("mw-11112222333344445")] + mw_schedule = InstanceSchedule( + name="mw-schedule", + ssm_maintenance_window=[mw.window_name for mw in mws_referenced_by_schedules], + timezone=ZoneInfo("UTC"), + ) + return MaintenanceWindowContext( + scheduling_context=build_scheduling_context( + current_dt, schedules={mw_schedule.name: mw_schedule} + ), + mw_store=mw_store, + spoke_scheduler_role=hub_role, + logger=MockLogger(), + ) + + +@contextmanager +def ssm_returning(mws: Iterable[EC2SSMMaintenanceWindow]) -> Iterator[None]: + with patch.object(SSMMWClient, "get_mws_from_ssm") as ssm_endpoint: + ssm_endpoint.return_value = mws + yield diff --git a/source/app/tests/maint_win/test_ssm_mw_client.py b/source/app/tests/maint_win/test_ssm_mw_client.py new file mode 100644 index 00000000..ae872398 --- /dev/null +++ b/source/app/tests/maint_win/test_ssm_mw_client.py @@ -0,0 +1,152 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from collections.abc import Iterable, Iterator +from contextlib import contextmanager +from datetime import datetime, timedelta +from typing import TYPE_CHECKING, Final +from unittest.mock import MagicMock +from zoneinfo import ZoneInfo + +from boto3 import client +from boto3.session import Session +from botocore.stub import Stubber +from freezegun import freeze_time + +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.cron.cron_recurrence_expression import CronRecurrenceExpression +from instance_scheduler.cron.expression import CronSingleValueNumeric +from instance_scheduler.model.maint_win import EC2SSMMaintenanceWindow + +if TYPE_CHECKING: + from mypy_boto3_ssm.client import SSMClient + from mypy_boto3_ssm.type_defs import MaintenanceWindowIdentityTypeDef +else: + SSMClient = object + MaintenanceWindowIdentityTypeDef = object + + +# use common UTC zoneinfo in these tests to allow proper emulation of SSM API +utc_timezone: Final = ZoneInfo("Etc/UTC") + + +def to_identity(window: EC2SSMMaintenanceWindow) -> MaintenanceWindowIdentityTypeDef: + if window.schedule_timezone == utc_timezone: + next_execution_time = window.next_execution_time.strftime("%Y-%m-%dT%H:%M:%SZ") + else: + next_execution_time = window.next_execution_time.isoformat() + + return { + "WindowId": window.window_id, + "Duration": window.duration_hours, + "Enabled": True, + "Name": window.window_name, + "NextExecutionTime": next_execution_time, + "ScheduleTimezone": str(window.schedule_timezone), + } + + +def assert_valid_maintenance_window_schedule(schedule: InstanceSchedule) -> None: + """schedules generated from maintenance windows must have these properties""" + assert schedule.override_status is None + assert schedule.stop_new_instances is True + assert schedule.ssm_maintenance_window is None + assert schedule.enforced is True + assert schedule.hibernate is False + assert schedule.retain_running is 
False + assert schedule.configured_in_stack is None + + for period in schedule.periods: + assert period.get("instancetype") is None + + +scheduler_interval_minutes: Final = 5 +expected_window_buffer: Final = 10 + scheduler_interval_minutes + + +def assert_schedule_matches_window( + schedule: InstanceSchedule, window: EC2SSMMaintenanceWindow +) -> None: + assert_valid_maintenance_window_schedule(schedule) + + assert schedule.timezone == window.schedule_timezone + + # does not handle multi-period windows + assert len(schedule.periods) == 1 + period: Final = schedule.periods[0]["period"] + + # does not handle windows that cross day boundaries + expected_begintime: Final = window.next_execution_time - timedelta( + minutes=expected_window_buffer + ) + assert period.begintime == expected_begintime.time() + + # does not handle windows that cross day boundaries + expected_endtime: Final = window.next_execution_time + timedelta( + hours=window.duration_hours + ) + assert period.endtime == expected_endtime.time() + + assert period.cron_recurrence == CronRecurrenceExpression( + months=CronSingleValueNumeric(value=window.next_execution_time.month), + monthdays=CronSingleValueNumeric(value=window.next_execution_time.day), + ) + + +@contextmanager +def mock_ssm_windows(windows: Iterable[EC2SSMMaintenanceWindow]) -> Iterator[Session]: + session: Final = Session() + ssm: Final[SSMClient] = client("ssm") + stub_ssm: Final = Stubber(ssm) + setattr(session, "client", MagicMock(return_value=ssm)) + + stub_ssm.add_response( + "describe_maintenance_windows", + { + "WindowIdentities": list(map(to_identity, windows)), + }, + { + "Filters": [{"Key": "Enabled", "Values": ["true"]}], + }, + ) + + with stub_ssm: + yield session + + +account_id: Final = "123456789012" +region: Final = "us-east-1" + + +def test_window_currently_running() -> None: + next_execution_time: Final = datetime( + year=2023, month=11, day=6, hour=15, minute=14, tzinfo=utc_timezone + ) + duration_hours: Final = 1 + + 
window: Final = EC2SSMMaintenanceWindow( + account_id="111111111111", + region="us-east-1", + schedule_timezone=utc_timezone, + window_id="mw-00000000000000000", + window_name="mon-1", + duration_hours=duration_hours, + next_execution_time=next_execution_time, + ) + + end: Final = next_execution_time + timedelta(hours=duration_hours) + + for dt in ( + next_execution_time - timedelta(minutes=20), + end, + end + timedelta(minutes=1), + ): + with freeze_time(dt): + assert not window.is_running_at(dt, scheduler_interval_minutes=5) + for dt in ( + next_execution_time, + next_execution_time - timedelta(minutes=10), # 10 minute early start + next_execution_time + timedelta(minutes=1), + end - timedelta(minutes=1), + ): + with freeze_time(dt): + assert window.is_running_at(dt, scheduler_interval_minutes=5) diff --git a/source/app/tests/model/store/__init__.py b/source/app/tests/model/store/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/source/app/tests/model/store/test_ddb_config_item_store.py b/source/app/tests/model/store/test_ddb_config_item_store.py new file mode 100644 index 00000000..f95f74e8 --- /dev/null +++ b/source/app/tests/model/store/test_ddb_config_item_store.py @@ -0,0 +1,130 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from _pytest.fixtures import fixture + +from instance_scheduler.model.ddb_config_item import DdbConfigItem +from instance_scheduler.model.store.ddb_config_item_store import DdbConfigItemStore +from tests.test_utils.unordered_list import UnorderedList + + +@fixture +def config_item_store(config_table: str) -> DdbConfigItemStore: + return DdbConfigItemStore(config_table) + + +def test_write_then_read(config_item_store: DdbConfigItemStore) -> None: + config_item = DdbConfigItem( + remote_account_ids=["111122223333", "222233334444"], + organization_id="some_org_id", + ) + + config_item_store.put(config_item) + fetched = config_item_store.get() + assert fetched == config_item + + +def test_account_ids_with_no_org_id(config_item_store: DdbConfigItemStore) -> None: + config_item = DdbConfigItem( + remote_account_ids=["111122223333", "222233334444"], + ) + + config_item_store.put(config_item) + fetched = config_item_store.get() + assert fetched == config_item + + +def test_org_id_with_no_remote_accounts(config_item_store: DdbConfigItemStore) -> None: + config_item = DdbConfigItem(organization_id="some_org_id") + + config_item_store.put(config_item) + fetched = config_item_store.get() + assert fetched == config_item + + +def test_empty_config_item(config_item_store: DdbConfigItemStore) -> None: + config_item = DdbConfigItem() + + config_item_store.put(config_item) + fetched = config_item_store.get() + assert fetched == config_item + + +def test_config_with_ssm_param_references( + config_item_store: DdbConfigItemStore, +) -> None: + config_item = DdbConfigItem( + remote_account_ids=["{param:my_ssm_param}", "222233334444"], + ) + + config_item_store.put(config_item) + fetched = config_item_store.get() + assert fetched == config_item + + +def test_register_spoke_account(config_item_store: DdbConfigItemStore) -> None: + account_id = "111122223333" + result = config_item_store.register_spoke_accounts({account_id}) + assert 
result.remote_account_ids == [account_id] + assert config_item_store.get().remote_account_ids == [account_id] + + +def test_register_spoke_account_does_not_create_duplicates( + config_item_store: DdbConfigItemStore, +) -> None: + account_id = "111122223333" + + config_item_store.register_spoke_accounts({account_id}) + result = config_item_store.register_spoke_accounts({account_id}) + + assert result.remote_account_ids == [account_id] + assert config_item_store.get().remote_account_ids == [account_id] + + +def test_register_multiple_spoke_accounts( + config_item_store: DdbConfigItemStore, +) -> None: + account_ids = {"111122223333", "222233334444", "123456789012"} + result = config_item_store.register_spoke_accounts(account_ids) + assert result.remote_account_ids == UnorderedList(account_ids) + assert config_item_store.get().remote_account_ids == UnorderedList(account_ids) + + +def test_deregister_spoke_account(config_item_store: DdbConfigItemStore) -> None: + account_id = "111122223333" + config_item_store.register_spoke_accounts({account_id}) + + result = config_item_store.deregister_spoke_accounts({account_id}) + + assert result.remote_account_ids == [] + assert config_item_store.get().remote_account_ids == [] + + +def test_deregister_spoke_account_does_not_throw_error_when_not_exists( + config_item_store: DdbConfigItemStore, +) -> None: + result = config_item_store.deregister_spoke_accounts({"111122223333"}) + assert result.remote_account_ids == [] + + +def test_deregister_multiple_spoke_accounts( + config_item_store: DdbConfigItemStore, +) -> None: + account_ids = {"111122223333", "222233334444", "123456789012"} + config_item_store.register_spoke_accounts(account_ids) + + result = config_item_store.deregister_spoke_accounts(account_ids) + + assert result.remote_account_ids == [] + assert config_item_store.get().remote_account_ids == [] + + +def test_deregister_multiple_spoke_accounts_deletes_all_that_exist( + config_item_store: DdbConfigItemStore, +) -> None: + 
account_ids = {"111122223333", "222233334444", "123456789012"} + config_item_store.register_spoke_accounts({"111122223333"}) + + result = config_item_store.deregister_spoke_accounts(account_ids) + + assert result.remote_account_ids == [] + assert config_item_store.get().remote_account_ids == [] diff --git a/source/app/tests/model/store/test_dynamo_period_definition_store.py b/source/app/tests/model/store/test_dynamo_period_definition_store.py new file mode 100644 index 00000000..944893c2 --- /dev/null +++ b/source/app/tests/model/store/test_dynamo_period_definition_store.py @@ -0,0 +1,125 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import TYPE_CHECKING + +import boto3 +import pytest +from _pytest.fixtures import fixture +from botocore.exceptions import ClientError + +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.store.ddb_transact_write import WriteTransaction +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) + +if TYPE_CHECKING: + from mypy_boto3_dynamodb.client import DynamoDBClient +else: + DynamoDBClient = object + + +@fixture +def period_store(config_table: str) -> DynamoPeriodDefinitionStore: + return DynamoPeriodDefinitionStore(config_table) + + +def test_read_invalid_period_returns_exception_gracefully( + config_table: str, + period_store: DynamoPeriodDefinitionStore, +) -> None: + ddb_client: DynamoDBClient = boto3.client("dynamodb") + ddb_client.put_item( + TableName=config_table, + Item={ + "type": {"S": "period"}, + "name": {"S": "invalid-period"}, + "begintime": {"S": "20:00"}, # begintime after endtime + "endtime": {"S": "15:00"}, + }, + ) + + period_store.put(PeriodDefinition(name="simple_valid_period", begintime="05:00")) + + periods, errors = period_store.find_all_with_errors() + assert len(periods) == 1 + assert len(errors) == 1 + + assert 
len(period_store.find_all()) == 1 + + +def test_transaction_write_then_read( + period_store: DynamoPeriodDefinitionStore, +) -> None: + rpd = PeriodDefinition(name="test-period", begintime="05:00") + + with WriteTransaction(boto3.client("dynamodb")) as transaction: + transaction.add(period_store.transact_put(rpd)) + + fetched = period_store.find_by_name("test-period") + assert rpd == fetched + + +def test_transaction_overwrite_then_read( + period_store: DynamoPeriodDefinitionStore, +) -> None: + rpd = PeriodDefinition(name="test-period", begintime="05:00") + + # write first + with WriteTransaction(boto3.client("dynamodb")) as transaction: + transaction.add(period_store.transact_put(rpd, overwrite=True)) + + fetched = period_store.find_by_name("test-period") + assert rpd == fetched + + # then overwrite + new_rpd = PeriodDefinition(name="test-period", begintime="10:00") + + with WriteTransaction(boto3.client("dynamodb")) as transaction: + transaction.add(period_store.transact_put(new_rpd, overwrite=True)) + + fetched = period_store.find_by_name("test-period") + assert new_rpd == fetched + assert rpd != fetched + + +def test_transaction_overwrite_is_rejected_when_flag_not_set( + period_store: DynamoPeriodDefinitionStore, +) -> None: + rpd = PeriodDefinition(name="test-period", begintime="05:00") + + period_store.put(rpd) + with pytest.raises(ClientError): + with WriteTransaction(boto3.client("dynamodb")) as transaction: + transaction.add(period_store.transact_put(rpd)) + + +def test_transact_delete_errors_when_missing_if_enabled( + period_store: DynamoPeriodDefinitionStore, +) -> None: + with pytest.raises(ClientError): + with WriteTransaction(boto3.client("dynamodb")) as transaction: + transaction.add( + period_store.transact_delete("non-existing", error_if_missing=True) + ) + + +def test_transact_delete_on_missing_passes_if_error_flag_disabled( + period_store: DynamoPeriodDefinitionStore, +) -> None: + with WriteTransaction(boto3.client("dynamodb")) as transaction: 
+ transaction.add( + period_store.transact_delete("non-existing") + ) # default behavior is false + + +def test_transact_delete_deletes_correctly( + period_store: DynamoPeriodDefinitionStore, +) -> None: + rpd = PeriodDefinition(name="test-period", begintime="05:00") + + period_store.put(rpd) + assert len(period_store.find_all()) == 1 + with WriteTransaction(boto3.client("dynamodb")) as transaction: + transaction.add(period_store.transact_delete(rpd.name, error_if_missing=True)) + assert len(period_store.find_all()) == 0 diff --git a/source/app/tests/model/store/test_dynamo_schedule_definition_store.py b/source/app/tests/model/store/test_dynamo_schedule_definition_store.py new file mode 100644 index 00000000..da8879b0 --- /dev/null +++ b/source/app/tests/model/store/test_dynamo_schedule_definition_store.py @@ -0,0 +1,174 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import TYPE_CHECKING + +import boto3 +import pytest +from _pytest.fixtures import fixture +from botocore.exceptions import ClientError + +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from tests.test_utils.unordered_list import UnorderedList + +if TYPE_CHECKING: + from mypy_boto3_dynamodb.client import DynamoDBClient +else: + DynamoDBClient = object + + +@fixture +def schedule_store(config_table: str) -> DynamoScheduleDefinitionStore: + return DynamoScheduleDefinitionStore(config_table) + + +def test_read_invalid_schedule_returns_exception_gracefully( + config_table: str, + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + ddb_client: DynamoDBClient = boto3.client("dynamodb") + ddb_client.put_item( + TableName=config_table, + Item={ + "type": {"S": "schedule"}, + "name": {"S": "invalid-schedule"}, + 
}, + ) + + schedule_store.put( + ScheduleDefinition(name="simple_valid_schedule", override_status="running") + ) + + schedules, errors = schedule_store.find_all_with_errors() + assert len(schedules) == 1 + assert len(errors) == 1 + + assert len(schedule_store.find_all()) == 1 + + +def test_read_schedule_with_deprecated_use_metrics_flag_does_not_error( + config_table: str, + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + ddb_client: DynamoDBClient = boto3.client("dynamodb") + ddb_client.put_item( + TableName=config_table, + Item={ + "type": {"S": "schedule"}, + "name": {"S": "my_schedule"}, + "override_status": {"S": "running"}, + "use_metrics": {"BOOL": True}, + }, + ) + + schedule_store.put( + ScheduleDefinition(name="simple_valid_schedule", override_status="running") + ) + + schedules, errors = schedule_store.find_all_with_errors() + assert len(schedules) == 2 + assert len(errors) == 0 + + assert len(schedule_store.find_all()) == 2 + + +def test_transact_put_then_read_simple_schedule( + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + schedule = ScheduleDefinition(name="test_schedule", override_status="running") + + with schedule_store.new_transaction() as transaction: + transaction.add(schedule_store.transact_put(schedule)) + + fetched = schedule_store.find_by_name("test_schedule") + assert schedule == fetched + + +def test_transact_put_then_read_complex_schedule( + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + schedule = ScheduleDefinition( + name="test_schedule", + stop_new_instances=True, + retain_running=True, + hibernate=False, + configured_in_stack="some-stack-arn", + enforced=True, + ssm_maintenance_window=["some-window"], + description="some description", + timezone="Europe/Berlin", + periods=UnorderedList( + [PeriodIdentifier("test_period1"), PeriodIdentifier("test_period2")] + ), + ) + + with schedule_store.new_transaction() as transaction: + transaction.add(schedule_store.transact_put(schedule)) + + fetched = 
schedule_store.find_by_name("test_schedule") + assert schedule == fetched + + +def test_transact_put_schedule_with_overwrite( + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + # 1st put + schedule1 = ScheduleDefinition(name="test_schedule", override_status="running") + with schedule_store.new_transaction() as transaction: + transaction.add(schedule_store.transact_put(schedule1, overwrite=True)) + fetched = schedule_store.find_by_name("test_schedule") + assert fetched == schedule1 + + # 2nd put (overwrite) + schedule2 = ScheduleDefinition(name="test_schedule", override_status="stopped") + with schedule_store.new_transaction() as transaction: + transaction.add(schedule_store.transact_put(schedule2, overwrite=True)) + fetched = schedule_store.find_by_name("test_schedule") + assert fetched == schedule2 + + +def test_transact_put_rejects_overwrite_when_flag_not_set( + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + schedule = ScheduleDefinition(name="test_schedule", override_status="running") + + schedule_store.put(schedule) + with pytest.raises(ClientError): + with schedule_store.new_transaction() as transaction: + transaction.add(schedule_store.transact_put(schedule)) + + +def test_transact_delete_errors_when_missing_if_enabled( + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + with pytest.raises(ClientError): + with schedule_store.new_transaction() as transaction: + transaction.add( + schedule_store.transact_delete("non-existing", error_if_missing=True) + ) + + +def test_transact_delete_on_missing_passes_if_error_flag_disabled( + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + with schedule_store.new_transaction() as transaction: + transaction.add( + schedule_store.transact_delete("non-existing") + ) # default behavior is false + + +def test_transact_delete_deletes_correctly( + schedule_store: DynamoScheduleDefinitionStore, +) -> None: + schedule = ScheduleDefinition(name="test-schedule", override_status="running") 
+ + schedule_store.put(schedule) + assert len(schedule_store.find_all()) == 1 + with schedule_store.new_transaction() as transaction: + transaction.add( + schedule_store.transact_delete(schedule.name, error_if_missing=True) + ) + assert len(schedule_store.find_all()) == 0 diff --git a/source/app/tests/model/store/test_in_memory_period_definition_store.py b/source/app/tests/model/store/test_in_memory_period_definition_store.py new file mode 100644 index 00000000..35ee2060 --- /dev/null +++ b/source/app/tests/model/store/test_in_memory_period_definition_store.py @@ -0,0 +1,49 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import pytest +from _pytest.fixtures import fixture + +from instance_scheduler.model.period_definition import PeriodDefinition, PeriodParams +from instance_scheduler.model.store.in_memory_period_definition_store import ( + InMemoryPeriodDefinitionStore, +) +from instance_scheduler.util.validation import ValidationException + + +@fixture +def period_store() -> InMemoryPeriodDefinitionStore: + return InMemoryPeriodDefinitionStore() + + +def test_serialize_then_deserialize( + period_store: InMemoryPeriodDefinitionStore, +) -> None: + period_store.put(PeriodDefinition("period1", begintime="05:00", endtime="10:00")) + period_store.put(PeriodDefinition("period2", weekdays={"Mon-Fri"})) + period_store.put(PeriodDefinition("period3", monthdays={"1-5"})) + period_store.put(PeriodDefinition("period4", months={"Jan-Feb"})) + + serial_data = period_store.serialize() + + # ensure returned data matches own validation + period_store.validate_serial_data(serial_data) + + deserialized_store = InMemoryPeriodDefinitionStore.deserialize(serial_data) + + assert deserialized_store.find_all() == period_store.find_all() + + +def test_validate_rejects_malformed_input() -> None: + with pytest.raises(ValidationException): + # not a sequence + InMemoryPeriodDefinitionStore.validate_serial_data({}) + + with 
pytest.raises(ValidationException): + # contained data is not a dict + InMemoryPeriodDefinitionStore.validate_serial_data(["something-invalid"]) + + with pytest.raises(ValidationException): + # contained data is not valid PeriodParams + InMemoryPeriodDefinitionStore.validate_serial_data( + [PeriodParams(name="aPeriod"), {"invalid-key": "something-invalid"}] + ) diff --git a/source/app/tests/model/store/test_in_memory_schedule_definition_store.py b/source/app/tests/model/store/test_in_memory_schedule_definition_store.py new file mode 100644 index 00000000..f1989a54 --- /dev/null +++ b/source/app/tests/model/store/test_in_memory_schedule_definition_store.py @@ -0,0 +1,85 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import pytest +from _pytest.fixtures import fixture + +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ( + ScheduleDefinition, + ScheduleParams, +) +from instance_scheduler.model.store.in_memory_schedule_definition_store import ( + InMemoryScheduleDefinitionStore, +) +from instance_scheduler.util.validation import ValidationException + + +@fixture +def schedule_store() -> InMemoryScheduleDefinitionStore: + return InMemoryScheduleDefinitionStore() + + +def test_serialize_then_deserialize( + schedule_store: InMemoryScheduleDefinitionStore, +) -> None: + schedule_store.put( + ScheduleDefinition(name="override-sched", override_status="running") + ) + schedule_store.put( + ScheduleDefinition( + name="basic-sched", periods=[PeriodIdentifier.of("period1", "some_size")] + ) + ) + schedule_store.put( + ScheduleDefinition( + name="sched-with-everything-true", + periods=[PeriodIdentifier.of("period1", "some_size")], + timezone="Asia/Tokyo", + description="some description", + stop_new_instances=True, + ssm_maintenance_window=["some-window"], + enforced=True, + hibernate=True, + retain_running=True, + 
configured_in_stack="some-stack-arn", + ) + ) + schedule_store.put( + ScheduleDefinition( + name="sched-with-everything-false", + periods=[PeriodIdentifier.of("period1", "some_size")], + timezone="Asia/Tokyo", + description="some description", + stop_new_instances=False, + ssm_maintenance_window=["some-window"], + enforced=False, + hibernate=False, + retain_running=False, + configured_in_stack="some-stack-arn", + ) + ) + + serialized_store = schedule_store.serialize() + + # ensure returned data matches own validation + schedule_store.validate_serial_data(serialized_store) + + deserialized_store = InMemoryScheduleDefinitionStore.deserialize(serialized_store) + + assert deserialized_store.find_all() == schedule_store.find_all() + + +def test_validate_rejects_malformed_input() -> None: + with pytest.raises(ValidationException): + # not a sequence + InMemoryScheduleDefinitionStore.validate_serial_data({}) + + with pytest.raises(ValidationException): + # contained data is not a dict + InMemoryScheduleDefinitionStore.validate_serial_data(["something-invalid"]) + + with pytest.raises(ValidationException): + # contained data is not valid PeriodParams + InMemoryScheduleDefinitionStore.validate_serial_data( + [ScheduleParams(name="a Schedule"), {"invalid-key": "something-invalid"}] + ) diff --git a/source/app/tests/model/store/test_maint_win_store.py b/source/app/tests/model/store/test_maint_win_store.py new file mode 100644 index 00000000..9b342c53 --- /dev/null +++ b/source/app/tests/model/store/test_maint_win_store.py @@ -0,0 +1,45 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime, timezone +from typing import Final +from zoneinfo import ZoneInfo + +from instance_scheduler.model import ( + EC2SSMMaintenanceWindow, + EC2SSMMaintenanceWindowStore, +) + + +def test_maint_win_store(maint_win_table: str) -> None: + account_id = "111111111111" + region = "us-east-1" + window_name = "my-window" + next_execution_time = datetime(year=2023, month=6, day=23, tzinfo=timezone.utc) + duration = 1 + window_id = "mw-00000000000000000" + schedule_timezone = "UTC" + window: Final = EC2SSMMaintenanceWindow( + account_id=account_id, + region=region, + window_id=window_id, + window_name=window_name, + schedule_timezone=ZoneInfo(schedule_timezone), + next_execution_time=next_execution_time, + duration_hours=duration, + ) + + store = EC2SSMMaintenanceWindowStore(maint_win_table) + + windows = list(store.get_ssm_windows_db(account=account_id, region=region)) + assert windows == [] + + store.put_window_dynamodb(window) + windows = list(store.get_ssm_windows_db(account=account_id, region=region)) + + assert len(windows) == 1 + assert windows[0] == window + + store.delete_window(windows[0]) + + windows = list(store.get_ssm_windows_db(account=account_id, region=region)) + assert windows == [] diff --git a/source/app/tests/model/store/test_mw_store.py b/source/app/tests/model/store/test_mw_store.py new file mode 100644 index 00000000..7a0b8753 --- /dev/null +++ b/source/app/tests/model/store/test_mw_store.py @@ -0,0 +1,91 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime +from zoneinfo import ZoneInfo + +from _pytest.fixtures import SubRequest, fixture + +from instance_scheduler.model import EC2SSMMaintenanceWindow, MWStore +from instance_scheduler.model.store.dynamo_mw_store import DynamoMWStore +from instance_scheduler.model.store.in_memory_mw_store import InMemoryMWStore +from tests.integration.helpers.schedule_helpers import quick_time + +# defaults used for tests that don't explicitly need to control the account/region +account = "123456789012" +region = "us-east-1" + + +@fixture(params=["dynamo", "in-memory"]) +def mw_store(request: SubRequest) -> MWStore: + match request.param: + case "dynamo": + return DynamoMWStore(request.getfixturevalue(argname="maint_win_table")) + case "in-memory": + return InMemoryMWStore() + case _: + raise ValueError() + + +def new_mw( + window_id: str, + account_id: str = account, + region: str = region, + window_name: str = "test-window", + schedule_timezone: ZoneInfo = ZoneInfo("UTC"), + next_execution_time: datetime = quick_time(12, 0, 0), + duration_hours: int = 1, +) -> EC2SSMMaintenanceWindow: + return EC2SSMMaintenanceWindow( + account_id=account_id, + region=region, + window_id=window_id, + window_name=window_name, + schedule_timezone=schedule_timezone, + next_execution_time=next_execution_time, + duration_hours=duration_hours, + ) + + +def test_write_then_read_mw(mw_store: MWStore) -> None: + mw1 = new_mw("mw-00000000000000000") + mw2 = new_mw("mw-00000000000000012") + mw_store.put(mw1) + mw_store.put(mw2) + + fetched_result = list(mw_store.find_by_account_region(account, region)) + + assert len(fetched_result) == 2 + assert mw1 in fetched_result + assert mw2 in fetched_result + + +def test_put_overwrites_existing_mw(mw_store: MWStore) -> None: + # note: account_id, region, window_id, and window_name are all part of the unique key of a window + # thus these are all considered different windows for the db. 
the most common thing that will change + # and require an overwrite is the next_execution_time + orig = new_mw("mw-00000000000000000", next_execution_time=quick_time(10, 0, 0)) + replacement = new_mw( + "mw-00000000000000000", next_execution_time=quick_time(20, 0, 0) + ) + mw_store.put(orig) + mw_store.put(replacement) + + fetched_result = list(mw_store.find_by_account_region(account, region)) + assert len(fetched_result) == 1 + assert fetched_result[0] == replacement + assert fetched_result[0] != orig + + +def test_delete_mw_deletes_correctly(mw_store: MWStore) -> None: + mw = new_mw("mw-00000000000000000") + mw_store.put(mw) + + assert len(list(mw_store.find_by_account_region(account, region))) == 1 + + mw_store.delete(mw) + assert len(list(mw_store.find_by_account_region(account, region))) == 0 + + +def test_delete_mw_doesnt_error_when_mw_does_not_exist(mw_store: MWStore) -> None: + mw = new_mw("mw-00000000000000000") + mw_store.delete(mw) diff --git a/source/app/tests/model/store/test_period_definition_store.py b/source/app/tests/model/store/test_period_definition_store.py new file mode 100644 index 00000000..9552611e --- /dev/null +++ b/source/app/tests/model/store/test_period_definition_store.py @@ -0,0 +1,126 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import pytest +from _pytest.fixtures import SubRequest, fixture + +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.store.dynamo_period_definition_store import ( + DynamoPeriodDefinitionStore, +) +from instance_scheduler.model.store.in_memory_period_definition_store import ( + InMemoryPeriodDefinitionStore, +) +from instance_scheduler.model.store.period_definition_store import ( + PeriodAlreadyExistsException, + PeriodDefinitionStore, + UnknownPeriodException, +) + + +@fixture(params=["dynamo", "in-memory"]) +def period_store(request: SubRequest) -> PeriodDefinitionStore: + match request.param: + case "dynamo": + return DynamoPeriodDefinitionStore( + request.getfixturevalue(argname="config_table") + ) + case "in-memory": + return InMemoryPeriodDefinitionStore() + case _: + raise ValueError() + + +def test_write_then_read_simple_period( + period_store: PeriodDefinitionStore, +) -> None: + rpd = PeriodDefinition(name="test-period", begintime="05:00") + + period_store.put(rpd) + fetched = period_store.find_by_name("test-period") + assert rpd == fetched + + +def test_write_then_read_complex_period( + period_store: PeriodDefinitionStore, +) -> None: + rpd = PeriodDefinition( + name="test-period", + begintime="05:00", + endtime="06:00", + months={"jan"}, + monthdays={"1-5"}, + weekdays={"mon-fri"}, + description="some random description", + ) + + period_store.put(rpd) + fetched = period_store.find_by_name("test-period") + assert rpd == fetched + + +def test_overwrite_then_read(period_store: PeriodDefinitionStore) -> None: + # write first + rpd = PeriodDefinition(name="test-period", begintime="05:00") + period_store.put(rpd, overwrite=True) + + fetched = period_store.find_by_name("test-period") + assert rpd == fetched + + # then overwrite + new_rpd = PeriodDefinition(name="test-period", begintime="10:00") + period_store.put(new_rpd, overwrite=True) + + fetched = 
period_store.find_by_name("test-period") + assert new_rpd == fetched + assert rpd != fetched + + +def test_overwrite_is_rejected_when_flag_not_set( + period_store: PeriodDefinitionStore, +) -> None: + rpd = PeriodDefinition(name="test-period", begintime="05:00") + + period_store.put(rpd) + with pytest.raises(PeriodAlreadyExistsException): + period_store.put(rpd) + + +def test_find_all_returns_all_expected( + period_store: PeriodDefinitionStore, config_table: str +) -> None: + rpd1 = PeriodDefinition(name="test-period1", begintime="05:00") + rpd2 = PeriodDefinition(name="test-period2", monthdays={"12-24"}) + rpd3 = PeriodDefinition(name="test-period3", begintime="05:00", endtime="10:00") + + period_store.put(rpd1) + period_store.put(rpd2) + period_store.put(rpd3) + + fetched = period_store.find_all() + assert fetched == { + "test-period1": rpd1, + "test-period2": rpd2, + "test-period3": rpd3, + } + + +def test_delete_errors_when_missing_if_enabled( + period_store: PeriodDefinitionStore, +) -> None: + with pytest.raises(UnknownPeriodException): + period_store.delete("non-existing", error_if_missing=True) + + +def test_delete_on_missing_passes_if_error_flag_disabled( + period_store: PeriodDefinitionStore, +) -> None: + period_store.delete("non-existing") # default behavior is false + + +def test_delete_deletes_correctly(period_store: PeriodDefinitionStore) -> None: + rpd = PeriodDefinition(name="test-period", begintime="05:00") + + period_store.put(rpd) + assert len(period_store.find_all()) == 1 + period_store.delete(rpd.name, error_if_missing=True) + assert len(period_store.find_all()) == 0 diff --git a/source/app/tests/model/store/test_schedule_definition_store.py b/source/app/tests/model/store/test_schedule_definition_store.py new file mode 100644 index 00000000..32cd640a --- /dev/null +++ b/source/app/tests/model/store/test_schedule_definition_store.py @@ -0,0 +1,211 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import pytest +from _pytest.fixtures import SubRequest, fixture + +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ScheduleDefinition +from instance_scheduler.model.store.dynamo_schedule_definition_store import ( + DynamoScheduleDefinitionStore, +) +from instance_scheduler.model.store.in_memory_schedule_definition_store import ( + InMemoryScheduleDefinitionStore, +) +from instance_scheduler.model.store.schedule_definition_store import ( + ScheduleAlreadyExistsException, + ScheduleDefinitionStore, + UnknownScheduleException, +) +from tests.test_utils.unordered_list import UnorderedList + + +@fixture(params=["dynamo", "in-memory"]) +def schedule_store(request: SubRequest) -> ScheduleDefinitionStore: + match request.param: + case "dynamo": + return DynamoScheduleDefinitionStore( + request.getfixturevalue(argname="config_table") + ) + case "in-memory": + return InMemoryScheduleDefinitionStore() + case _: + raise ValueError + + +def test_put_then_read_simple_schedule(schedule_store: ScheduleDefinitionStore) -> None: + schedule = ScheduleDefinition( + name="test_schedule", periods=[PeriodIdentifier("period_name")] + ) + + schedule_store.put(schedule) + fetched = schedule_store.find_by_name("test_schedule") + assert schedule == fetched + + +def test_put_then_read_override_status_only_schedule( + schedule_store: ScheduleDefinitionStore, +) -> None: + schedule = ScheduleDefinition(name="test_schedule", override_status="running") + + schedule_store.put(schedule) + fetched = schedule_store.find_by_name("test_schedule") + assert schedule == fetched + + +def test_put_then_read_complex_schedule( + schedule_store: ScheduleDefinitionStore, +) -> None: + schedule = ScheduleDefinition( + name="test_schedule", + stop_new_instances=True, + retain_running=True, + hibernate=False, + 
configured_in_stack="some-stack-arn", + enforced=True, + ssm_maintenance_window=["some-window"], + description="some description", + timezone="Europe/Berlin", + periods=UnorderedList( + [PeriodIdentifier("test_period1"), PeriodIdentifier("test_period2")] + ), + ) + + schedule_store.put(schedule) + fetched = schedule_store.find_by_name("test_schedule") + assert schedule == fetched + + +def test_put_schedule_rejects_overwrite_when_flag_not_set( + schedule_store: ScheduleDefinitionStore, +) -> None: + schedule = ScheduleDefinition(name="test_schedule", override_status="running") + + schedule_store.put(schedule) + with pytest.raises(ScheduleAlreadyExistsException): + schedule_store.put(schedule) + + +def test_put_schedule_with_overwrite(schedule_store: ScheduleDefinitionStore) -> None: + # 1st put + schedule1 = ScheduleDefinition(name="test_schedule", override_status="running") + schedule_store.put(schedule1, overwrite=True) + fetched = schedule_store.find_by_name("test_schedule") + assert fetched == schedule1 + + # 2nd put (overwrite) + schedule2 = ScheduleDefinition(name="test_schedule", override_status="stopped") + schedule_store.put(schedule2, overwrite=True) + fetched = schedule_store.find_by_name("test_schedule") + assert fetched == schedule2 + + +def test_find_schedule_returns_null_when_not_exists( + schedule_store: ScheduleDefinitionStore, +) -> None: + fetched = schedule_store.find_by_name("non-existing") + assert fetched is None + + +def test_find_all_returns_empty_map_when_none_exist( + schedule_store: ScheduleDefinitionStore, +) -> None: + fetched = schedule_store.find_all() + assert fetched == {} + + +def test_find_all_returns_all_expected( + schedule_store: ScheduleDefinitionStore, config_table: str +) -> None: + schedule1 = ScheduleDefinition(name="test_schedule1", override_status="running") + schedule2 = ScheduleDefinition(name="test_schedule2", override_status="stopped") + schedule3 = ScheduleDefinition( + name="test_schedule3", + 
periods=[PeriodIdentifier.of("period", "m2.micro")], + ) + + schedule_store.put(schedule1) + schedule_store.put(schedule2) + schedule_store.put(schedule3) + + fetched = schedule_store.find_all() + + assert fetched == { + "test_schedule1": schedule1, + "test_schedule2": schedule2, + "test_schedule3": schedule3, + } + + +def test_find_all_when_schedules_share_a_period( + schedule_store: ScheduleDefinitionStore, +) -> None: + schedule1 = ScheduleDefinition( + name="schedule1", + periods=[PeriodIdentifier.of("period", "m2.micro")], + ) + schedule2 = ScheduleDefinition( + name="schedule2", + periods=[PeriodIdentifier.of("period", "m4.large")], + ) + + schedule_store.put(schedule1, overwrite=True) + schedule_store.put(schedule2, overwrite=True) + + fetched = schedule_store.find_all() + + assert fetched == { + "schedule1": schedule1, + "schedule2": schedule2, + } + + +def test_find_by_period_returns_expected( + schedule_store: ScheduleDefinitionStore, +) -> None: + rpd = PeriodDefinition(name="used_period", begintime="10:00") + + schedule1 = ScheduleDefinition( + name="schedule1", + periods=[PeriodIdentifier.of(rpd.name)], + ) + schedule2 = ScheduleDefinition( + name="schedule2", + periods=[PeriodIdentifier.of(rpd.name, "m4.large")], + ) + schedule3 = ScheduleDefinition( + name="schedule3", + periods=[PeriodIdentifier.of("some_other_period")], + ) + schedule_store.put(schedule1) + schedule_store.put(schedule2) + schedule_store.put(schedule3) + + fetched = schedule_store.find_by_period("used_period") + + assert fetched == { + "schedule1": schedule1, + "schedule2": schedule2, + } + + +def test_delete_errors_when_missing_if_enabled( + schedule_store: ScheduleDefinitionStore, +) -> None: + with pytest.raises(UnknownScheduleException): + schedule_store.delete("non-existing", error_if_missing=True) + + +def test_delete_on_missing_passes_if_error_flag_disabled( + schedule_store: ScheduleDefinitionStore, +) -> None: + schedule_store.delete("non-existing") # default behavior is 
false + + +def test_delete_deletes_correctly(schedule_store: ScheduleDefinitionStore) -> None: + schedule = ScheduleDefinition(name="test-schedule", override_status="running") + + schedule_store.put(schedule) + assert len(schedule_store.find_all()) == 1 + schedule_store.delete(schedule.name, error_if_missing=True) + assert len(schedule_store.find_all()) == 0 diff --git a/source/app/tests/model/test_ddb_item_utils.py b/source/app/tests/model/test_ddb_item_utils.py new file mode 100644 index 00000000..1e4d29be --- /dev/null +++ b/source/app/tests/model/test_ddb_item_utils.py @@ -0,0 +1,192 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import Optional + +import pytest +from mypy_boto3_dynamodb.type_defs import AttributeValueTypeDef + +from instance_scheduler.model.ddb_item_utils import ( + OptionalAttributeValue, + optionally, + parse_bool, + parse_str_set, + skip_if_empty, + skip_if_none, +) + + +def test_optional_attribute_value_type_matches_boto_type() -> None: + # ensures no drift between the actual type-def and the extension used by ddb_item_utils + assert all( + key in AttributeValueTypeDef.__annotations__.keys() + for key in OptionalAttributeValue.__annotations__.keys() + ) + assert all( + key in OptionalAttributeValue.__annotations__.keys() + for key in AttributeValueTypeDef.__annotations__.keys() + ) + + +@pytest.mark.parametrize( + "input,expected_out", + [ + (None, None), + ({"S": "True"}, True), + ({"S": "FALSE"}, False), + ({"BOOL": True}, True), + ({"BOOL": False}, False), + ], +) +def test_parse_bool_optionally( + input: AttributeValueTypeDef, expected_out: Optional[bool] +) -> None: + assert optionally(parse_bool, input, default=None) == expected_out + + +@pytest.mark.parametrize( + "input,expected_out", + [ + ({"S": "True"}, True), + ({"S": "FALSE"}, False), + ({"BOOL": True}, True), + ({"BOOL": False}, False), + ], +) +def test_parse_bool(input: AttributeValueTypeDef, 
expected_out: Optional[bool]) -> None: + assert parse_bool(input) == expected_out + + +@pytest.mark.parametrize( + "invalid_input", + [ + {"S": "Yes"}, + {"S": "anything else"}, + {"SS": ["true"]}, # wrong type (SS not S) + {"S": ""}, + {"N": "12"}, + ], +) +def test_parse_bool_throws_error_on_invalid_input( + invalid_input: AttributeValueTypeDef, +) -> None: + with pytest.raises(ValueError): + parse_bool(invalid_input) + + +@pytest.mark.parametrize( + "input,expected_out", + [ + ({"S": "a,b,c"}, {"a", "b", "c"}), + ({"S": "FALSE"}, {"FALSE"}), + ({"S": ""}, set()), + ({"S": " "}, {" "}), + ( + {"S": "a big string,another big string"}, + {"a big string", "another big string"}, + ), + ({"SS": []}, set()), + ({"SS": ["a", "b", "c"]}, {"a", "b", "c"}), + ], +) +def test_parse_str_set(input: AttributeValueTypeDef, expected_out: set[str]) -> None: + assert parse_str_set(input) == expected_out + + +@pytest.mark.parametrize( + "invalid_input", + [{"N": "12"}, {"Bool": True}, {"NS": ["12", "13", "23"]}], +) +def test_parse_str_set_throws_error_on_invalid_input( + invalid_input: AttributeValueTypeDef, +) -> None: + with pytest.raises(ValueError): + parse_str_set(invalid_input) + + +@pytest.mark.parametrize( + "valid_input", + [ + {"S": "a_str"}, + {"N": "str"}, + {"B": 8}, + {"SS": ["a", "str"]}, + {"SS": []}, + {"NS": ["str"]}, + {"NS": []}, + {"BS": [5, 5]}, + {"BS": []}, + {"M": {"some": "mapping"}}, + {"L": ["any", "sequence"]}, + {"NULL": True}, + {"BOOL": False}, + ], +) +def test_skip_if_none_does_not_skip_valid_inputs( + valid_input: OptionalAttributeValue, +) -> None: + assert skip_if_none("test", valid_input) == {"test": valid_input} + + +@pytest.mark.parametrize( + "none_input", + [ + {"S": None}, + {"N": None}, + {"B": None}, + {"SS": None}, + {"NS": None}, + {"BS": None}, + {"M": None}, + {"L": None}, + {"NULL": None}, + {"BOOL": None}, + ], +) +def test_skip_if_none_skips_none_inputs(none_input: OptionalAttributeValue) -> None: + assert skip_if_none("test", 
none_input) == {} + + +@pytest.mark.parametrize( + "valid_input", + [ + {"S": "a_str"}, + {"N": "str"}, + {"B": 8}, + {"SS": ["a", "str"]}, + {"SS": ["str"]}, + {"NS": ["str"]}, + {"BS": [5, 5]}, + {"M": {"some": "mapping"}}, + {"L": ["any", "sequence"]}, + {"NULL": True}, + {"BOOL": False}, + ], +) +def test_skip_if_empty_does_not_skip_valid_inputs( + valid_input: OptionalAttributeValue, +) -> None: + assert skip_if_empty("test", valid_input) == {"test": valid_input} + + +@pytest.mark.parametrize( + "empty_input", + [ + {"S": None}, + {"N": None}, + {"B": None}, + {"SS": None}, + {"NS": None}, + {"BS": None}, + {"M": None}, + {"L": None}, + {"NULL": None}, + {"BOOL": None}, + {"SS": []}, + {"NS": []}, + {"BS": []}, + {"M": {}}, + {"L": []}, + ], +) +def test_skip_if_empty_skips_empty_inputs(empty_input: OptionalAttributeValue) -> None: + assert skip_if_empty("test", empty_input) == {} diff --git a/source/app/tests/model/test_maint_win.py b/source/app/tests/model/test_maint_win.py index c45eee71..593afd42 100644 --- a/source/app/tests/model/test_maint_win.py +++ b/source/app/tests/model/test_maint_win.py @@ -14,10 +14,8 @@ ) if TYPE_CHECKING: - from mypy_boto3_dynamodb.type_defs import GetItemOutputTypeDef from mypy_boto3_ssm.type_defs import MaintenanceWindowIdentityTypeDef else: - GetItemOutputTypeDef = object MaintenanceWindowIdentityTypeDef = object @@ -29,7 +27,7 @@ def example_maint_win() -> EC2SSMMaintenanceWindow: window_name="my-window", schedule_timezone=ZoneInfo("UTC"), next_execution_time=datetime(year=2023, month=6, day=23, tzinfo=timezone.utc), - duration=1, + duration_hours=1, ) @@ -87,51 +85,39 @@ def test_validate_duration() -> None: invalid_durations: Final = (0, -10, 30) for invalid_duration in invalid_durations: with raises(EC2SSMMaintenanceWindowValidationError): - replace(example_maint_win(), duration=invalid_duration) + replace(example_maint_win(), duration_hours=invalid_duration) -def test_to_item() -> None: - account_id = "111111111111" - 
region = "us-east-1" - window_id = "mw-00000000000000000" - window_name = "my-window" - schedule_timezone = "Europe/Amsterdam" - next_execution_time = datetime( - year=2023, month=6, day=23, tzinfo=ZoneInfo(schedule_timezone) - ) - duration = 1 +def test_to_item_from_item_round_trip() -> None: maint_win = EC2SSMMaintenanceWindow( - account_id=account_id, - region=region, - window_id=window_id, - window_name=window_name, - schedule_timezone=ZoneInfo(schedule_timezone), - next_execution_time=next_execution_time, - duration=duration, + account_id="111111111111", + region="us-east-1", + window_id="mw-00000000000000000", + window_name="my-window", + schedule_timezone=ZoneInfo("Europe/Amsterdam"), + next_execution_time=datetime( + year=2023, month=6, day=23, tzinfo=ZoneInfo("Europe/Amsterdam") + ), + duration_hours=1, ) - assert maint_win.to_item() == { - "account-region": {"S": f"{account_id}:{region}"}, - "WindowId": {"S": window_id}, - "Name": {"S": window_name}, - "ScheduleTimezone": {"S": schedule_timezone}, - "NextExecutionTime": {"S": next_execution_time.isoformat()}, - "Duration": {"N": str(duration)}, - } + assert maint_win == EC2SSMMaintenanceWindow.from_item(maint_win.to_item()) def test_to_key() -> None: account_id = "111111111111" region = "us-east-1" window_name = "my-window" + window_id = "mw-00000000000000000" window = replace( example_maint_win(), account_id=account_id, region=region, window_name=window_name, + window_id=window_id, ) assert window.to_key() == { - "account-region": f"{account_id}:{region}", - "Name": window_name, + "account-region": {"S": f"{account_id}:{region}"}, + "name-id": {"S": f"{window_name}:{window_id}"}, } @@ -161,7 +147,7 @@ def test_from_identity() -> None: assert maint_win.window_name == window_name assert maint_win.schedule_timezone == ZoneInfo(schedule_timezone) assert maint_win.next_execution_time == next_execution_time - assert maint_win.duration == duration + assert maint_win.duration_hours == duration def 
test_from_identity_no_timezone() -> None: @@ -192,41 +178,3 @@ def test_from_identity_utc_offset_shorthand() -> None: ) assert maint_win.schedule_timezone == ZoneInfo("UTC") assert maint_win.next_execution_time == isoparse(next_execution_time) - - -def test_from_item() -> None: - account_id = "111111111111" - region = "us-east-1" - window_id = "mw-00000000000000000" - window_name = "my-window" - schedule_timezone = "Asia/Tokyo" - next_execution_time = datetime( - year=2023, month=6, day=23, tzinfo=ZoneInfo(schedule_timezone) - ) - duration = 1 - item: GetItemOutputTypeDef = { - "Item": { - "account-region": {"S": f"{account_id}:{region}"}, - "Name": {"S": window_name}, - "WindowId": {"S": window_id}, - "ScheduleTimezone": {"S": schedule_timezone}, - "NextExecutionTime": {"S": next_execution_time.isoformat()}, - "Duration": {"N": str(duration)}, - }, - "ConsumedCapacity": {}, - "ResponseMetadata": { - "RequestId": "", - "HostId": "", - "HTTPStatusCode": 200, - "HTTPHeaders": {}, - "RetryAttempts": 0, - }, - } - maint_win = EC2SSMMaintenanceWindow.from_item(item) - assert maint_win.account_id == account_id - assert maint_win.region == region - assert maint_win.window_id == window_id - assert maint_win.window_name == window_name - assert maint_win.schedule_timezone == ZoneInfo(schedule_timezone) - assert maint_win.next_execution_time == next_execution_time - assert maint_win.duration == duration diff --git a/source/app/tests/model/test_maint_win_store.py b/source/app/tests/model/test_maint_win_store.py deleted file mode 100644 index 75c18900..00000000 --- a/source/app/tests/model/test_maint_win_store.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -from datetime import datetime, timezone -from typing import TYPE_CHECKING -from unittest.mock import ANY - -from boto3 import Session - -from instance_scheduler.model import EC2SSMMaintenanceWindowStore -from tests.logger import MockLogger - -if TYPE_CHECKING: - from mypy_boto3_ssm.type_defs import MaintenanceWindowIdentityTypeDef -else: - MaintenanceWindowIdentityTypeDef = object - - -def test_maint_win_store(maint_win_table: str) -> None: - account_id = "111111111111" - region = "us-east-1" - window_name = "my-window" - next_execution_time = datetime(year=2023, month=6, day=23, tzinfo=timezone.utc) - duration = 1 - window_id = "mw-00000000000000000" - schedule_timezone = "UTC" - format_string = "%Y-%m-%dT%H:%M%z" - window: MaintenanceWindowIdentityTypeDef = { - "Name": window_name, - "NextExecutionTime": next_execution_time.strftime(format_string), - "Duration": duration, - "WindowId": window_id, - "ScheduleTimezone": schedule_timezone, - } - - store = EC2SSMMaintenanceWindowStore( - session=Session(), table_name=maint_win_table, logger=MockLogger() - ) - - windows = store.get_ssm_windows_db(account=account_id, region=region) - assert windows == [] - - store.put_window_dynamodb(window=window, account=account_id, region=region) - windows = store.get_ssm_windows_db(account=account_id, region=region) - - assert len(windows) == 1 - account_region = f"{account_id}:{region}" - assert windows[0] == { - "account-region": account_region, - "Name": window_name, - "Duration": duration, - "WindowId": window_id, - "TimeToLive": ANY, - "NextExecutionTime": next_execution_time.strftime(format_string), - "ScheduleTimezone": schedule_timezone, - } - - window["account-region"] = account_region # type: ignore[typeddict-unknown-key] - store.delete_window(window) - - windows = store.get_ssm_windows_db(account=account_id, region=region) - assert windows == [] diff --git a/source/app/tests/model/test_period_identifier.py 
b/source/app/tests/model/test_period_identifier.py new file mode 100644 index 00000000..63b81ab0 --- /dev/null +++ b/source/app/tests/model/test_period_identifier.py @@ -0,0 +1,16 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from instance_scheduler.model.period_identifier import PeriodIdentifier + + +def test_simple_identifier() -> None: + pid = PeriodIdentifier.of("period_name") + assert pid == "period_name" + assert pid.name == "period_name" + + +def test_identifier_with_type() -> None: + pid = PeriodIdentifier.of("period_name", "desired_type") + assert pid == "period_name@desired_type" + assert pid.name == "period_name" + assert pid.desired_type == "desired_type" diff --git a/source/app/tests/model/test_running_period_definition.py b/source/app/tests/model/test_running_period_definition.py new file mode 100644 index 00000000..286664a2 --- /dev/null +++ b/source/app/tests/model/test_running_period_definition.py @@ -0,0 +1,176 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime + +import pytest +from freezegun import freeze_time + +from instance_scheduler.model.period_definition import ( + InvalidPeriodDefinition, + PeriodDefinition, +) + + +def test_to_item_includes_all_values_when_provided() -> None: + assert PeriodDefinition( + name="test-period", + begintime="05:00", + endtime="22:00", + weekdays={"mon-fri"}, + monthdays={"1-10"}, + months={"1-3"}, + description="some description", + configured_in_stack="myStackName", + ).to_item() == { + "type": {"S": "period"}, + "name": {"S": "test-period"}, + "begintime": {"S": "05:00"}, + "endtime": {"S": "22:00"}, + "weekdays": {"SS": ["mon-fri"]}, + "monthdays": {"SS": ["1-10"]}, + "months": {"SS": ["1-3"]}, + "description": {"S": "some description"}, + "configured_in_stack": {"S": "myStackName"}, + } + + +def test_to_item_strips_empty_values() -> None: + assert PeriodDefinition(name="test-period", begintime="00:00").to_item() == { + "type": {"S": "period"}, + "name": {"S": "test-period"}, + "begintime": {"S": "00:00"}, + } + + +def test_to_item_from_item_round_trip_when_configured_in_stack() -> None: + period = PeriodDefinition( + name="test-period", + begintime="05:00", + endtime="22:00", + weekdays={"mon-fri"}, + monthdays={"1-10"}, + months={"1-3"}, + description="some description", + configured_in_stack="someStackName", + ) + assert period == PeriodDefinition.from_item(period.to_item()) + + +def test_to_item_from_item_round_trip_when_not_configured_in_stack() -> None: + period = PeriodDefinition( + name="test-period", + begintime="05:00", + endtime="22:00", + weekdays={"mon-fri"}, + monthdays={"1-10"}, + months={"1-3"}, + description="some description", + ) + assert period == PeriodDefinition.from_item(period.to_item()) + + +def test_1_sided_period_is_valid() -> None: + PeriodDefinition(name="test-period", begintime="10:00") + PeriodDefinition(name="test-period", endtime="10:00") + + +def test_normal_period_is_valid() -> 
None: + PeriodDefinition(name="test-period", begintime="10:00", endtime="20:00") + + +def test_specific_days_only_periods_are_valid() -> None: + PeriodDefinition(name="test-period", weekdays={"mon-fri"}) + PeriodDefinition(name="test-period", monthdays={"1-3"}) + PeriodDefinition(name="test-period", months={"feb"}) + + +@pytest.mark.parametrize( + "valid_input", ["1", "12-15", "1/7", "1-15/2", "L", "20-L", "15W"] +) +def test_does_not_error_on_valid_monthday(valid_input: str) -> None: + PeriodDefinition(name="test-period", monthdays={valid_input}) + + +def test_does_not_error_on_multipart_monthday() -> None: + PeriodDefinition(name="test-period", monthdays={"1", "2", "1/8"}) + + +@freeze_time(datetime(2023, 2, 25, 0, 0, 0)) +@pytest.mark.parametrize("valid_input", ["30", "26-30", "L"]) +def test_does_not_error_in_feb(valid_input: str) -> None: + PeriodDefinition(name="test-period", monthdays={valid_input}) + + +@pytest.mark.parametrize("invalid_input", ["W", "32", "bad-string", "20-15"]) +def test_errors_on_invalid_monthday(invalid_input: str) -> None: + with pytest.raises(InvalidPeriodDefinition): + PeriodDefinition(name="test-period", monthdays={invalid_input}) + + +@pytest.mark.parametrize( + "valid_input", + ["1", "10-12", "1/3", "5-9/2", "Jan", "Jan-Jul", "mar/3", "Jan-Jul/2"], +) +def test_does_not_error_on_valid_month(valid_input: str) -> None: + PeriodDefinition(name="test-period", months={valid_input}) + + +def test_does_not_error_on_multipart_month() -> None: + PeriodDefinition(name="test-period", months={"1", "Feb", "Mar-May"}) + + +@pytest.mark.parametrize("invalid_input", ["0", "13", "not-a-month", "W", "L"]) +def test_errors_on_invalid_month(invalid_input: str) -> None: + with pytest.raises(InvalidPeriodDefinition): + PeriodDefinition(name="test-period", months={invalid_input}) + + +@pytest.mark.parametrize( + "valid_input", + [ + "1", + "4-6", + "1-2, Thu-Fri", + "0#1", + "Mon#1", + "Mon#5", + "FriL", + "6L", + "SUN", + ], +) +def 
test_does_not_error_on_valid_weekday(valid_input: str) -> None: + PeriodDefinition(name="test-period", weekdays={valid_input}) + + +def test_does_not_error_on_multipart_weekday() -> None: + PeriodDefinition(name="test-period", weekdays={"1", "2-3", "fri"}) + + +@pytest.mark.parametrize("invalid_input", ["-1", "7", "not-a-day", "Mon#6", "2-L"]) +def test_errors_on_invalid_weekday(invalid_input: str) -> None: + with pytest.raises(InvalidPeriodDefinition): + PeriodDefinition(name="test-period", weekdays={invalid_input}) + + +def test_throws_exception_when_begintime_is_after_endtime() -> None: + with pytest.raises(InvalidPeriodDefinition): + PeriodDefinition(name="test-period", begintime="10:00", endtime="5:00") + + +def test_throws_exception_when_no_period_values_provided() -> None: + with pytest.raises(InvalidPeriodDefinition): + PeriodDefinition(name="test-period") + + +@pytest.mark.parametrize("valid_time", ["00:00", "01:05", "23:59", "4:35"]) +def test_accepts_valid_time_strings(valid_time: str) -> None: + PeriodDefinition(name="test-period", begintime=valid_time) + PeriodDefinition(name="test-period", endtime=valid_time) + + +@pytest.mark.parametrize("invalid_time", ["25:00", "ab", "24:00", "24:01"]) +def test_rejects_invalid_time_strings(invalid_time: str) -> None: + with pytest.raises(InvalidPeriodDefinition): + PeriodDefinition(name="test-period", begintime=invalid_time) + PeriodDefinition(name="test-period", endtime=invalid_time) diff --git a/source/app/tests/model/test_schedule_definition.py b/source/app/tests/model/test_schedule_definition.py new file mode 100644 index 00000000..b66a0c16 --- /dev/null +++ b/source/app/tests/model/test_schedule_definition.py @@ -0,0 +1,260 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from datetime import time + +import pytest +from mypy_boto3_dynamodb.type_defs import AttributeValueTypeDef + +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.configuration.running_period import RunningPeriod +from instance_scheduler.configuration.running_period_dict_element import ( + RunningPeriodDictElement, +) +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.model.period_identifier import PeriodIdentifier +from instance_scheduler.model.schedule_definition import ( + InvalidScheduleDefinition, + ScheduleDefinition, +) +from instance_scheduler.model.store.period_definition_store import PeriodDefinitionStore +from instance_scheduler.util.app_env import AppEnv + + +def test_default_schedule_flags_match_expected( + period_store: PeriodDefinitionStore, +) -> None: + schedule_def = ScheduleDefinition(name="test-schedule", override_status="running") + + schedule = schedule_def.to_instance_schedule(period_store) + + assert schedule.stop_new_instances is True + assert schedule.enforced is False + assert schedule.retain_running is False + assert schedule.hibernate is False + + +def test_schedule_definition_defaults_match_instance_schedule_defaults( + period_store: PeriodDefinitionStore, app_env: AppEnv +) -> None: + """ + InstanceSchedule and ScheduleDefinition each define their defaults separately, but these need to match + to avoid unexpected behavior + """ + + schedule_def = ScheduleDefinition(name="test-schedule", override_status="running") + + schedule_from_def = schedule_def.to_instance_schedule(period_store) + + manual_built_schedule = InstanceSchedule( + name="test-schedule", + override_status="running", + timezone=app_env.default_timezone, + ) + + assert schedule_from_def == manual_built_schedule + + +@pytest.mark.parametrize("tz_str", ["", None]) +def test_timezone_uses_default_timezone_when_not_provided( + tz_str: 
str, app_env: AppEnv, period_store: PeriodDefinitionStore +) -> None: + schedule_def = ScheduleDefinition( + name="test-schedule", timezone=tz_str, override_status="running" + ) + + schedule = schedule_def.to_instance_schedule(period_store) + + assert schedule.timezone == app_env.default_timezone + + +def test_to_schedule_when_period_exists( + config_table: str, app_env: AppEnv, period_store: PeriodDefinitionStore +) -> None: + period_store.put(PeriodDefinition(name="period", begintime="05:00")) + schedule_def = ScheduleDefinition( + name="test-schedule", periods=[PeriodIdentifier("period")] + ) + + schedule = schedule_def.to_instance_schedule(period_store) + + assert schedule == InstanceSchedule( + name="test-schedule", + timezone=app_env.default_timezone, + periods=[ + RunningPeriodDictElement( + period=RunningPeriod(name="period", begintime=time(5, 0, 0)) + ) + ], + ) + + +def test_to_item_from_item_round_trip_with_minimal_flags() -> None: + schedule = ScheduleDefinition(name="test-schedule", override_status="running") + item = schedule.to_item() + schedule2 = ScheduleDefinition.from_item(item) + assert schedule == schedule2 + + +def test_to_item_from_item_round_trip_with_all_flags() -> None: + schedule = ScheduleDefinition( + name="test-schedule", + timezone="America/New_York", + periods=[PeriodIdentifier("period")], + override_status="stopped", + description="test-description", + stop_new_instances=True, + enforced=True, + hibernate=True, + retain_running=True, + configured_in_stack="some_stack_arn", + ssm_maintenance_window=["some_ssm_window_name"], + ) + item = schedule.to_item() + schedule2 = ScheduleDefinition.from_item(item) + assert schedule == schedule2 + + +def test_from_item_with_string_ssm_maint_win() -> None: + maintenance_window_name = "some_ssm_window_name" + item: dict[str, AttributeValueTypeDef] = { + "type": {"S": "schedule"}, + "name": {"S": "test-schedule"}, + "periods": {"SS": ["period"]}, + "ssm_maintenance_window": { + "S": 
maintenance_window_name + }, # tests backwards compatibility with str values + } + schedule2 = ScheduleDefinition.from_item(item) + assert schedule2.ssm_maintenance_window == [maintenance_window_name] + + +def test_to_item_strips_empty_values() -> None: + item = ScheduleDefinition( + name="test-schedule", + periods=[PeriodIdentifier("period")], + ).to_item() + + assert item == { + "type": {"S": "schedule"}, + "name": {"S": "test-schedule"}, + "periods": {"SS": ["period"]}, + } + + +def test_to_item_includes_all_values_when_provided() -> None: + item = ScheduleDefinition( + name="test-schedule", + timezone="America/New_York", + periods=[PeriodIdentifier("period")], + override_status="stopped", + description="test-description", + stop_new_instances=True, + enforced=True, + hibernate=True, + retain_running=True, + configured_in_stack="some_stack_arn", + ssm_maintenance_window=["some_ssm_window_name"], + ).to_item() + + assert item == { + "type": {"S": "schedule"}, + "name": {"S": "test-schedule"}, + "periods": {"SS": ["period"]}, + "timezone": {"S": "America/New_York"}, + "override_status": {"S": "stopped"}, + "description": {"S": "test-description"}, + "stop_new_instances": {"BOOL": True}, + "enforced": {"BOOL": True}, + "hibernate": {"BOOL": True}, + "retain_running": {"BOOL": True}, + "configured_in_stack": {"S": "some_stack_arn"}, + "ssm_maintenance_window": {"SS": ["some_ssm_window_name"]}, + } + + +# region Validation Tests +def test_accepts_basic_schedule() -> None: + ScheduleDefinition( + name="test-schedule", + timezone="America/New_York", + periods=[PeriodIdentifier("period")], + ) + + +def test_accepts_schedule_with_all_flags() -> None: + ScheduleDefinition( + name="test-schedule", + timezone="America/New_York", + periods=[PeriodIdentifier("period")], + override_status="stopped", + description="test-description", + stop_new_instances=True, + enforced=True, + hibernate=True, + retain_running=True, + configured_in_stack="some_stack_arn", + 
ssm_maintenance_window=["some_ssm_window_name"], + ) + + +@pytest.mark.parametrize("invalid", ["fake-timezone"]) +def test_rejects_invalid_timezone(invalid: str) -> None: + with pytest.raises(InvalidScheduleDefinition): + ScheduleDefinition( + name="test-schedule", + timezone=invalid, + periods=[PeriodIdentifier("period")], + ) + + +@pytest.mark.parametrize( + "tz_str", ["America/New_York", "America/Los_Angeles", "Asia/Tokyo"] +) +def test_accepts_valid_timezones(tz_str: str) -> None: + schedule = ScheduleDefinition( + name="test-schedule", + timezone=tz_str, + periods=[PeriodIdentifier("period")], + ) + + assert str(schedule.timezone) == tz_str + + +def test_rejects_when_no_periods_or_overrides_provided() -> None: + with pytest.raises(InvalidScheduleDefinition): + ScheduleDefinition( + name="test-schedule", + timezone="America/New_York", + ) + + +def test_accepts_override_status_with_no_periods() -> None: + ScheduleDefinition( + name="test-schedule", + timezone="America/New_York", + override_status="running", + ) + + +@pytest.mark.parametrize("status", ["invalid"]) +def test_rejects_invalid_override_status(status: str) -> None: + with pytest.raises(InvalidScheduleDefinition): + ScheduleDefinition( + name="test-schedule", + timezone="America/New_York", + periods=[PeriodIdentifier("period")], + override_status=status, + ) + + +@pytest.mark.parametrize("status", ["running", "stopped", "RUNNING", "StoPped"]) +def test_accepts_valid_override_status(status: str) -> None: + ScheduleDefinition( + name="test-schedule", + timezone="America/New_York", + periods=[PeriodIdentifier("period")], + override_status=status, + ) + + +# endregion diff --git a/source/app/tests/ops_monitoring/test_instance_counts.py b/source/app/tests/ops_monitoring/test_instance_counts.py new file mode 100644 index 00000000..1230348d --- /dev/null +++ b/source/app/tests/ops_monitoring/test_instance_counts.py @@ -0,0 +1,68 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from collections import Counter + +from instance_scheduler.ops_monitoring.instance_counts import ( + InstanceCounts, + InstanceCountsAggregator, + ServiceInstanceCounts, +) + + +def test_merge_combines_as_expected() -> None: + data1 = ServiceInstanceCounts( + { + "ec2": InstanceCountsAggregator( + { + "by_type": InstanceCounts( + {"t2.micro": Counter({"running": 10, "stopped": 5})} + ), + "by_schedule": InstanceCounts( + {"schedule_a": Counter({"running": 5, "stopped": 2})} + ), + }, + ), + "rds": InstanceCountsAggregator( + { + "by_type": InstanceCounts( + {"t3.micro": Counter({"running": 2, "stopped": 2})} + ), + "by_schedule": InstanceCounts( + {"schedule_b": Counter({"running": 2, "stopped": 1})} + ), + }, + ), + } + ) + + data2 = ServiceInstanceCounts( + { + "ec2": InstanceCountsAggregator( + { + "by_type": InstanceCounts( + { + "t2.micro": Counter({"running": 5, "stopped": 3}), + "t2.nano": Counter({"running": 4, "stopped": 2}), + } + ), + "by_schedule": InstanceCounts( + {"schedule_a": Counter({"running": 2, "stopped": 12})} + ), + }, + ), + } + ) + + assert data1.merged_with(data2) == { + "ec2": { + "by_type": { + "t2.micro": {"running": 15, "stopped": 8}, + "t2.nano": {"running": 4, "stopped": 2}, + }, + "by_schedule": {"schedule_a": {"running": 7, "stopped": 14}}, + }, + "rds": { + "by_type": {"t3.micro": {"running": 2, "stopped": 2}}, + "by_schedule": {"schedule_b": {"running": 2, "stopped": 1}}, + }, + } diff --git a/source/app/tests/schedulers/test_instance_scheduler.py b/source/app/tests/schedulers/test_instance_scheduler.py index 38f8982b..e69de29b 100644 --- a/source/app/tests/schedulers/test_instance_scheduler.py +++ b/source/app/tests/schedulers/test_instance_scheduler.py @@ -1,153 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -from zoneinfo import ZoneInfo - -from boto3 import Session -from pytest_mock import MockFixture - -from instance_scheduler.configuration.instance_schedule import ( - Instance, - InstanceSchedule, -) -from instance_scheduler.configuration.scheduling_context import SchedulingContext -from instance_scheduler.schedulers.instance_scheduler import InstanceScheduler -from instance_scheduler.schedulers.instance_states import InstanceStates -from instance_scheduler.service import Ec2Service, ServiceArgs -from tests.integration.helpers.schedule_helpers import quick_time -from tests.logger import MockLogger - - -def context() -> SchedulingContext: - return SchedulingContext( - current_dt=quick_time(10, 0, 0), - service="ec2", - schedule_clusters=False, - tag_name="tag", - region="us-east-1", - default_timezone=ZoneInfo("UTC"), - schedules={}, - trace=False, - enable_ssm_maintenance_windows=False, - use_metrics=False, - account_id="111111111111", - namespace="test", - aws_partition="aws", - scheduler_role_name="scheduler-role", - organization_id="o-0000000000", - schedule_lambda_account=True, - create_rds_snapshot=False, - ) - - -def test_get_desired_state_and_type_1(moto_backend: None, mocker: MockFixture) -> None: - schedule = InstanceSchedule( - name="test-1", - periods=[], - timezone="UTC", - override_status=None, - description=None, - use_metrics=None, - stop_new_instances=None, - use_maintenance_window=False, - ssm_maintenance_window="", - enforced=False, - hibernate=False, - retain_running=False, - ) - instance = Instance( - maintenance_window=schedule, - account="test", - region="us-east-1", - service="ec2", - id="ut12y21232u", - allow_resize=True, - hibernate=False, - state="running", - state_name="running", - is_running=True, - is_terminated=False, - current_state="running", - instancetype="type", - tags={}, - name="name", - schedule_name="schedule", - ) - scheduling_context = context() - logger = MockLogger() - ec2_service = 
Ec2Service( - ServiceArgs( - account_id="", - scheduling_context=scheduling_context, - logger=logger, - session=Session(), - stack_name="", - ) - ) - scheduler = InstanceScheduler( - ec2_service, - scheduling_context, - InstanceStates("", "", logger), - "", - None, - logger, - ) - mocker.patch.object(scheduler, "_logger") - inst_state, _ = scheduler.get_desired_state_and_type(schedule, instance) - assert inst_state == "stopped" - - -def test_get_desired_state_and_type_2(moto_backend: None, mocker: MockFixture) -> None: - schedule = InstanceSchedule( - name="test-1", - periods=[], - timezone="UTC", - override_status=None, - description=None, - use_metrics=None, - stop_new_instances=None, - use_maintenance_window=True, - ssm_maintenance_window="", - enforced=False, - hibernate=False, - retain_running=False, - ) - instance = Instance( - maintenance_window=None, - account="test", - region="us-east-1", - service="ec2", - id="ut12y21232u", - allow_resize=True, - hibernate=False, - state="running", - state_name="running", - is_running=True, - is_terminated=False, - current_state="running", - instancetype="type", - tags={}, - name="name", - schedule_name="schedule", - ) - scheduling_context = context() - logger = MockLogger() - ec2_service = Ec2Service( - ServiceArgs( - account_id="", - scheduling_context=scheduling_context, - logger=logger, - session=Session(), - stack_name="", - ) - ) - scheduler = InstanceScheduler( - ec2_service, - scheduling_context, - InstanceStates("", "", logger), - "", - None, - logger, - ) - mocker.patch.object(scheduler, "_logger") - inst_state, _ = scheduler.get_desired_state_and_type(schedule, instance) - assert inst_state == "stopped" diff --git a/source/app/tests/service/test_asg.py b/source/app/tests/service/test_asg.py new file mode 100644 index 00000000..5abe7e4f --- /dev/null +++ b/source/app/tests/service/test_asg.py @@ -0,0 +1,361 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import json +from datetime import datetime, timedelta, timezone +from typing import TYPE_CHECKING, Final +from zoneinfo import ZoneInfo + +from pytest import fixture, raises + +from instance_scheduler.model.period_definition import PeriodDefinition +from instance_scheduler.service.asg import ( + AsgSize, + AsgTag, + AsgValidationError, + period_to_actions, +) + +if TYPE_CHECKING: + from mypy_boto3_autoscaling.type_defs import AutoScalingGroupPaginatorTypeDef +else: + AutoScalingGroupPaginatorTypeDef = object + +ASG_SCHEDULED_TAG_KEY: Final = "scheduled" +SCHEDULE_NAME: Final = "Schedule" + + +@fixture +def default_asg_group_definition() -> AutoScalingGroupPaginatorTypeDef: + return { + "AutoScalingGroupName": "MockAutoScaling", + "MinSize": 1, + "MaxSize": 1, + "DesiredCapacity": 1, + "DefaultCooldown": 1, + "AvailabilityZones": ["us-east-1a"], + "HealthCheckType": "EC2", + "CreatedTime": datetime(2024, 1, 1), + } + + +def test_period_to_actions() -> None: + name: Final = "SimplePeriod" + tz: Final = ZoneInfo("Etc/UTC") + size: Final = AsgSize(1, 1, 1) + actions: Final = list( + period_to_actions( + PeriodDefinition( + name=name, + begintime="09:00", + endtime="17:00", + monthdays={"15"}, + ), + tz, + size, + "is-", + ) + ) + assert actions == [ + { + "ScheduledActionName": f"is-{name}Start", + "Recurrence": "0 9 15 * *", + "MinSize": size.min_size, + "MaxSize": size.max_size, + "DesiredCapacity": size.desired_size, + "TimeZone": str(tz), + }, + { + "ScheduledActionName": f"is-{name}Stop", + "Recurrence": "0 17 15 * *", + "MinSize": 0, + "MaxSize": 0, + "DesiredCapacity": 0, + "TimeZone": str(tz), + }, + ] + + +def test_asg_size_from_group( + default_asg_group_definition: AutoScalingGroupPaginatorTypeDef, +) -> None: + # Call + size = AsgSize.from_group(default_asg_group_definition) + + # Verify + assert size.min_size == default_asg_group_definition.get("MinSize") + assert size.max_size == 
default_asg_group_definition.get("MaxSize") + assert size.desired_size == default_asg_group_definition.get("DesiredCapacity") + + +def test_asg_size_from_tag() -> None: + # Prepare + min_size = 1 + max_size = 3 + desired_size = 2 + tag_value = AsgTag( + schedule="", + ttl="", + min_size=min_size, + max_size=max_size, + desired_size=desired_size, + ) + + # Call + size = AsgSize.from_tag(tag_value) + + # Verify + assert size.min_size == min_size + assert size.max_size == max_size + assert size.desired_size == desired_size + + +def test_asg_size_from_tag_when_min_size_invalid() -> None: + # Prepare + max_size = 3 + desired_size = 2 + tag_value = AsgTag( + schedule="", ttl="", max_size=max_size, desired_size=desired_size + ) + + # Call + size = AsgSize.from_tag(tag_value) + + # Verify + assert size.min_size == 0 + assert size.max_size == 0 + assert size.desired_size == 0 + + +def test_asg_size_from_tag_when_max_size_invalid() -> None: + # Prepare + min_size = 1 + desired_size = 2 + tag_value = AsgTag( + schedule="", ttl="", min_size=min_size, desired_size=desired_size + ) + + # Call + size = AsgSize.from_tag(tag_value) + + # Verify + assert size.min_size == 0 + assert size.max_size == 0 + assert size.desired_size == 0 + + +def test_asg_size_from_tag_when_desired_size_invalid() -> None: + # Prepare + min_size = 1 + max_size = 3 + tag_value = AsgTag(schedule="", ttl="", min_size=min_size, max_size=max_size) + + # Call + size = AsgSize.from_tag(tag_value) + + # Verify + assert size.min_size == 0 + assert size.max_size == 0 + assert size.desired_size == 0 + + +def test_asg_size_stopped() -> None: + # Call + size = AsgSize.stopped() + + # Verify + assert size.is_stopped_state() + + +def test_asg_tag_from_group( + default_asg_group_definition: AutoScalingGroupPaginatorTypeDef, +) -> None: + # Prepare + group = default_asg_group_definition + tag_value = AsgTag( + schedule=SCHEDULE_NAME, + ttl=(datetime.now(timezone.utc) + timedelta(days=30)).isoformat(), + min_size=1, + 
max_size=3, + desired_size=2, + ) + group["Tags"] = [{"Key": ASG_SCHEDULED_TAG_KEY, "Value": str(tag_value)}] + + # Call + scheduled_tag_value = AsgTag.from_group( + group=group, asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY + ) + + # Verify + assert scheduled_tag_value.schedule == tag_value.schedule + assert scheduled_tag_value.ttl == tag_value.ttl + assert scheduled_tag_value.min_size == tag_value.min_size + assert scheduled_tag_value.max_size == tag_value.max_size + assert scheduled_tag_value.desired_size == tag_value.desired_size + + +def test_asg_tag_from_group_when_no_scheduled_tag( + default_asg_group_definition: AutoScalingGroupPaginatorTypeDef, +) -> None: + # Call + with raises(AsgValidationError) as e: + AsgTag.from_group( + group=default_asg_group_definition, + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + ) + + # Verify + assert str(e.value) == "Scheduled tag missing" + + +def test_asg_tag_from_group_when_unable_to_parse( + default_asg_group_definition: AutoScalingGroupPaginatorTypeDef, +) -> None: + # Prepare + group = default_asg_group_definition + group["Tags"] = [{"Key": ASG_SCHEDULED_TAG_KEY, "Value": "Mock"}] + + # Call + with raises(AsgValidationError) as e: + AsgTag.from_group( + group=group, + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + ) + + # Verify + assert str(e.value) == "Unable to parse Scheduled tag value" + + +def test_asg_tag_from_group_when_scheduled_tag_invalid( + default_asg_group_definition: AutoScalingGroupPaginatorTypeDef, +) -> None: + # Prepare + group = default_asg_group_definition + group["Tags"] = [{"Key": ASG_SCHEDULED_TAG_KEY, "Value": "1"}] + + # Call + with raises(AsgValidationError) as e: + AsgTag.from_group( + group=group, + asg_scheduled_tag_key=ASG_SCHEDULED_TAG_KEY, + ) + + # Verify + assert str(e.value) == "Invalid Scheduled tag value" + + +def test_asg_tag_not_valid_when_overridden() -> None: + # Prepare + ttl: Final = (datetime.now(timezone.utc) + timedelta(days=3)).isoformat() + asg_tag = AsgTag( + 
schedule=SCHEDULE_NAME, ttl=ttl, min_size=1, max_size=3, desired_size=2 + ) + + # Call + valid, reason = asg_tag.is_still_valid( + schedule_name=SCHEDULE_NAME, + is_schedule_override=True, + ) + + # Verify + assert not valid + assert reason == "Overridden" + + +def test_asg_tag_not_valid_when_schedule_tag_schedule_different() -> None: + # Prepare + ttl: Final = (datetime.now(timezone.utc) + timedelta(days=3)).isoformat() + asg_tag = AsgTag( + schedule=f"{SCHEDULE_NAME}Invalid", + ttl=ttl, + min_size=1, + max_size=3, + desired_size=2, + ) + + # Call + valid, reason = asg_tag.is_still_valid( + schedule_name=SCHEDULE_NAME, + is_schedule_override=False, + ) + + # Verify + assert not valid + assert reason == "Configured for a different schedule" + + +def test_asg_tag_not_valid_when_schedule_tag_ttl_malformed() -> None: + # Prepare + asg_tag = AsgTag( + schedule=SCHEDULE_NAME, ttl="", min_size=1, max_size=3, desired_size=2 + ) + + # Call + valid, reason = asg_tag.is_still_valid( + schedule_name=SCHEDULE_NAME, + is_schedule_override=False, + ) + + # Verify + assert not valid + assert reason == "Unable to parse configuration TTL" + + +def test_asg_tag_not_valid_when_schedule_tag_ttl_expired() -> None: + # Prepare + ttl: Final = datetime.now(timezone.utc).isoformat() + asg_tag = AsgTag( + schedule=SCHEDULE_NAME, ttl=ttl, min_size=1, max_size=3, desired_size=2 + ) + + # Call + valid, reason = asg_tag.is_still_valid( + schedule_name=SCHEDULE_NAME, + is_schedule_override=False, + ) + + # Verify + assert not valid + assert reason == "Configuration expiring in less than one day" + + +def test_asg_tag_is_still_valid() -> None: + # Prepare + ttl: Final = (datetime.now(timezone.utc) + timedelta(days=3)).isoformat() + asg_tag = AsgTag( + schedule=SCHEDULE_NAME, ttl=ttl, min_size=1, max_size=3, desired_size=2 + ) + + # Call + valid, reason = asg_tag.is_still_valid( + schedule_name=SCHEDULE_NAME, + is_schedule_override=False, + ) + + # Verify + assert valid + assert ( + reason + == 
f"All conditions met, current config valid for schedule {SCHEDULE_NAME} until {ttl}" + ) + + +def test_asg_tag_str() -> None: + # Prepare + ttl: Final = (datetime.now(timezone.utc) + timedelta(days=3)).isoformat() + asg_tag = AsgTag( + schedule=SCHEDULE_NAME, ttl=ttl, min_size=1, max_size=3, desired_size=2 + ) + + # Call + str_value = str(asg_tag) + + # Verify + assert str_value == json.dumps( + { + "schedule": asg_tag.schedule, + "ttl": asg_tag.ttl, + "min_size": asg_tag.min_size, + "max_size": asg_tag.max_size, + "desired_size": asg_tag.desired_size, + } + ) diff --git a/source/app/tests/service/test_ec2_service.py b/source/app/tests/service/test_ec2_service.py index 67c4ddf1..885ded78 100644 --- a/source/app/tests/service/test_ec2_service.py +++ b/source/app/tests/service/test_ec2_service.py @@ -1,25 +1,36 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 from collections.abc import Sequence -from dataclasses import dataclass, replace +from dataclasses import dataclass from itertools import chain from typing import TYPE_CHECKING, Final from unittest.mock import MagicMock, patch +from zoneinfo import ZoneInfo import boto3 from boto3.session import Session +from botocore.stub import Stubber -from instance_scheduler import ScheduleState -from instance_scheduler.configuration.instance_schedule import ( - Instance, - InstanceSchedule, +from instance_scheduler.configuration.instance_schedule import InstanceSchedule +from instance_scheduler.configuration.scheduling_context import SchedulingContext +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, ) -from instance_scheduler.service import Ec2Service, ServiceArgs +from instance_scheduler.maint_win.ssm_mw_client import SSMMWClient +from instance_scheduler.model import EC2SSMMaintenanceWindow, MWStore +from instance_scheduler.service import Ec2Service from instance_scheduler.service.ec2 import 
EC2StateCode, get_tags -from tests import ami +from instance_scheduler.service.ec2_instance import EC2Instance +from instance_scheduler.util.session_manager import AssumedRole +from tests.conftest import get_ami from tests.integration.helpers.schedule_helpers import quick_time -from tests.integration.helpers.scheduling_context_builder import build_context +from tests.integration.helpers.scheduling_context_builder import ( + build_scheduling_context, +) from tests.logger import MockLogger +from tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, +) if TYPE_CHECKING: from mypy_boto3_ec2.client import EC2Client @@ -33,15 +44,6 @@ TagTypeDef = object -def test_ec2_state_code() -> None: - assert EC2StateCode.PENDING.value == 0 - assert EC2StateCode.RUNNING.value == 16 - assert EC2StateCode.SHUTTING_DOWN.value == 32 - assert EC2StateCode.TERMINATED.value == 48 - assert EC2StateCode.STOPPING.value == 64 - assert EC2StateCode.STOPPED.value == 80 - - def test_get_tags() -> None: assert get_tags({}) == {} @@ -55,21 +57,29 @@ def test_get_tags() -> None: assert get_tags(instance) == {"foo": "bar", "baz": "qux"} -def mock_service_args() -> ServiceArgs: - return ServiceArgs( - account_id="111111111111", - scheduling_context=build_context(quick_time(0, 0, 0)), +def build_ec2_service( + env: SchedulingRequestEnvironment = MockSchedulingRequestEnvironment(), + scheduling_context: SchedulingContext = build_scheduling_context( + quick_time(0, 0, 0) + ), +) -> Ec2Service: + return Ec2Service( + assumed_scheduling_role=AssumedRole( + account="123456789012", + region="us-east-1", + role_name="role-name", + session=Session(), + ), + scheduling_context=scheduling_context, logger=MockLogger(), - session=Session(), - stack_name="", + env=env, ) def test_ec2_service_attributes() -> None: - service = Ec2Service(mock_service_args()) + service = build_ec2_service() assert service.service_name == "ec2" - assert service.allow_resize 
@dataclass(frozen=True) @@ -91,7 +101,10 @@ def create_instances_of_status( ) -> InstancesOfStatus: total_qty: Final = qty_running + qty_stopped + qty_terminated run_response = ec2.run_instances( - ImageId=ami, InstanceType=instance_type, MinCount=total_qty, MaxCount=total_qty + ImageId=get_ami(), + InstanceType=instance_type, + MinCount=total_qty, + MaxCount=total_qty, ) instance_ids = [instance["InstanceId"] for instance in run_response["Instances"]] assert ( @@ -122,14 +135,15 @@ def create_instances_of_status( ) -@patch("instance_scheduler.service.ec2.EC2SSMMaintenanceWindows") -def test_get_schedulable_instances(mock_mw: MagicMock, moto_backend: None) -> None: - service_args = mock_service_args() - service_args["scheduling_context"] = replace( - service_args["scheduling_context"], enable_ssm_maintenance_windows=True - ) - service: Final = Ec2Service(service_args) - assert service.get_schedulable_instances() == [] +@patch.object(SSMMWClient, "get_mws_from_ssm") +def test_get_schedulable_instances( + mock_mw_backend: MagicMock, + moto_backend: None, + mw_store: MWStore, +) -> None: + env = MockSchedulingRequestEnvironment(enable_ec2_ssm_maintenance_windows=True) + service = build_ec2_service(env=env) + assert list(service.describe_tagged_instances()) == [] ec2: Final[EC2Client] = boto3.client("ec2") instance_type: Final[InstanceTypeType] = "m6g.medium" @@ -140,13 +154,18 @@ def test_get_schedulable_instances(mock_mw: MagicMock, moto_backend: None) -> No hibernated_instance_ids: Final = instances.running_ids[0:2] untagged_instance_ids: Final = instances.running_ids[2:4] - schedule_tag_key = service_args["scheduling_context"].tag_name + schedules: dict[str, InstanceSchedule] = dict() + # setup schedules + schedule_tag_key = env.schedule_tag_key for instance_id in instances.all_ids: schedule_name = f"{instance_id}-schedule" hibernate = instance_id in hibernated_instance_ids - service_args["scheduling_context"].schedules[schedule_name] = InstanceSchedule( - 
schedule_name, hibernate=hibernate + schedules[schedule_name] = InstanceSchedule( + schedule_name, + hibernate=hibernate, + timezone=ZoneInfo("UTC"), ) + tags: list[TagTypeDef] = [ {"Key": "Name", "Value": f"{instance_id}-name"}, ] @@ -154,103 +173,110 @@ def test_get_schedulable_instances(mock_mw: MagicMock, moto_backend: None) -> No tags.append({"Key": schedule_tag_key, "Value": schedule_name}) ec2.create_tags(Resources=[instance_id], Tags=tags) + # create schedules instance_with_maintenance_window = instances.running_ids[0] + + maintenance_window = EC2SSMMaintenanceWindow( + window_name=f"{instance_with_maintenance_window}-mw", + window_id="mw-00000000000000000", + account_id="123456789012", + region="us-east-1", + schedule_timezone=ZoneInfo("UTC"), + next_execution_time=quick_time(10, 20, 0), + duration_hours=1, + ) schedule_name = f"{instance_with_maintenance_window}-schedule" - maintenance_window_name = f"{instance_with_maintenance_window}-mw" - service_args["scheduling_context"].schedules[ - schedule_name - ].use_maintenance_window = True - service_args["scheduling_context"].schedules[ - schedule_name - ].ssm_maintenance_window = maintenance_window_name - maintenance_window_schedule = InstanceSchedule(maintenance_window_name) - mock_mw.return_value.ssm_maintenance_windows.return_value = { - maintenance_window_name: maintenance_window_schedule - } + schedules[schedule_name].ssm_maintenance_window = [maintenance_window.window_name] + + mock_mw_backend.return_value = [maintenance_window] - result = service.get_schedulable_instances() + service = build_ec2_service( + env, + build_scheduling_context(current_dt=quick_time(10, 0, 0), schedules=schedules), + ) + result = list(service.describe_tagged_instances()) assert len(result) == instances.qty - len(untagged_instance_ids) - len( instances.terminated_ids ) - schedulable_instances = {instance["id"]: instance for instance in result} + schedulable_instances = {instance.id: instance for instance in result} for 
instance_id, instance in schedulable_instances.items(): - assert instance["id"] == instance_id - assert "arn" not in instance - assert instance["allow_resize"] - name = f"{instance_id}-name" - assert instance["name"] == name - assert instance["instancetype"] == instance_type - assert "engine_type" not in instance - schedule_name = f"{instance_id}-schedule" - assert instance["schedule_name"] == schedule_name - assert instance["tags"] == {schedule_tag_key: schedule_name, "Name": name} - assert not instance["resized"] - assert "is_cluster" not in instance - assert "account" not in instance - assert "region" not in instance - assert "service" not in instance - assert "instance_str" not in instance + assert instance.id == instance_id + assert instance.is_resizable + name = f"{instance.id}-name" + assert instance.name == name + assert instance.instance_type == instance_type + schedule_name = f"{instance.id}-schedule" + assert instance.schedule_name == schedule_name + assert instance.tags == {schedule_tag_key: schedule_name, "Name": name} + assert not instance.resized if instance_id != instance_with_maintenance_window: - assert instance["maintenance_window"] is None + assert instance.maintenance_windows == [] for instance_id in instances.running_ids: if instance_id not in untagged_instance_ids: instance = schedulable_instances[instance_id] - assert instance["state"] == EC2StateCode.RUNNING - assert instance["state_name"] == "running" - assert instance["is_running"] - assert not instance["is_terminated"] - assert instance["current_state"] == "running" - - assert instance["hibernate"] == bool(instance_id in hibernated_instance_ids) + assert instance.current_state == "running" + assert instance.is_running + assert not instance.is_stopped - assert ( - schedulable_instances[instance_with_maintenance_window]["maintenance_window"] - is maintenance_window_schedule - ) + assert schedulable_instances[ + instance_with_maintenance_window + ].maintenance_windows == [ + 
maintenance_window.to_schedule(env.scheduler_frequency_minutes) + ] for instance_id in instances.stopped_ids: instance = schedulable_instances[instance_id] - assert instance["state"] == EC2StateCode.STOPPED - assert instance["state_name"] == "stopped" - assert not instance["is_running"] - assert not instance["is_terminated"] - assert instance["current_state"] == "stopped" - assert not instance["hibernate"] + assert not instance.is_running + assert instance.is_stopped + assert instance.current_state == "stopped" + assert not instance.should_hibernate for instance_id in chain(instances.terminated_ids, untagged_instance_ids): assert instance_id not in schedulable_instances +def test_get_schedulable_instances_omits_asg_instances(moto_backend: None) -> None: + env: Final = MockSchedulingRequestEnvironment() + service: Final = build_ec2_service(env=env) + + ec2: Final[EC2Client] = boto3.client("ec2") + instance_type: Final[InstanceTypeType] = "m6g.12xlarge" + instances = create_instances_of_status( + ec2, instance_type=instance_type, qty_running=1 + ) + + tags: list[TagTypeDef] = [ + {"Key": env.schedule_tag_key, "Value": "my-schedule"}, + {"Key": "aws:autoscaling:groupName", "Value": "my-group"}, + ] + ec2.create_tags(Resources=[instances.all_ids[0]], Tags=tags) + + assert list(service.describe_tagged_instances()) == [] + + def instance_data_from( *, instance_id: str, instance_state: InstanceStateNameType, instance_type: InstanceTypeType, -) -> Instance: + hibernate: bool = False, +) -> EC2Instance: if instance_state not in {"running", "stopped"}: raise ValueError(f"Unimplemented instance data conversion: {instance_state}") - running: Final = instance_state == "running" - state_code: Final = EC2StateCode.RUNNING if running else EC2StateCode.STOPPED - schedule_state: Final[ScheduleState] = "running" if running else "stopped" - return Instance( - id=instance_id, - allow_resize=True, - hibernate=False, - state=state_code, - state_name=instance_state, - is_running=running, 
- is_terminated=False, - current_state=schedule_state, - instancetype=instance_type, - maintenance_window=None, - tags={}, - name="", - schedule_name=None, + return EC2Instance( + _id=instance_id, + should_hibernate=hibernate, + _current_state=instance_state, + _instance_type=instance_type, + _maintenance_windows=[], + _tags={}, + _name="", + _schedule_name="sched_name", ) @@ -267,7 +293,7 @@ def test_resize_instance(moto_backend: None) -> None: ) new_instance_type: Final[InstanceTypeType] = "m6g.12xlarge" - service: Final = Ec2Service(mock_service_args()) + service: Final = build_ec2_service() service.resize_instance(instance, new_instance_type) assert ( @@ -285,10 +311,10 @@ def test_start_instances(moto_backend: None) -> None: ec2, instance_type=instance_type, qty_stopped=7 ) - service: Final = Ec2Service(mock_service_args()) + service: Final = build_ec2_service() assert list(service.start_instances([])) == [] - instances_to_start: list[Instance] = [] + instances_to_start: list[EC2Instance] = [] for instance_id in instances.stopped_ids: instances_to_start.append( instance_data_from( @@ -318,9 +344,9 @@ def test_start_instances_with_errors(moto_backend: None) -> None: ec2, instance_type=instance_type, qty_stopped=100 ) - service: Final = Ec2Service(mock_service_args()) + service: Final = build_ec2_service() - instances_to_start: list[Instance] = [] + instances_to_start: list[EC2Instance] = [] for instance_id in instances.stopped_ids: instances_to_start.append( instance_data_from( @@ -359,10 +385,10 @@ def test_stop_instances(moto_backend: None) -> None: ec2, instance_type=instance_type, qty_running=52 ) - service: Final = Ec2Service(mock_service_args()) + service: Final = build_ec2_service() assert list(service.stop_instances([])) == [] - instances_to_stop: list[Instance] = [] + instances_to_stop: list[EC2Instance] = [] for instance_id in instances.running_ids: instances_to_stop.append( instance_data_from( @@ -392,9 +418,9 @@ def 
test_stop_instances_with_errors(moto_backend: None) -> None: ec2, instance_type=instance_type, qty_running=52 ) - service: Final = Ec2Service(mock_service_args()) + service: Final = build_ec2_service() - instances_to_stop: list[Instance] = [] + instances_to_stop: list[EC2Instance] = [] for instance_id in instances.running_ids: instances_to_stop.append( instance_data_from( @@ -425,3 +451,62 @@ def test_stop_instances_with_errors(moto_backend: None) -> None: statuses = ec2.describe_instance_status(InstanceIds=instances.running_ids) for status in statuses["InstanceStatuses"]: assert status["InstanceState"]["Name"] == "stopped" + + +def test_stop_instances_will_fallback_on_regular_stop_when_hibernate_errors() -> None: + ec2: Final[EC2Client] = boto3.client("ec2") + stub_ec2: Final = Stubber(ec2) + scheduling_role = AssumedRole( + account="123456789012", + region="us-east-1", + role_name="role-name", + session=Session(), + ) + + setattr(scheduling_role.session, "client", MagicMock(return_value=ec2)) + + service: Final = Ec2Service( + assumed_scheduling_role=scheduling_role, + scheduling_context=build_scheduling_context(quick_time(0, 0, 0)), + logger=MockLogger(), + env=MockSchedulingRequestEnvironment(), + ) + + my_instance_id: Final = "i-1234567890abcdef0" + stub_ec2.add_client_error( + "stop_instances", + "UnsupportedHibernationConfiguration", + expected_params={"InstanceIds": [my_instance_id], "Hibernate": True}, + ) + + stub_ec2.add_response( + "stop_instances", + { + "StoppingInstances": [ + { + "CurrentState": {"Code": EC2StateCode.STOPPING, "Name": "stopping"}, + "InstanceId": my_instance_id, + "PreviousState": {"Code": EC2StateCode.RUNNING, "Name": "running"}, + } + ] + }, + {"InstanceIds": [my_instance_id]}, + ) + + with stub_ec2: + result = list( + service.stop_instances( + [ + instance_data_from( + instance_id=my_instance_id, + instance_state="stopped", + instance_type="m6g.medium", + hibernate=True, + ) + ] + ) + ) + stub_ec2.assert_no_pending_responses() + 
+ instance_results: Final = {instance_id: status for (instance_id, status) in result} + assert instance_results[my_instance_id] == "stopped" diff --git a/source/app/tests/service/test_rds_service.py b/source/app/tests/service/test_rds_service.py new file mode 100644 index 00000000..7b7b512f --- /dev/null +++ b/source/app/tests/service/test_rds_service.py @@ -0,0 +1,179 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from contextlib import contextmanager +from typing import Iterator +from unittest.mock import MagicMock, call, patch + +import pytest +from _pytest.fixtures import fixture +from boto3.session import Session + +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, +) +from instance_scheduler.service import RdsService +from instance_scheduler.service.rds import RdsTagDescription +from instance_scheduler.util.session_manager import AssumedRole +from tests.integration.helpers.schedule_helpers import quick_time +from tests.integration.helpers.scheduling_context_builder import ( + build_scheduling_context, +) +from tests.logger import MockLogger +from tests.test_utils.mock_scheduling_request_environment import ( + MockSchedulingRequestEnvironment, +) + + +def build_rds_service( + env: SchedulingRequestEnvironment = MockSchedulingRequestEnvironment(), +) -> RdsService: + return RdsService( + assumed_scheduling_role=AssumedRole( + account="123456789012", + region="us-east-1", + role_name="role-name", + session=Session(), + ), + scheduling_context=build_scheduling_context(quick_time(0, 0, 0)), + logger=MockLogger(), + env=env, + ) + + +def test_rds_service_attributes() -> None: + service = build_rds_service() + assert service.service_name == "rds" + + +@fixture +def mock_boto_client() -> Iterator[MagicMock]: + with patch( + "instance_scheduler.service.rds.get_client_with_standard_retry" + ) as get_client_func: + mock = MagicMock() + 
get_client_func.return_value = mock + + # these are necessary to avoid an infinite loop + mock.describe_db_instances.return_value = {"Marker": ""} + mock.describe_db_clusters.return_value = {"Marker": ""} + + yield mock + + +@contextmanager +def mock_tagged_resources( + rds_service: RdsService, tagged_resources: RdsTagDescription +) -> Iterator[None]: + with patch.object(rds_service, "rds_resource_tags", tagged_resources): + yield + + +def test_describes_clusters_when_only_neptune_enabled( + mock_boto_client: MagicMock, +) -> None: + env = MockSchedulingRequestEnvironment( + enable_rds_service=False, + enable_rds_clusters=False, + enable_neptune_service=True, + enable_docdb_service=False, + ) + + rds_service = build_rds_service(env) + + with mock_tagged_resources( + rds_service, + { + "db": {"instanceArn": {"Schedule": "test-schedule"}}, + "cluster": {"clusterArn": {"Schedule": "test-schedule"}}, + }, + ): + list(rds_service.describe_tagged_instances()) + + mock_boto_client.get_paginator.assert_has_calls( + [call("describe_db_clusters")], + ) + + +def test_describes_clusters_when_only_docdb_enabled( + mock_boto_client: MagicMock, +) -> None: + env = MockSchedulingRequestEnvironment( + enable_rds_service=False, + enable_rds_clusters=False, + enable_neptune_service=False, + enable_docdb_service=True, + ) + + rds_service = build_rds_service(env) + + with mock_tagged_resources( + rds_service, + { + "db": {"instanceArn": {"Schedule": "test-schedule"}}, + "cluster": {"clusterArn": {"Schedule": "test-schedule"}}, + }, + ): + list(rds_service.describe_tagged_instances()) + + mock_boto_client.get_paginator.assert_has_calls( + [call("describe_db_clusters")], + ) + + +def test_skips_describe_instances_when_no_tagged_instances_found( + mock_boto_client: MagicMock, +) -> None: + env = MockSchedulingRequestEnvironment( + enable_rds_service=True, + enable_rds_clusters=True, + enable_neptune_service=True, + enable_docdb_service=True, + ) + + rds_service = build_rds_service(env) + 
+ with mock_tagged_resources( + rds_service, + {"db": {}, "cluster": {"clusterArn": {"Schedule": "test-schedule"}}}, + ): + list(rds_service.describe_tagged_instances()) + + # did call clusters + mock_boto_client.get_paginator.assert_has_calls( + [call("describe_db_clusters")], + ) + + # did not call instances + with pytest.raises(AssertionError): + mock_boto_client.get_paginator.assert_has_calls( + [call("describe_db_instances")], + ) + + +def test_skips_describe_clusters_when_no_tagged_clusters_found( + mock_boto_client: MagicMock, +) -> None: + env = MockSchedulingRequestEnvironment( + enable_rds_service=True, + enable_rds_clusters=True, + enable_neptune_service=True, + enable_docdb_service=True, + ) + rds_service = build_rds_service(env) + + with mock_tagged_resources( + rds_service, + {"db": {"someArn": {"Schedule": "test-schedule"}}, "cluster": {}}, + ): + list(rds_service.describe_tagged_instances()) + + # did call instances + mock_boto_client.get_paginator.assert_has_calls( + [call("describe_db_instances")], + ) + + # did not call clusters + with pytest.raises(AssertionError): + mock_boto_client.get_paginator.assert_has_calls( + [call("describe_db_clusters")], + ) diff --git a/source/app/tests/test_init.py b/source/app/tests/test_init.py new file mode 100644 index 00000000..7ca98099 --- /dev/null +++ b/source/app/tests/test_init.py @@ -0,0 +1,9 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from importlib.metadata import version + +from instance_scheduler import __version__ + + +def test_version_read_from_toml_matches_package_version() -> None: + assert version("instance_scheduler") == __version__ diff --git a/source/app/tests/test_main.py b/source/app/tests/test_main.py index de88742f..a5ced4a3 100644 --- a/source/app/tests/test_main.py +++ b/source/app/tests/test_main.py @@ -5,16 +5,12 @@ from aws_lambda_powertools.utilities.typing import LambdaContext from instance_scheduler import main -from instance_scheduler.handler.scheduling_orchestrator import ( - OrchestrationRequest, - SchedulingOrchestratorHandler, -) from instance_scheduler.main import lambda_handler from instance_scheduler.util.logger import Logger -from tests.context import MockLambdaContext -def test_scheduling_request_handler_called() -> None: +@patch.object(Logger, "client") # stops logger from slowing down the test +def test_correct_handler_called(logger_client: MagicMock) -> None: mock_handler = MagicMock() mock_handler.is_handling_request.return_value = True my_response = "Everything's great!" @@ -25,16 +21,3 @@ def test_scheduling_request_handler_called() -> None: assert lambda_handler({}, LambdaContext()) == my_response mock_handler.return_value.handle_request.assert_called_once() - - -@patch.object(Logger, "client") -@patch.object(SchedulingOrchestratorHandler, "handle_request") -def test_orchestrator_event( - handle_request_method: MagicMock, logger_client: MagicMock -) -> None: - event: OrchestrationRequest = { - "scheduled_action": "run_orchestrator", - } - - lambda_handler(event, MockLambdaContext()) - handle_request_method.assert_called_once() diff --git a/source/app/tests/test_utils/any_nonempty_string.py b/source/app/tests/test_utils/any_nonempty_string.py new file mode 100644 index 00000000..7712d18b --- /dev/null +++ b/source/app/tests/test_utils/any_nonempty_string.py @@ -0,0 +1,35 @@ +# Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from typing import Any + +import pytest + + +class AnyNonEmptyString(str): + """helper object for asserting equals against any non-empty string value""" + + def __str__(self) -> str: + return "##AnyNonEmptyString##" + + def __len__(self) -> int: + return self.__str__().__len__() + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, str): + return False + return bool(other.strip()) + + def __ne__(self, other: Any) -> bool: + return not self.__eq__(other) + + +@pytest.mark.parametrize("valid_input", ["string", "false", "-1"]) +def test_equals_non_empty_string(valid_input: str) -> None: + assert valid_input == AnyNonEmptyString() + assert AnyNonEmptyString() == valid_input + + +@pytest.mark.parametrize("invalid_input", ["", " ", None]) +def test_not_equals_empty_string(invalid_input: str) -> None: + assert invalid_input != AnyNonEmptyString() + assert AnyNonEmptyString() != invalid_input diff --git a/source/app/tests/test_utils/app_env_utils.py b/source/app/tests/test_utils/app_env_utils.py new file mode 100644 index 00000000..9d6ea230 --- /dev/null +++ b/source/app/tests/test_utils/app_env_utils.py @@ -0,0 +1,113 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import dataclasses +import os +from contextlib import contextmanager +from functools import wraps +from typing import Any, Callable, Iterator, TypeVar, cast +from unittest.mock import patch +from zoneinfo import ZoneInfo + +import instance_scheduler.util.app_env +from instance_scheduler.util.app_env import AppEnv + +FuncT = TypeVar("FuncT", bound=Callable[..., Any]) + + +def with_mock_app_env(**overrides: Any) -> Callable[[FuncT], FuncT]: + def decorator(func_to_decorate: FuncT) -> FuncT: + @wraps(func_to_decorate) + def wrapper(*args: Any, **kwargs: Any) -> Any: + with mock_app_env(**overrides): + func_to_decorate(*args, **kwargs) + + return cast(FuncT, wrapper) + + return decorator + + +@contextmanager +def mock_app_env(**overrides: Any) -> Iterator[AppEnv]: + # clear cached env so that the new one is picked up + old_env = instance_scheduler.util.app_env._app_env + instance_scheduler.util.app_env._app_env = None + app_env = dataclasses.replace(example_app_env(), **overrides) + with patch.dict(os.environ, env_from_app_env(app_env)): + yield app_env + instance_scheduler.util.app_env._app_env = old_env + + +def example_app_env() -> AppEnv: + return AppEnv( + scheduler_frequency_minutes=5, + log_group="my-log-group", + topic_arn="arn:aws:sns:us-east-1:123456789012:my-topic-arn", + stack_name="my-stack-name", + send_anonymous_metrics=False, + solution_id="my-solution-id", + solution_version="my-solution-version", + enable_debug_logging=True, + user_agent_extra="my-user-agent-extra", + anonymous_metrics_url="my-metrics-url", + stack_id="my-stack-id", + uuid_key="my-uuid-key", + start_ec2_batch_size=1, + schedule_tag_key="Schedule", + default_timezone=ZoneInfo("Asia/Tokyo"), + enable_ec2_service=True, + enable_rds_service=True, + enable_rds_clusters=False, + enable_neptune_service=False, + enable_docdb_service=False, + enable_rds_snapshots=True, + schedule_regions=["us-east-1", "us-west-2"], + app_namespace="my-app-namespace", + 
scheduler_role_name="my-scheduler-role-name", + enable_schedule_hub_account=True, + enable_ec2_ssm_maintenance_windows=False, + start_tags=["my-first-start-tag", "my-second-start-tag"], + stop_tags=["my-stop-tag"], + enable_aws_organizations=False, + maintenance_window_table_name="my-maintenance-window-table", + config_table_name="my-config-table-name", + state_table_name="my-state-table-name", + ) + + +def env_from_app_env(app_env: AppEnv) -> dict[str, str]: + return { + "SCHEDULER_FREQUENCY": str(app_env.scheduler_frequency_minutes), + "LOG_GROUP": app_env.log_group, + "ISSUES_TOPIC_ARN": app_env.topic_arn, + "STACK_NAME": app_env.stack_name, + "SEND_METRICS": str(app_env.send_anonymous_metrics), + "SOLUTION_ID": app_env.solution_id, + "SOLUTION_VERSION": app_env.solution_version, + "TRACE": str(app_env.enable_debug_logging), + "USER_AGENT_EXTRA": app_env.user_agent_extra, + "METRICS_URL": app_env.anonymous_metrics_url, + "STACK_ID": app_env.stack_id, + "UUID_KEY": app_env.uuid_key, + "START_EC2_BATCH_SIZE": str(app_env.start_ec2_batch_size), + "SCHEDULE_TAG_KEY": app_env.schedule_tag_key, + "DEFAULT_TIMEZONE": str(app_env.default_timezone), + "ENABLE_EC2_SERVICE": str(app_env.enable_ec2_service), + "ENABLE_RDS_SERVICE": str(app_env.enable_rds_service), + "ENABLE_RDS_CLUSTERS": str(app_env.enable_rds_clusters), + "ENABLE_NEPTUNE_SERVICE": str(app_env.enable_neptune_service), + "ENABLE_DOCDB_SERVICE": str(app_env.enable_docdb_service), + "ENABLE_RDS_SNAPSHOTS": str(app_env.enable_rds_snapshots), + "SCHEDULE_REGIONS": ", ".join(app_env.schedule_regions), + "APP_NAMESPACE": app_env.app_namespace, + "SCHEDULER_ROLE_NAME": app_env.scheduler_role_name, + "ENABLE_SCHEDULE_HUB_ACCOUNT": str(app_env.enable_schedule_hub_account), + "ENABLE_EC2_SSM_MAINTENANCE_WINDOWS": str( + app_env.enable_ec2_ssm_maintenance_windows + ), + "START_TAGS": ", ".join(app_env.start_tags), + "STOP_TAGS": ", ".join(app_env.stop_tags), + "ENABLE_AWS_ORGANIZATIONS": 
str(app_env.enable_aws_organizations), + "MAINTENANCE_WINDOW_TABLE": app_env.maintenance_window_table_name, + "CONFIG_TABLE": app_env.config_table_name, + "STATE_TABLE": app_env.state_table_name, + } diff --git a/source/app/tests/test_utils/mock_asg_environment.py b/source/app/tests/test_utils/mock_asg_environment.py new file mode 100644 index 00000000..674d4a13 --- /dev/null +++ b/source/app/tests/test_utils/mock_asg_environment.py @@ -0,0 +1,40 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import os +from dataclasses import dataclass +from unittest.mock import patch +from zoneinfo import ZoneInfo + + +@dataclass +class MockAsgEnvironment: + user_agent_extra: str = "my-user-agent-extra" + issues_topic_arn: str = "arn:aws:sns:us-east-1:123456789012:my-topic-arn" + logger_raise_exceptions: bool = False + config_table_name: str = "my-config-table-name" + asg_scheduling_role_name: str = "my-role" + default_timezone: ZoneInfo = ZoneInfo("UTC") + schedule_tag_key: str = "Schedule" + scheduled_tag_key: str = "scheduled" + rule_prefix: str = "is-" + + def _to_env_dict(self) -> dict[str, str]: + return { + "USER_AGENT_EXTRA": self.user_agent_extra, + "ISSUES_TOPIC_ARN": self.issues_topic_arn, + "LOGGER_RAISE_EXCEPTIONS": str(self.logger_raise_exceptions), + "CONFIG_TABLE": self.config_table_name, + "ASG_SCHEDULING_ROLE_NAME": self.asg_scheduling_role_name, + "DEFAULT_TIMEZONE": str(self.default_timezone), + "SCHEDULE_TAG_KEY": self.schedule_tag_key, + "SCHEDULED_TAG_KEY": self.scheduled_tag_key, + "RULE_PREFIX": self.rule_prefix, + } + + def __enter__(self) -> "MockAsgEnvironment": + self._patcher = patch.dict(os.environ, self._to_env_dict()) + self._patcher.__enter__() + return self + + def __exit__(self, exc_type: None, exc_val: None, exc_tb: None) -> None: + self._patcher.__exit__() diff --git a/source/app/tests/test_utils/mock_asg_orchestrator_environment.py 
b/source/app/tests/test_utils/mock_asg_orchestrator_environment.py new file mode 100644 index 00000000..b534db77 --- /dev/null +++ b/source/app/tests/test_utils/mock_asg_orchestrator_environment.py @@ -0,0 +1,18 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass, field + +from instance_scheduler.handler.environments.asg_orch_env import AsgOrchEnv + + +@dataclass(frozen=True) +class MockAsgOrchestratorEnvironment(AsgOrchEnv): + user_agent_extra: str = "my-user-agent-extra" + + issues_topic_arn: str = "arn:aws:sns:us-east-1:123456789012:my-topic-arn" + logger_raise_exceptions: bool = False + + config_table_name: str = "my-config-table-name" + enable_schedule_hub_account: bool = True + schedule_regions: list[str] = field(default_factory=list) + asg_scheduler_name: str = "asg-scheduling-request-handler-lambda" diff --git a/source/app/tests/test_utils/mock_metrics_environment.py b/source/app/tests/test_utils/mock_metrics_environment.py new file mode 100644 index 00000000..e7adae25 --- /dev/null +++ b/source/app/tests/test_utils/mock_metrics_environment.py @@ -0,0 +1,39 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import os +import uuid +from dataclasses import dataclass +from unittest.mock import patch + +from instance_scheduler.ops_metrics import metrics +from instance_scheduler.ops_metrics.metrics import MetricsEnvironment + + +@dataclass +class MockMetricsEnviron(MetricsEnvironment): + send_anonymous_metrics: bool = False + anonymous_metrics_url: str = "my-metrics-url" + solution_id: str = "my-solution-id" + solution_version: str = "my-solution-version" + scheduler_frequency_minutes: int = 5 + metrics_uuid: uuid.UUID = uuid.uuid4() + + def _to_env_dict(self) -> dict[str, str]: + return { + "SEND_METRICS": str(self.send_anonymous_metrics), + "METRICS_URL": self.anonymous_metrics_url, + "SOLUTION_ID": self.solution_id, + "SOLUTION_VERSION": self.solution_version, + "SCHEDULING_INTERVAL_MINUTES": str(self.scheduler_frequency_minutes), + "METRICS_UUID": str(self.metrics_uuid), + } + + def __enter__(self) -> "MockMetricsEnviron": + self._patcher = patch.dict(os.environ, self._to_env_dict()) + self._patcher.__enter__() + metrics._metrics_env = None # reset caching + return self + + def __exit__(self, exc_type: None, exc_val: None, exc_tb: None) -> None: + self._patcher.__exit__() + metrics._metrics_env = None # reset caching diff --git a/source/app/tests/test_utils/mock_metrics_uuid_environment.py b/source/app/tests/test_utils/mock_metrics_uuid_environment.py new file mode 100644 index 00000000..80d88e0a --- /dev/null +++ b/source/app/tests/test_utils/mock_metrics_uuid_environment.py @@ -0,0 +1,31 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +import os +from dataclasses import dataclass +from unittest.mock import patch + +from instance_scheduler.handler.environments.metrics_uuid_environment import ( + MetricsUuidEnvironment, +) + + +@dataclass +class MockMetricsUuidEnviron(MetricsUuidEnvironment): + user_agent_extra: str = "user-agent-extra" + uuid_key: str = "my-uuid-key" + stack_id: str = "my-stack-id" + + def _to_env_dict(self) -> dict[str, str]: + return { + "USER_AGENT_EXTRA": self.user_agent_extra, + "STACK_ID": self.stack_id, + "UUID_KEY": self.uuid_key, + } + + def __enter__(self) -> "MockMetricsUuidEnviron": + self._patcher = patch.dict(os.environ, self._to_env_dict()) + self._patcher.__enter__() + return self + + def __exit__(self, exc_type: None, exc_val: None, exc_tb: None) -> None: + self._patcher.__exit__() diff --git a/source/app/tests/test_utils/mock_orchestrator_environment.py b/source/app/tests/test_utils/mock_orchestrator_environment.py new file mode 100644 index 00000000..232d5161 --- /dev/null +++ b/source/app/tests/test_utils/mock_orchestrator_environment.py @@ -0,0 +1,41 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass, field +from zoneinfo import ZoneInfo + +from instance_scheduler.handler.environments.orchestrator_environment import ( + OrchestratorEnvironment, +) + + +@dataclass(frozen=True) +class MockOrchestratorEnvironment(OrchestratorEnvironment): + # logging + user_agent_extra: str = "my-user-agent-extra" + log_group: str = "my-log-group" + topic_arn: str = "arn:aws:sns:us-east-1:123456789012:my-topic-arn" + enable_debug_logging: bool = True + # references + scheduling_request_handler_name: str = "scheduling-request-handler-lambda" + config_table_name: str = "my-config-table-name" + # scheduling + enable_schedule_hub_account: bool = False + enable_ec2_service: bool = False + enable_rds_service: bool = False + enable_rds_clusters: bool = False + enable_neptune_service: bool = False + enable_docdb_service: bool = False + enable_asg_service: bool = False + schedule_regions: list[str] = field(default_factory=list) + + # used for metrics only + default_timezone: ZoneInfo = ZoneInfo("Asia/Tokyo") + enable_rds_snapshots: bool = True + scheduler_frequency_minutes: int = 5 + enable_aws_organizations: bool = False + enable_ec2_ssm_maintenance_windows: bool = False + ops_dashboard_enabled: bool = True + start_tags: list[str] = field( + default_factory=lambda: ["my-first-start-tag", "my-second-start-tag"] + ) + stop_tags: list[str] = field(default_factory=lambda: ["my-stop-tag"]) diff --git a/source/app/tests/test_utils/mock_scheduling_request_environment.py b/source/app/tests/test_utils/mock_scheduling_request_environment.py new file mode 100644 index 00000000..db537769 --- /dev/null +++ b/source/app/tests/test_utils/mock_scheduling_request_environment.py @@ -0,0 +1,35 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass, field +from zoneinfo import ZoneInfo + +from instance_scheduler.handler.environments.scheduling_request_environment import ( + SchedulingRequestEnvironment, +) + + +@dataclass(frozen=True) +class MockSchedulingRequestEnvironment(SchedulingRequestEnvironment): + user_agent_extra: str = "my-user-agent-extra" + log_group: str = "my-log-group" + topic_arn: str = "arn:aws:sns:us-east-1:123456789012:my-topic-arn" + enable_debug_logging: bool = False + stack_name: str = "my-stack-name" + state_table_name: str = "my-state-table-name" + config_table_name: str = "my-config-table-name" + maintenance_window_table_name: str = "my-maintenance-window-table" + scheduler_role_name: str = "my-scheduler-role-name" + default_timezone: ZoneInfo = ZoneInfo("Asia/Tokyo") + start_tags: list[str] = field( + default_factory=lambda: ["my-first-start-tag", "my-second-start-tag"] + ) + stop_tags: list[str] = field(default_factory=lambda: ["my-stop-tag"]) + schedule_tag_key: str = "Schedule" + scheduler_frequency_minutes: int = 5 + enable_ec2_ssm_maintenance_windows: bool = False + enable_rds_service: bool = True + enable_rds_clusters: bool = True + enable_docdb_service: bool = True + enable_neptune_service: bool = True + enable_rds_snapshots: bool = True + enable_ops_monitoring: bool = True diff --git a/source/app/tests/util/test_app_env.py b/source/app/tests/util/test_app_env.py index 0ecfa9cb..043f59ab 100644 --- a/source/app/tests/util/test_app_env.py +++ b/source/app/tests/util/test_app_env.py @@ -4,7 +4,6 @@ from os import environ from typing import Optional from unittest.mock import patch -from zoneinfo import ZoneInfo from pytest import fixture, raises @@ -12,10 +11,11 @@ from instance_scheduler.util.app_env import ( AppEnv, AppEnvError, - _to_bool, - _to_list, + env_to_bool, + env_to_list, get_app_env, ) +from tests.test_utils.app_env_utils import env_from_app_env, example_app_env @fixture(autouse=True) @@ 
-24,106 +24,32 @@ def reset_cached_env() -> None: def test_to_bool() -> None: - assert _to_bool("True") - assert _to_bool("true") - assert _to_bool("true ") - assert _to_bool("Yes") - assert _to_bool("yes") - assert _to_bool(" yes") - - assert not _to_bool("") - assert not _to_bool("False") - assert not _to_bool("false") - assert not _to_bool("\tfalse\r\n") - assert not _to_bool("No") - assert not _to_bool("no") - assert not _to_bool("\tno") - assert not _to_bool("Anything else") + assert env_to_bool("True") + assert env_to_bool("true") + assert env_to_bool("true ") + assert env_to_bool("Yes") + assert env_to_bool("yes") + assert env_to_bool(" yes") + + assert not env_to_bool("") + assert not env_to_bool("False") + assert not env_to_bool("false") + assert not env_to_bool("\tfalse\r\n") + assert not env_to_bool("No") + assert not env_to_bool("no") + assert not env_to_bool("\tno") + assert not env_to_bool("Anything else") def test_to_list() -> None: - assert _to_list("") == [] - - assert _to_list("a") == ["a"] - assert _to_list("a,b,c") == ["a", "b", "c"] - - assert _to_list("foo,,bar") == ["foo", "bar"] - assert _to_list("foo, bar, ") == ["foo", "bar"] - assert _to_list(" , foo , bar, ") == ["foo", "bar"] - - -def example_app_env() -> AppEnv: - return AppEnv( - scheduler_frequency_minutes=5, - log_group="my-log-group", - topic_arn="arn:aws:sns:us-east-1:123456789012:my-topic-arn", - stack_name="my-stack-name", - send_anonymous_metrics=False, - solution_id="my-solution-id", - solution_version="my-solution-version", - enable_debug_logging=True, - user_agent_extra="my-user-agent-extra", - anonymous_metrics_url="my-metrics-url", - stack_id="my-stack-id", - uuid_key="my-uuid-key", - start_ec2_batch_size=1, - schedule_tag_key="my-schedule-tag-key", - default_timezone=ZoneInfo("Asia/Tokyo"), - enable_cloudwatch_metrics=True, - enable_ec2_service=False, - enable_rds_service=True, - enable_rds_clusters=False, - enable_rds_snapshots=True, - schedule_regions=["us-east-1", 
"us-west-2"], - app_namespace="my-app-namespace", - scheduler_role_name="my-scheduler-role-name", - enable_schedule_hub_account=False, - enable_ec2_ssm_maintenance_windows=True, - start_tags=["my-first-start-tag", "my-second-start-tag"], - stop_tags=["my-stop-tag"], - enable_aws_organizations=False, - maintenance_window_table_name="my-maintenance-window-table", - config_table_name="my-config-table-name", - state_table_name="my-state-table-name", - ) + assert env_to_list("") == [] + assert env_to_list("a") == ["a"] + assert env_to_list("a,b,c") == ["a", "b", "c"] -def env_from_app_env(app_env: AppEnv) -> dict[str, str]: - return { - "SCHEDULER_FREQUENCY": str(app_env.scheduler_frequency_minutes), - "LOG_GROUP": app_env.log_group, - "ISSUES_TOPIC_ARN": app_env.topic_arn, - "STACK_NAME": app_env.stack_name, - "SEND_METRICS": str(app_env.send_anonymous_metrics), - "SOLUTION_ID": app_env.solution_id, - "SOLUTION_VERSION": app_env.solution_version, - "TRACE": str(app_env.enable_debug_logging), - "USER_AGENT_EXTRA": app_env.user_agent_extra, - "METRICS_URL": app_env.anonymous_metrics_url, - "STACK_ID": app_env.stack_id, - "UUID_KEY": app_env.uuid_key, - "START_EC2_BATCH_SIZE": str(app_env.start_ec2_batch_size), - "SCHEDULE_TAG_KEY": app_env.schedule_tag_key, - "DEFAULT_TIMEZONE": str(app_env.default_timezone), - "ENABLE_CLOUDWATCH_METRICS": str(app_env.enable_cloudwatch_metrics), - "ENABLE_EC2_SERVICE": str(app_env.enable_ec2_service), - "ENABLE_RDS_SERVICE": str(app_env.enable_rds_service), - "ENABLE_RDS_CLUSTERS": str(app_env.enable_rds_clusters), - "ENABLE_RDS_SNAPSHOTS": str(app_env.enable_rds_snapshots), - "SCHEDULE_REGIONS": ", ".join(app_env.schedule_regions), - "APP_NAMESPACE": app_env.app_namespace, - "SCHEDULER_ROLE_NAME": app_env.scheduler_role_name, - "ENABLE_SCHEDULE_HUB_ACCOUNT": str(app_env.enable_schedule_hub_account), - "ENABLE_EC2_SSM_MAINTENANCE_WINDOWS": str( - app_env.enable_ec2_ssm_maintenance_windows - ), - "START_TAGS": ", 
".join(app_env.start_tags), - "STOP_TAGS": ", ".join(app_env.stop_tags), - "ENABLE_AWS_ORGANIZATIONS": str(app_env.enable_aws_organizations), - "MAINTENANCE_WINDOW_TABLE": app_env.maintenance_window_table_name, - "CONFIG_TABLE": app_env.config_table_name, - "STATE_TABLE": app_env.state_table_name, - } + assert env_to_list("foo,,bar") == ["foo", "bar"] + assert env_to_list("foo, bar, ") == ["foo", "bar"] + assert env_to_list(" , foo , bar, ") == ["foo", "bar"] def test_get_app_env() -> None: @@ -173,16 +99,46 @@ def test_scheduled_services() -> None: app_env = example_app_env() app_env_ec2_enabled = replace( - app_env, enable_ec2_service=True, enable_rds_service=False + app_env, + enable_ec2_service=True, + enable_rds_service=False, + enable_neptune_service=False, + enable_docdb_service=False, ) assert set(app_env_ec2_enabled.scheduled_services()) == {"ec2"} app_env_rds_enabled = replace( - app_env, enable_ec2_service=False, enable_rds_service=True + app_env, + enable_ec2_service=False, + enable_rds_service=True, + enable_neptune_service=False, + enable_docdb_service=False, ) assert set(app_env_rds_enabled.scheduled_services()) == {"rds"} app_env_both_enabled = replace( - app_env, enable_ec2_service=True, enable_rds_service=True + app_env, + enable_ec2_service=True, + enable_rds_service=True, + enable_neptune_service=False, + enable_docdb_service=False, ) assert set(app_env_both_enabled.scheduled_services()) == {"ec2", "rds"} + + app_env_neptune_enabled = replace( + app_env, + enable_neptune_service=True, + enable_rds_service=False, + enable_ec2_service=False, + enable_docdb_service=False, + ) + assert set(app_env_neptune_enabled.scheduled_services()) == {"rds"} + + app_env_docdb_enabled = replace( + app_env, + enable_docdb_service=True, + enable_rds_service=False, + enable_ec2_service=False, + enable_neptune_service=False, + ) + assert set(app_env_docdb_enabled.scheduled_services()) == {"rds"} diff --git a/source/app/tests/util/test_batch.py 
b/source/app/tests/util/test_batch.py index 2a568b70..eec844af 100644 --- a/source/app/tests/util/test_batch.py +++ b/source/app/tests/util/test_batch.py @@ -6,7 +6,7 @@ from typing import Final, TypeVar from unittest.mock import MagicMock, call -from instance_scheduler.util.batch import FailureResponse, bisect_retry +from instance_scheduler.util.batch import bisect_retry T = TypeVar("T") @@ -48,26 +48,32 @@ def test_bisect_retry_single_error() -> None: # single failing input called once, failure response result = bisect_retry([failing_input], action_fail_single) - assert len(result) == 1 - assert result[0].failed_input == failing_input - assert isinstance(result[0].error, ValueError) + assert len(result.success_responses) == 0 + assert len(result.intermediate_responses) == 0 + assert len(result.failure_responses) == 1 + assert result.failure_responses[0].failed_input == failing_input + assert isinstance(result.failure_responses[0].error, ValueError) action_fail_single.assert_called_once_with([failing_input]) action_fail_single.reset_mock() # one bad input out of three, should split result = bisect_retry([2, 3, 4], action_fail_single) - assert len(result) == 2 - assert result[0].failed_input == failing_input - assert isinstance(result[0].error, ValueError) + assert len(result.success_responses) == 1 + assert len(result.intermediate_responses) == 1 + assert len(result.failure_responses) == 1 + assert result.failure_responses[0].failed_input == failing_input + assert isinstance(result.failure_responses[0].error, ValueError) assert action_fail_single.call_count == 3 action_fail_single.assert_has_calls([call([2, 3, 4]), call([2]), call([3, 4])]) action_fail_single.reset_mock() # one bad input out of ten, should split multiple times result = bisect_retry(list(range(10)), action_fail_single) - assert len(result) == 4 - assert result[1].failed_input == failing_input - assert isinstance(result[1].error, ValueError) + assert len(result.success_responses) == 3 + assert 
len(result.intermediate_responses) == 3 + assert len(result.failure_responses) == 1 + assert result.failure_responses[0].failed_input == failing_input + assert isinstance(result.failure_responses[0].error, ValueError) assert action_fail_single.call_count == 7 action_fail_single.assert_has_calls( [ @@ -93,16 +99,17 @@ def test_bisect_retry_many_errors() -> None: result = bisect_retry(inputs, action_fail_even) # half failure responses, half success responses (None) - assert len(result) == input_size + assert len(result.success_responses) == int(input_size / 2) + assert len(result.intermediate_responses) == input_size - 1 + assert len(result.failure_responses) == int(input_size / 2) # expected sum if all failing inputs are represented in failure responses expected_sum: Final = reduce(operator.add, failing_inputs) actual_sum = 0 - for item in result: - if isinstance(item, FailureResponse): - actual_sum += item.failed_input - assert isinstance(item.error, ValueError) - else: - assert item is None + for item in result.failure_responses: + actual_sum += item.failed_input + assert isinstance(item.error, ValueError) + for item in result.success_responses: + assert item is None assert actual_sum == expected_sum # \sum_{i=0}^{log_2(n)} 2^i = 2n-1 diff --git a/source/app/tests/util/test_global_config.py b/source/app/tests/util/test_global_config.py deleted file mode 100644 index 2c47558d..00000000 --- a/source/app/tests/util/test_global_config.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 -from datetime import time -from zoneinfo import ZoneInfo - -from instance_scheduler.configuration.instance_schedule import InstanceSchedule -from instance_scheduler.configuration.running_period import RunningPeriod -from instance_scheduler.configuration.running_period_dict_element import ( - RunningPeriodDictElement, -) -from instance_scheduler.configuration.scheduler_config import GlobalConfig - - -def sample_global_config() -> GlobalConfig: - return GlobalConfig( - scheduled_services=["ec2"], - schedule_clusters=False, - tag_name="Schedule", - regions=[], - default_timezone=ZoneInfo("UTC"), - schedules={ - "test-schedule": InstanceSchedule( - name="test-schedule", - periods=[ - RunningPeriodDictElement( - period=RunningPeriod( - name="test-period", - begintime=time(10, 0, 0), - endtime=time(20, 0, 0), - ) - ) - ], - ) - }, - trace=False, - enable_ssm_maintenance_windows=False, - use_metrics=False, - schedule_lambda_account=True, - create_rds_snapshot=False, - started_tags="", - stopped_tags="", - scheduler_role_name="Scheduler-Role", - namespace="dev", - organization_id="", - aws_partition="aws", - remote_account_ids=[], - ) diff --git a/source/app/tests/util/test_session_manager.py b/source/app/tests/util/test_session_manager.py new file mode 100644 index 00000000..c8f8b8a9 --- /dev/null +++ b/source/app/tests/util/test_session_manager.py @@ -0,0 +1,53 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from unittest.mock import ANY, MagicMock, patch + +from instance_scheduler.util.session_manager import assume_role + + +@patch("instance_scheduler.util.session_manager.Session") +def test_uses_regional_sts_endpoint( + mock_session: MagicMock, +) -> None: + # When assuming a role with sts, if the spoke account does not have the same region enabled as the calling region + # in sts, the assume will fail. 
To get around this, the IG requires that customers install the hub and spoke stacks + # in the same region (ensuring that the region is enabled in both accounts), as such all sts calls should use this + # local region to ensure proper cross-account, cross-region behavior (at time of writing, the local endpoints return + # a V2 token which is valid in all regions, the global endpoint returns + # a V1 token which is only valid in default regions) + mock_client = MagicMock() + mock_session.return_value.client = mock_client + region_name = "executing-region" + mock_session.return_value.region_name = region_name + + assume_role(account="111122223333", region="us-west-2", role_name="my-role-name") + + mock_client.assert_called_once_with( + "sts", + region_name=region_name, + endpoint_url=f"https://sts.{region_name}.amazonaws.com", + config=ANY, + ) + + +@patch("instance_scheduler.util.session_manager.Session") +def test_uses_correct_domain_in_china( + mock_session: MagicMock, +) -> None: + region_name = "cn-north-1" + + mock_client = MagicMock() + mock_session.return_value.client = mock_client + mock_session.return_value.region_name = region_name + mock_session.return_value.get_partition_for_region.return_value = "aws-cn" + + assume_role( + account="111122223333", region="cn-northwest-2", role_name="my-role-name" + ) + + mock_client.assert_called_once_with( + "sts", + region_name=region_name, + endpoint_url=f"https://sts.{region_name}.amazonaws.com.cn", + config=ANY, + ) diff --git a/source/app/tests/util/test_sns_handler.py b/source/app/tests/util/test_sns_handler.py new file mode 100644 index 00000000..6c50e21e --- /dev/null +++ b/source/app/tests/util/test_sns_handler.py @@ -0,0 +1,118 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from collections.abc import Iterator +from logging import Logger +from typing import TYPE_CHECKING, Any, Final + +import boto3 +from botocore.exceptions import ClientError +from moto.core.models import DEFAULT_ACCOUNT_ID +from moto.sns.models import SNSBackend, sns_backends +from pytest import fixture, raises + +from instance_scheduler.util.sns_handler import SnsHandler + +if TYPE_CHECKING: + from mypy_boto3_sns.client import SNSClient +else: + SNSClient = object + + +@fixture +def mock_topic(moto_backend: None) -> Iterator[str]: + sns: Final[SNSClient] = boto3.client("sns") + response: Final = sns.create_topic(Name="my_topic") + yield response["TopicArn"] + + +log_group_name: Final = "log_group" +log_stream_name: Final = "log_stream" + + +@fixture +def logger_with_handler(mock_topic: str) -> Iterator[Logger]: + logger: Final = Logger(__name__) + handler: Final = SnsHandler( + topic_arn=mock_topic, + log_group_name=log_group_name, + log_stream_name=log_stream_name, + ) + logger.addHandler(handler) + + yield logger + + +def get_sent_notifications( + topic_arn: str, +) -> list[tuple[str, str, str | None, dict[str, Any] | None, str | None]]: + sns_backend: Final[SNSBackend] = sns_backends[DEFAULT_ACCOUNT_ID]["us-east-1"] + return sns_backend.topics[topic_arn].sent_notifications + + +def test_sns_handler_publishes_warning( + logger_with_handler: Logger, mock_topic: str +) -> None: + message: Final = "warning" + logger_with_handler.warning(message) + + sent_notifications: Final = get_sent_notifications(mock_topic) + assert len(sent_notifications) == 1 + assert ( + sent_notifications[0][1] + == f"Loggroup: {log_group_name}\nLogstream {log_stream_name}\nWARNING : {message}" + ) + + +def test_sns_handler_publishes_error( + logger_with_handler: Logger, mock_topic: str +) -> None: + message: Final = "error" + logger_with_handler.error(message) + + sent_notifications: Final = get_sent_notifications(mock_topic) + assert 
len(sent_notifications) == 1 + assert ( + sent_notifications[0][1] + == f"Loggroup: {log_group_name}\nLogstream {log_stream_name}\nERROR : {message}" + ) + + +def test_sns_handler_suppresses_info( + logger_with_handler: Logger, mock_topic: str +) -> None: + message: Final = "info" + logger_with_handler.info(message) + + sent_notifications: Final = get_sent_notifications(mock_topic) + assert len(sent_notifications) == 0 + + +def test_sns_handler_raises_exception_when_raise_exceptions_flag_is_true( + moto_backend: None, +) -> None: + logger: Final = Logger(__name__) + handler: Final = SnsHandler( + topic_arn="arn:aws:sns:us-east-1:111111111111:fake_topic", + log_group_name=log_group_name, + log_stream_name=log_stream_name, + raise_exceptions=True, + ) + logger.addHandler(handler) + + with raises(ClientError): + logger.warning("warning") + + +def test_sns_handler_swallows_exception_when_raise_exceptions_flag_is_false( + moto_backend: None, +) -> None: + logger: Final = Logger(__name__) + handler: Final = SnsHandler( + topic_arn="arn:aws:sns:us-east-1:111111111111:fake_topic", + log_group_name=log_group_name, + log_stream_name=log_stream_name, + raise_exceptions=False, + ) + logger.addHandler(handler) + + logger.warning("warning") # Does not throw exception diff --git a/source/app/tox.ini b/source/app/tox.ini index 1df5a1a5..7e1e17b3 100644 --- a/source/app/tox.ini +++ b/source/app/tox.ini @@ -1,14 +1,14 @@ ; Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. ; SPDX-License-Identifier: Apache-2.0 [tox] -env_list = format, lint, py310-report, py311-noreport +env_list = format, lint, py311-report, py312-noreport minversion = 4.0.13 isolated_build = true [testenv:format] skip_install = true deps = - black + black~=24.1.0 isort commands = isort --profile black --check . @@ -29,7 +29,7 @@ commands = poetry run mypy . poetry run flake8 . 
-[testenv:py3{10,11}-{report, noreport}] +[testenv:py3{11,12}-{report, noreport}] allowlist_externals = poetry deps = poetry pass_env = PYTHON_VERSION diff --git a/source/cli/.gitattributes b/source/cli/.gitattributes new file mode 100644 index 00000000..f7738878 --- /dev/null +++ b/source/cli/.gitattributes @@ -0,0 +1,9 @@ +# ~~ Generated by projen. To modify, edit .projenrc.js and run "npx projen". + +/.gitattributes linguist-generated +/.gitignore linguist-generated +/.projen/** linguist-generated +/.projen/deps.json linguist-generated +/.projen/files.json linguist-generated +/.projen/tasks.json linguist-generated +/pyproject.toml linguist-generated \ No newline at end of file diff --git a/source/cli/.gitignore b/source/cli/.gitignore new file mode 100644 index 00000000..96b13f89 --- /dev/null +++ b/source/cli/.gitignore @@ -0,0 +1,83 @@ +# ~~ Generated by projen. To modify, edit .projenrc.js and run "npx projen". +node_modules/ +!/.gitattributes +!/.projen/tasks.json +!/.projen/deps.json +!/.projen/files.json +!/pyproject.toml +/poetry.toml +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +*.manifest +*.spec +pip-log.txt +pip-delete-this-directory.txt +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ +*.mo +*.pot +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal +instance/ +.webassets-cache +.scrapy +docs/_build/ +.pybuilder/ +target/ +.ipynb_checkpoints +profile_default/ +ipython_config.py +__pypackages__/ +celerybeat-schedule +celerybeat.pid +*.sage.py +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +.spyderproject +.spyproject +.ropeproject +/site +.mypy_cache/ +.dmypy.json +dmypy.json +.pyre/ +.pytype/ +cython_debug/ diff --git a/source/cli/.projen/deps.json 
b/source/cli/.projen/deps.json new file mode 100644 index 00000000..392eab76 --- /dev/null +++ b/source/cli/.projen/deps.json @@ -0,0 +1,85 @@ +{ + "dependencies": [ + { + "name": "black", + "version": "^24.3.0", + "type": "devenv" + }, + { + "name": "boto3-stubs-lite", + "version": "{version = \"^1.34.1\", extras = [\"cloudformation\",\"lambda\"]}", + "type": "devenv" + }, + { + "name": "flake8", + "version": "^6.1.0", + "type": "devenv" + }, + { + "name": "isort", + "version": "^5.12.0", + "type": "devenv" + }, + { + "name": "jsonschema", + "version": "~4.17.3", + "type": "devenv" + }, + { + "name": "moto", + "version": "{version = \"^5.0.2\", extras = [\"cloudformation\",\"lambda\"]}", + "type": "devenv" + }, + { + "name": "mypy", + "version": "^1.7.1", + "type": "devenv" + }, + { + "name": "pytest-cov", + "version": "^4.1.0", + "type": "devenv" + }, + { + "name": "pytest", + "version": "^7.4.3", + "type": "devenv" + }, + { + "name": "tox", + "version": "^4.11.4", + "type": "devenv" + }, + { + "name": "types-jmespath", + "version": "^1.0.1", + "type": "devenv" + }, + { + "name": "types-PyYAML", + "version": "^6.0.12.12", + "type": "devenv" + }, + { + "name": "types-requests", + "version": "2.31.0.6", + "type": "devenv" + }, + { + "name": "boto3", + "version": "^1.34.1", + "type": "runtime" + }, + { + "name": "jmespath", + "version": "^1.0.1", + "type": "runtime" + }, + { + "name": "python", + "version": "^3.8.1", + "type": "runtime" + } + ], + "//": "~~ Generated by projen. To modify, edit .projenrc.js and run \"npx projen\"." +} diff --git a/source/cli/.projen/files.json b/source/cli/.projen/files.json new file mode 100644 index 00000000..ca22e0c3 --- /dev/null +++ b/source/cli/.projen/files.json @@ -0,0 +1,12 @@ +{ + "files": [ + ".gitattributes", + ".gitignore", + ".projen/deps.json", + ".projen/files.json", + ".projen/tasks.json", + "poetry.toml", + "pyproject.toml" + ], + "//": "~~ Generated by projen. To modify, edit .projenrc.js and run \"npx projen\"." 
+} diff --git a/source/cli/.projen/tasks.json b/source/cli/.projen/tasks.json new file mode 100644 index 00000000..15242f7e --- /dev/null +++ b/source/cli/.projen/tasks.json @@ -0,0 +1,101 @@ +{ + "tasks": { + "build": { + "name": "build", + "description": "Full release build", + "steps": [ + { + "spawn": "pre-compile" + }, + { + "spawn": "compile" + }, + { + "spawn": "post-compile" + }, + { + "spawn": "test" + }, + { + "spawn": "package" + } + ] + }, + "compile": { + "name": "compile", + "description": "Only compile" + }, + "default": { + "name": "default", + "description": "Synthesize project files", + "steps": [ + { + "exec": "npx projen default", + "cwd": "../.." + } + ] + }, + "install": { + "name": "install", + "description": "Install dependencies and update lockfile", + "steps": [ + { + "exec": "poetry lock --no-update && poetry install" + } + ] + }, + "install:ci": { + "name": "install:ci", + "description": "Install dependencies with frozen lockfile", + "steps": [ + { + "exec": "poetry check --lock && poetry install" + } + ] + }, + "package": { + "name": "package", + "description": "Creates the distribution package", + "steps": [ + { + "exec": "poetry build" + } + ] + }, + "post-compile": { + "name": "post-compile", + "description": "Runs after successful compilation" + }, + "pre-compile": { + "name": "pre-compile", + "description": "Prepare the project for compilation" + }, + "publish": { + "name": "publish", + "description": "Uploads the package to PyPI.", + "steps": [ + { + "exec": "poetry publish" + } + ] + }, + "publish:test": { + "name": "publish:test", + "description": "Uploads the package against a test PyPI endpoint.", + "steps": [ + { + "exec": "poetry publish -r testpypi" + } + ] + }, + "test": { + "name": "test", + "description": "Run tests" + } + }, + "env": { + "VIRTUAL_ENV": "$(poetry env info -p || poetry run poetry env info -p)", + "PATH": "$(echo $(poetry env info -p)/bin:$PATH)" + }, + "//": "~~ Generated by projen. 
To modify, edit .projenrc.js and run \"npx projen\"." +} diff --git a/source/cli/README.md b/source/cli/README.md new file mode 100644 index 00000000..22247b66 --- /dev/null +++ b/source/cli/README.md @@ -0,0 +1 @@ +# Instance Scheduler on AWS CLI diff --git a/source/cli/instance_scheduler_cli/__init__.py b/source/cli/instance_scheduler_cli/__init__.py index 5766d9e6..898ea4fd 100644 --- a/source/cli/instance_scheduler_cli/__init__.py +++ b/source/cli/instance_scheduler_cli/__init__.py @@ -3,5 +3,3 @@ from importlib.metadata import version __version__ = version(__package__) - -del version diff --git a/source/cli/instance_scheduler_cli/scheduler_cli.py b/source/cli/instance_scheduler_cli/scheduler_cli.py index 86ea750e..72c85ea8 100644 --- a/source/cli/instance_scheduler_cli/scheduler_cli.py +++ b/source/cli/instance_scheduler_cli/scheduler_cli.py @@ -35,7 +35,6 @@ HELP_PERIOD_WEEKDAYS = "Weekdays of the period" HELP_QUERY = "JMESPath query to transform or filter the result" HELP_REGION = "Region in which the Instance Scheduler stack is deployed" -HELP_SCHEDULE_CLOUDWATCH_METRICS = "Enable CloudWatch metrics for this schedule" HELP_SCHEDULE_DESCRIPTION = "Description for the schedule." HELP_SCHEDULE_ENFORCED = "Enforce schedule state for instance." HELP_SCHEDULE_HIBERNATE = "Hibernate EC2 instances if possible when stopped." 
@@ -75,14 +74,12 @@ PARAM_ENFORCED = "--enforced" PARAM_HIBERNATE = "--hibernate" PARAM_RETAINED_RUNNING = "--retain-running" -PARAM_METRICS = "--use-metrics" PARAM_MONTHDAYS = "--monthdays" PARAM_MONTHS = "--months" PARAM_OVERRIDE = "--override-status" PARAM_PERIODS = "--periods" PARAM_STARTDATE = "--startdate" PARAM_KEEP_NEW = "--do-not-stop-new-instances" -PARAM_USE_MAIN = "--use-maintenance-window" PARAM_WEEKDAYS = "--weekdays" PARAM_TIMEZONE = "--timezone" PARAM_SSM_MAINTENCE_WINDOW = "--ssm-maintenance-window" @@ -156,6 +153,7 @@ def handle_command(args: Any, command: str) -> int: and not hasattr(getattr(args, a), "__call__") ) }, + "version": __version__, } payload = str.encode(json.dumps(event)) @@ -234,18 +232,10 @@ def add_schedule_arguments(schedule_parser: ArgumentParser) -> None: help=HELP_SCHEDULE_KEEP_NEW, ) - schedule_parser.add_argument( - PARAM_USE_MAIN, - default=False, - dest="use_maintenance_window", - action="store_true", - help=HELP_SCHEDULE_USE_MAIN, - ) - schedule_parser.add_argument( PARAM_SSM_MAINTENCE_WINDOW, help=HELP_SCHEDULE_SSM_MAINTENANCE_WINDOW, - type=str, + nargs="+", ) schedule_parser.add_argument( @@ -272,14 +262,6 @@ def add_schedule_arguments(schedule_parser: ArgumentParser) -> None: help=HELP_SCHEDULE_HIBERNATE, ) - schedule_parser.add_argument( - PARAM_METRICS, - default=False, - dest="use-metrics", - action="store_true", - help=HELP_SCHEDULE_CLOUDWATCH_METRICS, - ) - def build_describe_schedules_parser() -> None: sub_parser = subparsers.add_parser( CMD_DESCRIBE_SCHEDULES, help=HELP_CMD_DESCRIBE_SCHEDULES diff --git a/source/cli/mypy.ini b/source/cli/mypy.ini index b8a44f83..aa3625ea 100644 --- a/source/cli/mypy.ini +++ b/source/cli/mypy.ini @@ -1,8 +1,5 @@ [mypy] strict = True -[mypy-cli_test_helpers] -ignore_missing_imports = True - [mypy-moto] ignore_missing_imports = True diff --git a/source/cli/poetry.lock b/source/cli/poetry.lock index 77de94e8..d160071d 100644 --- a/source/cli/poetry.lock +++ b/source/cli/poetry.lock 
@@ -2,13 +2,13 @@ [[package]] name = "annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [package.dependencies] @@ -35,13 +35,13 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p [[package]] name = "aws-sam-translator" -version = "1.88.0" +version = "1.89.0" description = "AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates" optional = false python-versions = "!=4.0,<=4.0,>=3.8" files = [ - {file = "aws_sam_translator-1.88.0-py3-none-any.whl", hash = "sha256:aa93d498d8de3fb3d485c316155b1628144b823bbc176099a20de06df666fcac"}, - {file = "aws_sam_translator-1.88.0.tar.gz", hash = "sha256:e77c65f3488566122277accd44a0f1ec018e37403e0d5fe25120d96e537e91a7"}, + {file = "aws_sam_translator-1.89.0-py3-none-any.whl", hash = "sha256:843be1b5ca7634f700ad0c844a7e0dc42858f35da502e91691473eadd1731ded"}, + {file = "aws_sam_translator-1.89.0.tar.gz", hash = "sha256:fff1005d0b1f3cb511d0ac7e85f54af06afc9d9e433df013a2338d7a0168d174"}, ] [package.dependencies] @@ -55,13 +55,13 @@ dev = ["black (==24.3.0)", "boto3 (>=1.23,<2)", "boto3-stubs[appconfig,serverles [[package]] name = "aws-xray-sdk" -version = "2.13.0" +version = "2.13.1" description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers to record and emit information from within their applications to 
the AWS X-Ray service." optional = false python-versions = ">=3.7" files = [ - {file = "aws-xray-sdk-2.13.0.tar.gz", hash = "sha256:816186126917bc35ae4e6e2f304702a43d494ecef34a39e6330f5018bdecc9f5"}, - {file = "aws_xray_sdk-2.13.0-py2.py3-none-any.whl", hash = "sha256:d18604a8688b4bed03ce4a858cc9acd72b71400e085bf7512fc31cf657ca85f9"}, + {file = "aws-xray-sdk-2.13.1.tar.gz", hash = "sha256:911d634c23e0693f585c4cab08d43ab5177f872de416fdc8dd0fe3b170b52835"}, + {file = "aws_xray_sdk-2.13.1-py2.py3-none-any.whl", hash = "sha256:3da9d3b3d63c62f7745b987d80a157a30f4a0cd1db7e340b8f40f4d6aab30e12"}, ] [package.dependencies] @@ -116,17 +116,17 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.34.99" +version = "1.34.114" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.34.99-py3-none-any.whl", hash = "sha256:b54084d000483b578757df03ce39a819fbba47071c9aa98611beb8806bcecd45"}, - {file = "boto3-1.34.99.tar.gz", hash = "sha256:6f600b3fe0bda53476395c902d9af5a47294c93ec52a9cdc2b926a9dc705ce79"}, + {file = "boto3-1.34.114-py3-none-any.whl", hash = "sha256:4460958d2b0c53bd2195b23ed5d45db2350e514486fe8caeb38b285b30742280"}, + {file = "boto3-1.34.114.tar.gz", hash = "sha256:eeb11bca9b19d12baf93436fb8a16b8b824f1f7e8b9bcc722607e862c46b1b08"}, ] [package.dependencies] -botocore = ">=1.34.99,<1.35.0" +botocore = ">=1.34.114,<1.35.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -135,13 +135,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs-lite" -version = "1.34.99" -description = "Type annotations for boto3 1.34.99 generated with mypy-boto3-builder 7.24.0" +version = "1.34.114" +description = "Type annotations for boto3 1.34.114 generated with mypy-boto3-builder 7.24.0" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs_lite-1.34.99-py3-none-any.whl", hash = "sha256:6c94a9e4f00eb3197c9a7ad1a03a7681e5167513075aac5627869fae3928410f"}, 
- {file = "boto3_stubs_lite-1.34.99.tar.gz", hash = "sha256:8d78bdd6a3ddb844f7f6b9f554152b056aadf7210ba57e82f57bb35fc1c66669"}, + {file = "boto3_stubs_lite-1.34.114-py3-none-any.whl", hash = "sha256:a522a772ed5382d13c039b26080da116abca40e6c0217d4549972febbdaa09e9"}, + {file = "boto3_stubs_lite-1.34.114.tar.gz", hash = "sha256:76abce810ca50f1628e7ccb745dfd0af4b400f684313f615c3fe660a1da4a60f"}, ] [package.dependencies] @@ -157,7 +157,7 @@ account = ["mypy-boto3-account (>=1.34.0,<1.35.0)"] acm = ["mypy-boto3-acm (>=1.34.0,<1.35.0)"] acm-pca = ["mypy-boto3-acm-pca (>=1.34.0,<1.35.0)"] alexaforbusiness = ["mypy-boto3-alexaforbusiness (>=1.34.0,<1.35.0)"] -all = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)", "mypy-boto3-account (>=1.34.0,<1.35.0)", "mypy-boto3-acm (>=1.34.0,<1.35.0)", "mypy-boto3-acm-pca (>=1.34.0,<1.35.0)", "mypy-boto3-alexaforbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-amp (>=1.34.0,<1.35.0)", "mypy-boto3-amplify (>=1.34.0,<1.35.0)", "mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)", "mypy-boto3-amplifyuibuilder (>=1.34.0,<1.35.0)", "mypy-boto3-apigateway (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewaymanagementapi (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewayv2 (>=1.34.0,<1.35.0)", "mypy-boto3-appconfig (>=1.34.0,<1.35.0)", "mypy-boto3-appconfigdata (>=1.34.0,<1.35.0)", "mypy-boto3-appfabric (>=1.34.0,<1.35.0)", "mypy-boto3-appflow (>=1.34.0,<1.35.0)", "mypy-boto3-appintegrations (>=1.34.0,<1.35.0)", "mypy-boto3-application-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-application-insights (>=1.34.0,<1.35.0)", "mypy-boto3-applicationcostprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-appmesh (>=1.34.0,<1.35.0)", "mypy-boto3-apprunner (>=1.34.0,<1.35.0)", "mypy-boto3-appstream (>=1.34.0,<1.35.0)", "mypy-boto3-appsync (>=1.34.0,<1.35.0)", "mypy-boto3-arc-zonal-shift (>=1.34.0,<1.35.0)", "mypy-boto3-artifact (>=1.34.0,<1.35.0)", "mypy-boto3-athena (>=1.34.0,<1.35.0)", "mypy-boto3-auditmanager (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling (>=1.34.0,<1.35.0)", 
"mypy-boto3-autoscaling-plans (>=1.34.0,<1.35.0)", "mypy-boto3-b2bi (>=1.34.0,<1.35.0)", "mypy-boto3-backup (>=1.34.0,<1.35.0)", "mypy-boto3-backup-gateway (>=1.34.0,<1.35.0)", "mypy-boto3-backupstorage (>=1.34.0,<1.35.0)", "mypy-boto3-batch (>=1.34.0,<1.35.0)", "mypy-boto3-bcm-data-exports (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-billingconductor (>=1.34.0,<1.35.0)", "mypy-boto3-braket (>=1.34.0,<1.35.0)", "mypy-boto3-budgets (>=1.34.0,<1.35.0)", "mypy-boto3-ce (>=1.34.0,<1.35.0)", "mypy-boto3-chatbot (>=1.34.0,<1.35.0)", "mypy-boto3-chime (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-identity (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-meetings (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-messaging (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-voice (>=1.34.0,<1.35.0)", "mypy-boto3-cleanrooms (>=1.34.0,<1.35.0)", "mypy-boto3-cleanroomsml (>=1.34.0,<1.35.0)", "mypy-boto3-cloud9 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudcontrol (>=1.34.0,<1.35.0)", "mypy-boto3-clouddirectory (>=1.34.0,<1.35.0)", "mypy-boto3-cloudformation (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsm (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsmv2 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearch (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearchdomain (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail-data (>=1.34.0,<1.35.0)", "mypy-boto3-cloudwatch (>=1.34.0,<1.35.0)", "mypy-boto3-codeartifact (>=1.34.0,<1.35.0)", "mypy-boto3-codebuild (>=1.34.0,<1.35.0)", "mypy-boto3-codecatalyst (>=1.34.0,<1.35.0)", "mypy-boto3-codecommit (>=1.34.0,<1.35.0)", "mypy-boto3-codeconnections (>=1.34.0,<1.35.0)", "mypy-boto3-codedeploy (>=1.34.0,<1.35.0)", 
"mypy-boto3-codeguru-reviewer (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-security (>=1.34.0,<1.35.0)", "mypy-boto3-codeguruprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-codepipeline (>=1.34.0,<1.35.0)", "mypy-boto3-codestar (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-connections (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-notifications (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-identity (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-idp (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-sync (>=1.34.0,<1.35.0)", "mypy-boto3-comprehend (>=1.34.0,<1.35.0)", "mypy-boto3-comprehendmedical (>=1.34.0,<1.35.0)", "mypy-boto3-compute-optimizer (>=1.34.0,<1.35.0)", "mypy-boto3-config (>=1.34.0,<1.35.0)", "mypy-boto3-connect (>=1.34.0,<1.35.0)", "mypy-boto3-connect-contact-lens (>=1.34.0,<1.35.0)", "mypy-boto3-connectcampaigns (>=1.34.0,<1.35.0)", "mypy-boto3-connectcases (>=1.34.0,<1.35.0)", "mypy-boto3-connectparticipant (>=1.34.0,<1.35.0)", "mypy-boto3-controlcatalog (>=1.34.0,<1.35.0)", "mypy-boto3-controltower (>=1.34.0,<1.35.0)", "mypy-boto3-cost-optimization-hub (>=1.34.0,<1.35.0)", "mypy-boto3-cur (>=1.34.0,<1.35.0)", "mypy-boto3-customer-profiles (>=1.34.0,<1.35.0)", "mypy-boto3-databrew (>=1.34.0,<1.35.0)", "mypy-boto3-dataexchange (>=1.34.0,<1.35.0)", "mypy-boto3-datapipeline (>=1.34.0,<1.35.0)", "mypy-boto3-datasync (>=1.34.0,<1.35.0)", "mypy-boto3-datazone (>=1.34.0,<1.35.0)", "mypy-boto3-dax (>=1.34.0,<1.35.0)", "mypy-boto3-deadline (>=1.34.0,<1.35.0)", "mypy-boto3-detective (>=1.34.0,<1.35.0)", "mypy-boto3-devicefarm (>=1.34.0,<1.35.0)", "mypy-boto3-devops-guru (>=1.34.0,<1.35.0)", "mypy-boto3-directconnect (>=1.34.0,<1.35.0)", "mypy-boto3-discovery (>=1.34.0,<1.35.0)", "mypy-boto3-dlm (>=1.34.0,<1.35.0)", "mypy-boto3-dms (>=1.34.0,<1.35.0)", "mypy-boto3-docdb (>=1.34.0,<1.35.0)", "mypy-boto3-docdb-elastic (>=1.34.0,<1.35.0)", "mypy-boto3-drs (>=1.34.0,<1.35.0)", "mypy-boto3-ds (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodb (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodbstreams (>=1.34.0,<1.35.0)", 
"mypy-boto3-ebs (>=1.34.0,<1.35.0)", "mypy-boto3-ec2 (>=1.34.0,<1.35.0)", "mypy-boto3-ec2-instance-connect (>=1.34.0,<1.35.0)", "mypy-boto3-ecr (>=1.34.0,<1.35.0)", "mypy-boto3-ecr-public (>=1.34.0,<1.35.0)", "mypy-boto3-ecs (>=1.34.0,<1.35.0)", "mypy-boto3-efs (>=1.34.0,<1.35.0)", "mypy-boto3-eks (>=1.34.0,<1.35.0)", "mypy-boto3-eks-auth (>=1.34.0,<1.35.0)", "mypy-boto3-elastic-inference (>=1.34.0,<1.35.0)", "mypy-boto3-elasticache (>=1.34.0,<1.35.0)", "mypy-boto3-elasticbeanstalk (>=1.34.0,<1.35.0)", "mypy-boto3-elastictranscoder (>=1.34.0,<1.35.0)", "mypy-boto3-elb (>=1.34.0,<1.35.0)", "mypy-boto3-elbv2 (>=1.34.0,<1.35.0)", "mypy-boto3-emr (>=1.34.0,<1.35.0)", "mypy-boto3-emr-containers (>=1.34.0,<1.35.0)", "mypy-boto3-emr-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-entityresolution (>=1.34.0,<1.35.0)", "mypy-boto3-es (>=1.34.0,<1.35.0)", "mypy-boto3-events (>=1.34.0,<1.35.0)", "mypy-boto3-evidently (>=1.34.0,<1.35.0)", "mypy-boto3-finspace (>=1.34.0,<1.35.0)", "mypy-boto3-finspace-data (>=1.34.0,<1.35.0)", "mypy-boto3-firehose (>=1.34.0,<1.35.0)", "mypy-boto3-fis (>=1.34.0,<1.35.0)", "mypy-boto3-fms (>=1.34.0,<1.35.0)", "mypy-boto3-forecast (>=1.34.0,<1.35.0)", "mypy-boto3-forecastquery (>=1.34.0,<1.35.0)", "mypy-boto3-frauddetector (>=1.34.0,<1.35.0)", "mypy-boto3-freetier (>=1.34.0,<1.35.0)", "mypy-boto3-fsx (>=1.34.0,<1.35.0)", "mypy-boto3-gamelift (>=1.34.0,<1.35.0)", "mypy-boto3-glacier (>=1.34.0,<1.35.0)", "mypy-boto3-globalaccelerator (>=1.34.0,<1.35.0)", "mypy-boto3-glue (>=1.34.0,<1.35.0)", "mypy-boto3-grafana (>=1.34.0,<1.35.0)", "mypy-boto3-greengrass (>=1.34.0,<1.35.0)", "mypy-boto3-greengrassv2 (>=1.34.0,<1.35.0)", "mypy-boto3-groundstation (>=1.34.0,<1.35.0)", "mypy-boto3-guardduty (>=1.34.0,<1.35.0)", "mypy-boto3-health (>=1.34.0,<1.35.0)", "mypy-boto3-healthlake (>=1.34.0,<1.35.0)", "mypy-boto3-honeycode (>=1.34.0,<1.35.0)", "mypy-boto3-iam (>=1.34.0,<1.35.0)", "mypy-boto3-identitystore (>=1.34.0,<1.35.0)", "mypy-boto3-imagebuilder 
(>=1.34.0,<1.35.0)", "mypy-boto3-importexport (>=1.34.0,<1.35.0)", "mypy-boto3-inspector (>=1.34.0,<1.35.0)", "mypy-boto3-inspector-scan (>=1.34.0,<1.35.0)", "mypy-boto3-inspector2 (>=1.34.0,<1.35.0)", "mypy-boto3-internetmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-iot (>=1.34.0,<1.35.0)", "mypy-boto3-iot-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot-jobs-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-devices (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-projects (>=1.34.0,<1.35.0)", "mypy-boto3-iotanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-iotdeviceadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents-data (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleethub (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleetwise (>=1.34.0,<1.35.0)", "mypy-boto3-iotsecuretunneling (>=1.34.0,<1.35.0)", "mypy-boto3-iotsitewise (>=1.34.0,<1.35.0)", "mypy-boto3-iotthingsgraph (>=1.34.0,<1.35.0)", "mypy-boto3-iottwinmaker (>=1.34.0,<1.35.0)", "mypy-boto3-iotwireless (>=1.34.0,<1.35.0)", "mypy-boto3-ivs (>=1.34.0,<1.35.0)", "mypy-boto3-ivs-realtime (>=1.34.0,<1.35.0)", "mypy-boto3-ivschat (>=1.34.0,<1.35.0)", "mypy-boto3-kafka (>=1.34.0,<1.35.0)", "mypy-boto3-kafkaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-kendra (>=1.34.0,<1.35.0)", "mypy-boto3-kendra-ranking (>=1.34.0,<1.35.0)", "mypy-boto3-keyspaces (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-archived-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-signaling (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisvideo (>=1.34.0,<1.35.0)", "mypy-boto3-kms (>=1.34.0,<1.35.0)", "mypy-boto3-lakeformation (>=1.34.0,<1.35.0)", "mypy-boto3-lambda (>=1.34.0,<1.35.0)", "mypy-boto3-launch-wizard (>=1.34.0,<1.35.0)", "mypy-boto3-lex-models (>=1.34.0,<1.35.0)", "mypy-boto3-lex-runtime 
(>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-models (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-lightsail (>=1.34.0,<1.35.0)", "mypy-boto3-location (>=1.34.0,<1.35.0)", "mypy-boto3-logs (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutequipment (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutmetrics (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)", "mypy-boto3-m2 (>=1.34.0,<1.35.0)", "mypy-boto3-machinelearning (>=1.34.0,<1.35.0)", "mypy-boto3-macie2 (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-catalog (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-deployment (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-entitlement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconvert (>=1.34.0,<1.35.0)", "mypy-boto3-medialive (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage-vod (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackagev2 (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore-data (>=1.34.0,<1.35.0)", "mypy-boto3-mediatailor (>=1.34.0,<1.35.0)", "mypy-boto3-medical-imaging (>=1.34.0,<1.35.0)", "mypy-boto3-memorydb (>=1.34.0,<1.35.0)", "mypy-boto3-meteringmarketplace (>=1.34.0,<1.35.0)", "mypy-boto3-mgh (>=1.34.0,<1.35.0)", "mypy-boto3-mgn (>=1.34.0,<1.35.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhub-config (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhuborchestrator (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhubstrategy (>=1.34.0,<1.35.0)", "mypy-boto3-mobile (>=1.34.0,<1.35.0)", 
"mypy-boto3-mq (>=1.34.0,<1.35.0)", "mypy-boto3-mturk (>=1.34.0,<1.35.0)", "mypy-boto3-mwaa (>=1.34.0,<1.35.0)", "mypy-boto3-neptune (>=1.34.0,<1.35.0)", "mypy-boto3-neptune-graph (>=1.34.0,<1.35.0)", "mypy-boto3-neptunedata (>=1.34.0,<1.35.0)", "mypy-boto3-network-firewall (>=1.34.0,<1.35.0)", "mypy-boto3-networkmanager (>=1.34.0,<1.35.0)", "mypy-boto3-networkmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-nimble (>=1.34.0,<1.35.0)", "mypy-boto3-oam (>=1.34.0,<1.35.0)", "mypy-boto3-omics (>=1.34.0,<1.35.0)", "mypy-boto3-opensearch (>=1.34.0,<1.35.0)", "mypy-boto3-opensearchserverless (>=1.34.0,<1.35.0)", "mypy-boto3-opsworks (>=1.34.0,<1.35.0)", "mypy-boto3-opsworkscm (>=1.34.0,<1.35.0)", "mypy-boto3-organizations (>=1.34.0,<1.35.0)", "mypy-boto3-osis (>=1.34.0,<1.35.0)", "mypy-boto3-outposts (>=1.34.0,<1.35.0)", "mypy-boto3-panorama (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography-data (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-ad (>=1.34.0,<1.35.0)", "mypy-boto3-personalize (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-events (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-pi (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-email (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.34.0,<1.35.0)", "mypy-boto3-pipes (>=1.34.0,<1.35.0)", "mypy-boto3-polly (>=1.34.0,<1.35.0)", "mypy-boto3-pricing (>=1.34.0,<1.35.0)", "mypy-boto3-privatenetworks (>=1.34.0,<1.35.0)", "mypy-boto3-proton (>=1.34.0,<1.35.0)", "mypy-boto3-qbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-qconnect (>=1.34.0,<1.35.0)", "mypy-boto3-qldb (>=1.34.0,<1.35.0)", "mypy-boto3-qldb-session (>=1.34.0,<1.35.0)", "mypy-boto3-quicksight (>=1.34.0,<1.35.0)", "mypy-boto3-ram (>=1.34.0,<1.35.0)", "mypy-boto3-rbin (>=1.34.0,<1.35.0)", "mypy-boto3-rds (>=1.34.0,<1.35.0)", "mypy-boto3-rds-data (>=1.34.0,<1.35.0)", 
"mypy-boto3-redshift (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-rekognition (>=1.34.0,<1.35.0)", "mypy-boto3-repostspace (>=1.34.0,<1.35.0)", "mypy-boto3-resiliencehub (>=1.34.0,<1.35.0)", "mypy-boto3-resource-explorer-2 (>=1.34.0,<1.35.0)", "mypy-boto3-resource-groups (>=1.34.0,<1.35.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.34.0,<1.35.0)", "mypy-boto3-robomaker (>=1.34.0,<1.35.0)", "mypy-boto3-rolesanywhere (>=1.34.0,<1.35.0)", "mypy-boto3-route53 (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-cluster (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-control-config (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-readiness (>=1.34.0,<1.35.0)", "mypy-boto3-route53domains (>=1.34.0,<1.35.0)", "mypy-boto3-route53profiles (>=1.34.0,<1.35.0)", "mypy-boto3-route53resolver (>=1.34.0,<1.35.0)", "mypy-boto3-rum (>=1.34.0,<1.35.0)", "mypy-boto3-s3 (>=1.34.0,<1.35.0)", "mypy-boto3-s3control (>=1.34.0,<1.35.0)", "mypy-boto3-s3outposts (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-edge (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-geospatial (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-metrics (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-savingsplans (>=1.34.0,<1.35.0)", "mypy-boto3-scheduler (>=1.34.0,<1.35.0)", "mypy-boto3-schemas (>=1.34.0,<1.35.0)", "mypy-boto3-sdb (>=1.34.0,<1.35.0)", "mypy-boto3-secretsmanager (>=1.34.0,<1.35.0)", "mypy-boto3-securityhub (>=1.34.0,<1.35.0)", "mypy-boto3-securitylake (>=1.34.0,<1.35.0)", "mypy-boto3-serverlessrepo (>=1.34.0,<1.35.0)", "mypy-boto3-service-quotas (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog-appregistry (>=1.34.0,<1.35.0)", "mypy-boto3-servicediscovery (>=1.34.0,<1.35.0)", 
"mypy-boto3-ses (>=1.34.0,<1.35.0)", "mypy-boto3-sesv2 (>=1.34.0,<1.35.0)", "mypy-boto3-shield (>=1.34.0,<1.35.0)", "mypy-boto3-signer (>=1.34.0,<1.35.0)", "mypy-boto3-simspaceweaver (>=1.34.0,<1.35.0)", "mypy-boto3-sms (>=1.34.0,<1.35.0)", "mypy-boto3-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-snow-device-management (>=1.34.0,<1.35.0)", "mypy-boto3-snowball (>=1.34.0,<1.35.0)", "mypy-boto3-sns (>=1.34.0,<1.35.0)", "mypy-boto3-sqs (>=1.34.0,<1.35.0)", "mypy-boto3-ssm (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)", "mypy-boto3-sso (>=1.34.0,<1.35.0)", "mypy-boto3-sso-admin (>=1.34.0,<1.35.0)", "mypy-boto3-sso-oidc (>=1.34.0,<1.35.0)", "mypy-boto3-stepfunctions (>=1.34.0,<1.35.0)", "mypy-boto3-storagegateway (>=1.34.0,<1.35.0)", "mypy-boto3-sts (>=1.34.0,<1.35.0)", "mypy-boto3-supplychain (>=1.34.0,<1.35.0)", "mypy-boto3-support (>=1.34.0,<1.35.0)", "mypy-boto3-support-app (>=1.34.0,<1.35.0)", "mypy-boto3-swf (>=1.34.0,<1.35.0)", "mypy-boto3-synthetics (>=1.34.0,<1.35.0)", "mypy-boto3-textract (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-influxdb (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-query (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-write (>=1.34.0,<1.35.0)", "mypy-boto3-tnb (>=1.34.0,<1.35.0)", "mypy-boto3-transcribe (>=1.34.0,<1.35.0)", "mypy-boto3-transfer (>=1.34.0,<1.35.0)", "mypy-boto3-translate (>=1.34.0,<1.35.0)", "mypy-boto3-trustedadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-verifiedpermissions (>=1.34.0,<1.35.0)", "mypy-boto3-voice-id (>=1.34.0,<1.35.0)", "mypy-boto3-vpc-lattice (>=1.34.0,<1.35.0)", "mypy-boto3-waf (>=1.34.0,<1.35.0)", "mypy-boto3-waf-regional (>=1.34.0,<1.35.0)", "mypy-boto3-wafv2 (>=1.34.0,<1.35.0)", "mypy-boto3-wellarchitected (>=1.34.0,<1.35.0)", "mypy-boto3-wisdom (>=1.34.0,<1.35.0)", "mypy-boto3-workdocs (>=1.34.0,<1.35.0)", "mypy-boto3-worklink (>=1.34.0,<1.35.0)", "mypy-boto3-workmail (>=1.34.0,<1.35.0)", 
"mypy-boto3-workmailmessageflow (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-thin-client (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-web (>=1.34.0,<1.35.0)", "mypy-boto3-xray (>=1.34.0,<1.35.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)", "mypy-boto3-account (>=1.34.0,<1.35.0)", "mypy-boto3-acm (>=1.34.0,<1.35.0)", "mypy-boto3-acm-pca (>=1.34.0,<1.35.0)", "mypy-boto3-alexaforbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-amp (>=1.34.0,<1.35.0)", "mypy-boto3-amplify (>=1.34.0,<1.35.0)", "mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)", "mypy-boto3-amplifyuibuilder (>=1.34.0,<1.35.0)", "mypy-boto3-apigateway (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewaymanagementapi (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewayv2 (>=1.34.0,<1.35.0)", "mypy-boto3-appconfig (>=1.34.0,<1.35.0)", "mypy-boto3-appconfigdata (>=1.34.0,<1.35.0)", "mypy-boto3-appfabric (>=1.34.0,<1.35.0)", "mypy-boto3-appflow (>=1.34.0,<1.35.0)", "mypy-boto3-appintegrations (>=1.34.0,<1.35.0)", "mypy-boto3-application-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-application-insights (>=1.34.0,<1.35.0)", "mypy-boto3-applicationcostprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-appmesh (>=1.34.0,<1.35.0)", "mypy-boto3-apprunner (>=1.34.0,<1.35.0)", "mypy-boto3-appstream (>=1.34.0,<1.35.0)", "mypy-boto3-appsync (>=1.34.0,<1.35.0)", "mypy-boto3-arc-zonal-shift (>=1.34.0,<1.35.0)", "mypy-boto3-artifact (>=1.34.0,<1.35.0)", "mypy-boto3-athena (>=1.34.0,<1.35.0)", "mypy-boto3-auditmanager (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling-plans (>=1.34.0,<1.35.0)", "mypy-boto3-b2bi (>=1.34.0,<1.35.0)", "mypy-boto3-backup (>=1.34.0,<1.35.0)", "mypy-boto3-backup-gateway (>=1.34.0,<1.35.0)", "mypy-boto3-backupstorage (>=1.34.0,<1.35.0)", "mypy-boto3-batch (>=1.34.0,<1.35.0)", "mypy-boto3-bcm-data-exports (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)", 
"mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-billingconductor (>=1.34.0,<1.35.0)", "mypy-boto3-braket (>=1.34.0,<1.35.0)", "mypy-boto3-budgets (>=1.34.0,<1.35.0)", "mypy-boto3-ce (>=1.34.0,<1.35.0)", "mypy-boto3-chatbot (>=1.34.0,<1.35.0)", "mypy-boto3-chime (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-identity (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-meetings (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-messaging (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-voice (>=1.34.0,<1.35.0)", "mypy-boto3-cleanrooms (>=1.34.0,<1.35.0)", "mypy-boto3-cleanroomsml (>=1.34.0,<1.35.0)", "mypy-boto3-cloud9 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudcontrol (>=1.34.0,<1.35.0)", "mypy-boto3-clouddirectory (>=1.34.0,<1.35.0)", "mypy-boto3-cloudformation (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsm (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsmv2 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearch (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearchdomain (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail-data (>=1.34.0,<1.35.0)", "mypy-boto3-cloudwatch (>=1.34.0,<1.35.0)", "mypy-boto3-codeartifact (>=1.34.0,<1.35.0)", "mypy-boto3-codebuild (>=1.34.0,<1.35.0)", "mypy-boto3-codecatalyst (>=1.34.0,<1.35.0)", "mypy-boto3-codecommit (>=1.34.0,<1.35.0)", "mypy-boto3-codeconnections (>=1.34.0,<1.35.0)", "mypy-boto3-codedeploy (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-reviewer (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-security (>=1.34.0,<1.35.0)", "mypy-boto3-codeguruprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-codepipeline (>=1.34.0,<1.35.0)", "mypy-boto3-codestar (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-connections (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-notifications (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-identity (>=1.34.0,<1.35.0)", 
"mypy-boto3-cognito-idp (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-sync (>=1.34.0,<1.35.0)", "mypy-boto3-comprehend (>=1.34.0,<1.35.0)", "mypy-boto3-comprehendmedical (>=1.34.0,<1.35.0)", "mypy-boto3-compute-optimizer (>=1.34.0,<1.35.0)", "mypy-boto3-config (>=1.34.0,<1.35.0)", "mypy-boto3-connect (>=1.34.0,<1.35.0)", "mypy-boto3-connect-contact-lens (>=1.34.0,<1.35.0)", "mypy-boto3-connectcampaigns (>=1.34.0,<1.35.0)", "mypy-boto3-connectcases (>=1.34.0,<1.35.0)", "mypy-boto3-connectparticipant (>=1.34.0,<1.35.0)", "mypy-boto3-controlcatalog (>=1.34.0,<1.35.0)", "mypy-boto3-controltower (>=1.34.0,<1.35.0)", "mypy-boto3-cost-optimization-hub (>=1.34.0,<1.35.0)", "mypy-boto3-cur (>=1.34.0,<1.35.0)", "mypy-boto3-customer-profiles (>=1.34.0,<1.35.0)", "mypy-boto3-databrew (>=1.34.0,<1.35.0)", "mypy-boto3-dataexchange (>=1.34.0,<1.35.0)", "mypy-boto3-datapipeline (>=1.34.0,<1.35.0)", "mypy-boto3-datasync (>=1.34.0,<1.35.0)", "mypy-boto3-datazone (>=1.34.0,<1.35.0)", "mypy-boto3-dax (>=1.34.0,<1.35.0)", "mypy-boto3-deadline (>=1.34.0,<1.35.0)", "mypy-boto3-detective (>=1.34.0,<1.35.0)", "mypy-boto3-devicefarm (>=1.34.0,<1.35.0)", "mypy-boto3-devops-guru (>=1.34.0,<1.35.0)", "mypy-boto3-directconnect (>=1.34.0,<1.35.0)", "mypy-boto3-discovery (>=1.34.0,<1.35.0)", "mypy-boto3-dlm (>=1.34.0,<1.35.0)", "mypy-boto3-dms (>=1.34.0,<1.35.0)", "mypy-boto3-docdb (>=1.34.0,<1.35.0)", "mypy-boto3-docdb-elastic (>=1.34.0,<1.35.0)", "mypy-boto3-drs (>=1.34.0,<1.35.0)", "mypy-boto3-ds (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodb (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodbstreams (>=1.34.0,<1.35.0)", "mypy-boto3-ebs (>=1.34.0,<1.35.0)", "mypy-boto3-ec2 (>=1.34.0,<1.35.0)", "mypy-boto3-ec2-instance-connect (>=1.34.0,<1.35.0)", "mypy-boto3-ecr (>=1.34.0,<1.35.0)", "mypy-boto3-ecr-public (>=1.34.0,<1.35.0)", "mypy-boto3-ecs (>=1.34.0,<1.35.0)", "mypy-boto3-efs (>=1.34.0,<1.35.0)", "mypy-boto3-eks (>=1.34.0,<1.35.0)", "mypy-boto3-eks-auth (>=1.34.0,<1.35.0)", "mypy-boto3-elastic-inference 
(>=1.34.0,<1.35.0)", "mypy-boto3-elasticache (>=1.34.0,<1.35.0)", "mypy-boto3-elasticbeanstalk (>=1.34.0,<1.35.0)", "mypy-boto3-elastictranscoder (>=1.34.0,<1.35.0)", "mypy-boto3-elb (>=1.34.0,<1.35.0)", "mypy-boto3-elbv2 (>=1.34.0,<1.35.0)", "mypy-boto3-emr (>=1.34.0,<1.35.0)", "mypy-boto3-emr-containers (>=1.34.0,<1.35.0)", "mypy-boto3-emr-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-entityresolution (>=1.34.0,<1.35.0)", "mypy-boto3-es (>=1.34.0,<1.35.0)", "mypy-boto3-events (>=1.34.0,<1.35.0)", "mypy-boto3-evidently (>=1.34.0,<1.35.0)", "mypy-boto3-finspace (>=1.34.0,<1.35.0)", "mypy-boto3-finspace-data (>=1.34.0,<1.35.0)", "mypy-boto3-firehose (>=1.34.0,<1.35.0)", "mypy-boto3-fis (>=1.34.0,<1.35.0)", "mypy-boto3-fms (>=1.34.0,<1.35.0)", "mypy-boto3-forecast (>=1.34.0,<1.35.0)", "mypy-boto3-forecastquery (>=1.34.0,<1.35.0)", "mypy-boto3-frauddetector (>=1.34.0,<1.35.0)", "mypy-boto3-freetier (>=1.34.0,<1.35.0)", "mypy-boto3-fsx (>=1.34.0,<1.35.0)", "mypy-boto3-gamelift (>=1.34.0,<1.35.0)", "mypy-boto3-glacier (>=1.34.0,<1.35.0)", "mypy-boto3-globalaccelerator (>=1.34.0,<1.35.0)", "mypy-boto3-glue (>=1.34.0,<1.35.0)", "mypy-boto3-grafana (>=1.34.0,<1.35.0)", "mypy-boto3-greengrass (>=1.34.0,<1.35.0)", "mypy-boto3-greengrassv2 (>=1.34.0,<1.35.0)", "mypy-boto3-groundstation (>=1.34.0,<1.35.0)", "mypy-boto3-guardduty (>=1.34.0,<1.35.0)", "mypy-boto3-health (>=1.34.0,<1.35.0)", "mypy-boto3-healthlake (>=1.34.0,<1.35.0)", "mypy-boto3-honeycode (>=1.34.0,<1.35.0)", "mypy-boto3-iam (>=1.34.0,<1.35.0)", "mypy-boto3-identitystore (>=1.34.0,<1.35.0)", "mypy-boto3-imagebuilder (>=1.34.0,<1.35.0)", "mypy-boto3-importexport (>=1.34.0,<1.35.0)", "mypy-boto3-inspector (>=1.34.0,<1.35.0)", "mypy-boto3-inspector-scan (>=1.34.0,<1.35.0)", "mypy-boto3-inspector2 (>=1.34.0,<1.35.0)", "mypy-boto3-internetmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-iot (>=1.34.0,<1.35.0)", "mypy-boto3-iot-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot-jobs-data (>=1.34.0,<1.35.0)", 
"mypy-boto3-iot1click-devices (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-projects (>=1.34.0,<1.35.0)", "mypy-boto3-iotanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-iotdeviceadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents-data (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleethub (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleetwise (>=1.34.0,<1.35.0)", "mypy-boto3-iotsecuretunneling (>=1.34.0,<1.35.0)", "mypy-boto3-iotsitewise (>=1.34.0,<1.35.0)", "mypy-boto3-iotthingsgraph (>=1.34.0,<1.35.0)", "mypy-boto3-iottwinmaker (>=1.34.0,<1.35.0)", "mypy-boto3-iotwireless (>=1.34.0,<1.35.0)", "mypy-boto3-ivs (>=1.34.0,<1.35.0)", "mypy-boto3-ivs-realtime (>=1.34.0,<1.35.0)", "mypy-boto3-ivschat (>=1.34.0,<1.35.0)", "mypy-boto3-kafka (>=1.34.0,<1.35.0)", "mypy-boto3-kafkaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-kendra (>=1.34.0,<1.35.0)", "mypy-boto3-kendra-ranking (>=1.34.0,<1.35.0)", "mypy-boto3-keyspaces (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-archived-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-signaling (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisvideo (>=1.34.0,<1.35.0)", "mypy-boto3-kms (>=1.34.0,<1.35.0)", "mypy-boto3-lakeformation (>=1.34.0,<1.35.0)", "mypy-boto3-lambda (>=1.34.0,<1.35.0)", "mypy-boto3-launch-wizard (>=1.34.0,<1.35.0)", "mypy-boto3-lex-models (>=1.34.0,<1.35.0)", "mypy-boto3-lex-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-models (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-lightsail (>=1.34.0,<1.35.0)", "mypy-boto3-location 
(>=1.34.0,<1.35.0)", "mypy-boto3-logs (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutequipment (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutmetrics (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)", "mypy-boto3-m2 (>=1.34.0,<1.35.0)", "mypy-boto3-machinelearning (>=1.34.0,<1.35.0)", "mypy-boto3-macie2 (>=1.34.0,<1.35.0)", "mypy-boto3-mailmanager (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-catalog (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-deployment (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-entitlement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconvert (>=1.34.0,<1.35.0)", "mypy-boto3-medialive (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage-vod (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackagev2 (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore-data (>=1.34.0,<1.35.0)", "mypy-boto3-mediatailor (>=1.34.0,<1.35.0)", "mypy-boto3-medical-imaging (>=1.34.0,<1.35.0)", "mypy-boto3-memorydb (>=1.34.0,<1.35.0)", "mypy-boto3-meteringmarketplace (>=1.34.0,<1.35.0)", "mypy-boto3-mgh (>=1.34.0,<1.35.0)", "mypy-boto3-mgn (>=1.34.0,<1.35.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhub-config (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhuborchestrator (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhubstrategy (>=1.34.0,<1.35.0)", "mypy-boto3-mobile (>=1.34.0,<1.35.0)", "mypy-boto3-mq (>=1.34.0,<1.35.0)", "mypy-boto3-mturk (>=1.34.0,<1.35.0)", "mypy-boto3-mwaa (>=1.34.0,<1.35.0)", "mypy-boto3-neptune (>=1.34.0,<1.35.0)", "mypy-boto3-neptune-graph (>=1.34.0,<1.35.0)", "mypy-boto3-neptunedata (>=1.34.0,<1.35.0)", "mypy-boto3-network-firewall (>=1.34.0,<1.35.0)", "mypy-boto3-networkmanager 
(>=1.34.0,<1.35.0)", "mypy-boto3-networkmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-nimble (>=1.34.0,<1.35.0)", "mypy-boto3-oam (>=1.34.0,<1.35.0)", "mypy-boto3-omics (>=1.34.0,<1.35.0)", "mypy-boto3-opensearch (>=1.34.0,<1.35.0)", "mypy-boto3-opensearchserverless (>=1.34.0,<1.35.0)", "mypy-boto3-opsworks (>=1.34.0,<1.35.0)", "mypy-boto3-opsworkscm (>=1.34.0,<1.35.0)", "mypy-boto3-organizations (>=1.34.0,<1.35.0)", "mypy-boto3-osis (>=1.34.0,<1.35.0)", "mypy-boto3-outposts (>=1.34.0,<1.35.0)", "mypy-boto3-panorama (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography-data (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-ad (>=1.34.0,<1.35.0)", "mypy-boto3-personalize (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-events (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-pi (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-email (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.34.0,<1.35.0)", "mypy-boto3-pipes (>=1.34.0,<1.35.0)", "mypy-boto3-polly (>=1.34.0,<1.35.0)", "mypy-boto3-pricing (>=1.34.0,<1.35.0)", "mypy-boto3-privatenetworks (>=1.34.0,<1.35.0)", "mypy-boto3-proton (>=1.34.0,<1.35.0)", "mypy-boto3-qbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-qconnect (>=1.34.0,<1.35.0)", "mypy-boto3-qldb (>=1.34.0,<1.35.0)", "mypy-boto3-qldb-session (>=1.34.0,<1.35.0)", "mypy-boto3-quicksight (>=1.34.0,<1.35.0)", "mypy-boto3-ram (>=1.34.0,<1.35.0)", "mypy-boto3-rbin (>=1.34.0,<1.35.0)", "mypy-boto3-rds (>=1.34.0,<1.35.0)", "mypy-boto3-rds-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-rekognition (>=1.34.0,<1.35.0)", "mypy-boto3-repostspace (>=1.34.0,<1.35.0)", "mypy-boto3-resiliencehub (>=1.34.0,<1.35.0)", "mypy-boto3-resource-explorer-2 (>=1.34.0,<1.35.0)", 
"mypy-boto3-resource-groups (>=1.34.0,<1.35.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.34.0,<1.35.0)", "mypy-boto3-robomaker (>=1.34.0,<1.35.0)", "mypy-boto3-rolesanywhere (>=1.34.0,<1.35.0)", "mypy-boto3-route53 (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-cluster (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-control-config (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-readiness (>=1.34.0,<1.35.0)", "mypy-boto3-route53domains (>=1.34.0,<1.35.0)", "mypy-boto3-route53profiles (>=1.34.0,<1.35.0)", "mypy-boto3-route53resolver (>=1.34.0,<1.35.0)", "mypy-boto3-rum (>=1.34.0,<1.35.0)", "mypy-boto3-s3 (>=1.34.0,<1.35.0)", "mypy-boto3-s3control (>=1.34.0,<1.35.0)", "mypy-boto3-s3outposts (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-edge (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-geospatial (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-metrics (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-savingsplans (>=1.34.0,<1.35.0)", "mypy-boto3-scheduler (>=1.34.0,<1.35.0)", "mypy-boto3-schemas (>=1.34.0,<1.35.0)", "mypy-boto3-sdb (>=1.34.0,<1.35.0)", "mypy-boto3-secretsmanager (>=1.34.0,<1.35.0)", "mypy-boto3-securityhub (>=1.34.0,<1.35.0)", "mypy-boto3-securitylake (>=1.34.0,<1.35.0)", "mypy-boto3-serverlessrepo (>=1.34.0,<1.35.0)", "mypy-boto3-service-quotas (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog-appregistry (>=1.34.0,<1.35.0)", "mypy-boto3-servicediscovery (>=1.34.0,<1.35.0)", "mypy-boto3-ses (>=1.34.0,<1.35.0)", "mypy-boto3-sesv2 (>=1.34.0,<1.35.0)", "mypy-boto3-shield (>=1.34.0,<1.35.0)", "mypy-boto3-signer (>=1.34.0,<1.35.0)", "mypy-boto3-simspaceweaver (>=1.34.0,<1.35.0)", "mypy-boto3-sms (>=1.34.0,<1.35.0)", "mypy-boto3-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-snow-device-management (>=1.34.0,<1.35.0)", 
"mypy-boto3-snowball (>=1.34.0,<1.35.0)", "mypy-boto3-sns (>=1.34.0,<1.35.0)", "mypy-boto3-sqs (>=1.34.0,<1.35.0)", "mypy-boto3-ssm (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)", "mypy-boto3-sso (>=1.34.0,<1.35.0)", "mypy-boto3-sso-admin (>=1.34.0,<1.35.0)", "mypy-boto3-sso-oidc (>=1.34.0,<1.35.0)", "mypy-boto3-stepfunctions (>=1.34.0,<1.35.0)", "mypy-boto3-storagegateway (>=1.34.0,<1.35.0)", "mypy-boto3-sts (>=1.34.0,<1.35.0)", "mypy-boto3-supplychain (>=1.34.0,<1.35.0)", "mypy-boto3-support (>=1.34.0,<1.35.0)", "mypy-boto3-support-app (>=1.34.0,<1.35.0)", "mypy-boto3-swf (>=1.34.0,<1.35.0)", "mypy-boto3-synthetics (>=1.34.0,<1.35.0)", "mypy-boto3-textract (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-influxdb (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-query (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-write (>=1.34.0,<1.35.0)", "mypy-boto3-tnb (>=1.34.0,<1.35.0)", "mypy-boto3-transcribe (>=1.34.0,<1.35.0)", "mypy-boto3-transfer (>=1.34.0,<1.35.0)", "mypy-boto3-translate (>=1.34.0,<1.35.0)", "mypy-boto3-trustedadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-verifiedpermissions (>=1.34.0,<1.35.0)", "mypy-boto3-voice-id (>=1.34.0,<1.35.0)", "mypy-boto3-vpc-lattice (>=1.34.0,<1.35.0)", "mypy-boto3-waf (>=1.34.0,<1.35.0)", "mypy-boto3-waf-regional (>=1.34.0,<1.35.0)", "mypy-boto3-wafv2 (>=1.34.0,<1.35.0)", "mypy-boto3-wellarchitected (>=1.34.0,<1.35.0)", "mypy-boto3-wisdom (>=1.34.0,<1.35.0)", "mypy-boto3-workdocs (>=1.34.0,<1.35.0)", "mypy-boto3-worklink (>=1.34.0,<1.35.0)", "mypy-boto3-workmail (>=1.34.0,<1.35.0)", "mypy-boto3-workmailmessageflow (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-thin-client (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-web (>=1.34.0,<1.35.0)", "mypy-boto3-xray (>=1.34.0,<1.35.0)"] amp = ["mypy-boto3-amp (>=1.34.0,<1.35.0)"] amplify = ["mypy-boto3-amplify (>=1.34.0,<1.35.0)"] amplifybackend = 
["mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)"] @@ -194,7 +194,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.34.0,<1.35.0)"] -boto3 = ["boto3 (==1.34.99)", "botocore (==1.34.99)"] +boto3 = ["boto3 (==1.34.114)", "botocore (==1.34.114)"] braket = ["mypy-boto3-braket (>=1.34.0,<1.35.0)"] budgets = ["mypy-boto3-budgets (>=1.34.0,<1.35.0)"] ce = ["mypy-boto3-ce (>=1.34.0,<1.35.0)"] @@ -375,6 +375,7 @@ lookoutvision = ["mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)"] m2 = ["mypy-boto3-m2 (>=1.34.0,<1.35.0)"] machinelearning = ["mypy-boto3-machinelearning (>=1.34.0,<1.35.0)"] macie2 = ["mypy-boto3-macie2 (>=1.34.0,<1.35.0)"] +mailmanager = ["mypy-boto3-mailmanager (>=1.34.0,<1.35.0)"] managedblockchain = ["mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)"] managedblockchain-query = ["mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)"] marketplace-agreement = ["mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)"] @@ -541,13 +542,13 @@ xray = ["mypy-boto3-xray (>=1.34.0,<1.35.0)"] [[package]] name = "botocore" -version = "1.34.99" +version = "1.34.114" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.34.99-py3-none-any.whl", hash = "sha256:18c68bdeb0ffb73290912b0c96204fc36d3128f00a00b5cdc35ac34d66225f1c"}, - {file = "botocore-1.34.99.tar.gz", hash = "sha256:cafe569e2136cb33cb0e5dd32fb1c0e1503ddc1413d3be215df8ddf05e69137a"}, + {file = "botocore-1.34.114-py3-none-any.whl", hash = "sha256:606d1e55984d45e41a812badee292755f4db0233eed9cca63ea3bb8f5755507f"}, + {file = "botocore-1.34.114.tar.gz", hash = "sha256:5705f74fda009656a218ffaf4afd81228359160f2ab806ab8222d07e9da3a73b"}, ] [package.dependencies] @@ -579,6 +580,17 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [package.extras] botocore = ["botocore"] +[[package]] +name = "cachetools" +version = "5.3.3" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, + {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, +] + [[package]] name = "certifi" version = "2024.2.2" @@ -656,17 +668,17 @@ pycparser = "*" [[package]] name = "cfn-lint" -version = "0.87.1" +version = "0.87.4" description = "Checks CloudFormation templates for practices and behaviour that could potentially be improved" optional = false python-versions = "!=4.0,<=4.0,>=3.8" files = [ - {file = "cfn_lint-0.87.1-py3-none-any.whl", hash = "sha256:d450f450635fc223b6f66880ccac52a5fd1a52966fa1705f1ba52b88dfed3071"}, - {file = "cfn_lint-0.87.1.tar.gz", hash = "sha256:b3ce9d3e5e0eadcea5d584c8ccaa00bf2a990a36a64d7ffd8683bc60b7e4f06f"}, + {file = "cfn_lint-0.87.4-py3-none-any.whl", hash = "sha256:a4e00f36b589a686efc59df5a25838b661c482ea51391c091553921db38fca50"}, + {file = "cfn_lint-0.87.4.tar.gz", hash = "sha256:1bf635bfe252dd6160c2ed7a8c5b920381bc404cba67d316b454cd70ba678fd7"}, ] 
[package.dependencies] -aws-sam-translator = ">=1.87.0" +aws-sam-translator = ">=1.89.0" jschema-to-python = ">=1.2.3,<1.3.0" jsonpatch = "*" jsonschema = ">=3.0,<5" @@ -677,6 +689,17 @@ regex = ">=2021.7.1" sarif-om = ">=1.0.4,<1.1.0" sympy = ">=1.0.0" +[[package]] +name = "chardet" +version = "5.2.0" +description = "Universal encoding detector for Python 3" +optional = false +python-versions = ">=3.7" +files = [ + {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, + {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, +] + [[package]] name = "charset-normalizer" version = "3.3.2" @@ -776,17 +799,6 @@ files = [ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] -[[package]] -name = "cli-test-helpers" -version = "3.5.0" -description = "Useful helpers for writing tests for your Python CLI program." 
-optional = false -python-versions = "*" -files = [ - {file = "cli-test-helpers-3.5.0.tar.gz", hash = "sha256:86d5dea5b4fee4767cd87ff26eb709ad06764545e0fa348bace735247054b23e"}, - {file = "cli_test_helpers-3.5.0-py3-none-any.whl", hash = "sha256:e05359625f9134d20df284bc32dd7e1002925945fe7ff0912351e6b08f55cee8"}, -] - [[package]] name = "click" version = "8.1.7" @@ -814,63 +826,63 @@ files = [ [[package]] name = "coverage" -version = "7.5.1" +version = "7.5.3" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0884920835a033b78d1c73b6d3bbcda8161a900f38a488829a83982925f6c2e"}, - {file = "coverage-7.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:39afcd3d4339329c5f58de48a52f6e4e50f6578dd6099961cf22228feb25f38f"}, - {file = "coverage-7.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b0ceee8147444347da6a66be737c9d78f3353b0681715b668b72e79203e4a"}, - {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a9ca3f2fae0088c3c71d743d85404cec8df9be818a005ea065495bedc33da35"}, - {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd215c0c7d7aab005221608a3c2b46f58c0285a819565887ee0b718c052aa4e"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4bf0655ab60d754491004a5efd7f9cccefcc1081a74c9ef2da4735d6ee4a6223"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:61c4bf1ba021817de12b813338c9be9f0ad5b1e781b9b340a6d29fc13e7c1b5e"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db66fc317a046556a96b453a58eced5024af4582a8dbdc0c23ca4dbc0d5b3146"}, - {file = "coverage-7.5.1-cp310-cp310-win32.whl", hash = 
"sha256:b016ea6b959d3b9556cb401c55a37547135a587db0115635a443b2ce8f1c7228"}, - {file = "coverage-7.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:df4e745a81c110e7446b1cc8131bf986157770fa405fe90e15e850aaf7619bc8"}, - {file = "coverage-7.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:796a79f63eca8814ca3317a1ea443645c9ff0d18b188de470ed7ccd45ae79428"}, - {file = "coverage-7.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fc84a37bfd98db31beae3c2748811a3fa72bf2007ff7902f68746d9757f3746"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6175d1a0559986c6ee3f7fccfc4a90ecd12ba0a383dcc2da30c2b9918d67d8a3"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fc81d5878cd6274ce971e0a3a18a8803c3fe25457165314271cf78e3aae3aa2"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:556cf1a7cbc8028cb60e1ff0be806be2eded2daf8129b8811c63e2b9a6c43bca"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9981706d300c18d8b220995ad22627647be11a4276721c10911e0e9fa44c83e8"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d7fed867ee50edf1a0b4a11e8e5d0895150e572af1cd6d315d557758bfa9c057"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef48e2707fb320c8f139424a596f5b69955a85b178f15af261bab871873bb987"}, - {file = "coverage-7.5.1-cp311-cp311-win32.whl", hash = "sha256:9314d5678dcc665330df5b69c1e726a0e49b27df0461c08ca12674bcc19ef136"}, - {file = "coverage-7.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fa567e99765fe98f4e7d7394ce623e794d7cabb170f2ca2ac5a4174437e90dd"}, - {file = "coverage-7.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b6cf3764c030e5338e7f61f95bd21147963cf6aa16e09d2f74f1fa52013c1206"}, - {file = 
"coverage-7.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ec92012fefebee89a6b9c79bc39051a6cb3891d562b9270ab10ecfdadbc0c34"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16db7f26000a07efcf6aea00316f6ac57e7d9a96501e990a36f40c965ec7a95d"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beccf7b8a10b09c4ae543582c1319c6df47d78fd732f854ac68d518ee1fb97fa"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8748731ad392d736cc9ccac03c9845b13bb07d020a33423fa5b3a36521ac6e4e"}, - {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7352b9161b33fd0b643ccd1f21f3a3908daaddf414f1c6cb9d3a2fd618bf2572"}, - {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7a588d39e0925f6a2bff87154752481273cdb1736270642aeb3635cb9b4cad07"}, - {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:68f962d9b72ce69ea8621f57551b2fa9c70509af757ee3b8105d4f51b92b41a7"}, - {file = "coverage-7.5.1-cp312-cp312-win32.whl", hash = "sha256:f152cbf5b88aaeb836127d920dd0f5e7edff5a66f10c079157306c4343d86c19"}, - {file = "coverage-7.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:5a5740d1fb60ddf268a3811bcd353de34eb56dc24e8f52a7f05ee513b2d4f596"}, - {file = "coverage-7.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2213def81a50519d7cc56ed643c9e93e0247f5bbe0d1247d15fa520814a7cd7"}, - {file = "coverage-7.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5037f8fcc2a95b1f0e80585bd9d1ec31068a9bcb157d9750a172836e98bc7a90"}, - {file = "coverage-7.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3721c2c9e4c4953a41a26c14f4cef64330392a6d2d675c8b1db3b645e31f0e"}, - {file = 
"coverage-7.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca498687ca46a62ae590253fba634a1fe9836bc56f626852fb2720f334c9e4e5"}, - {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cdcbc320b14c3e5877ee79e649677cb7d89ef588852e9583e6b24c2e5072661"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:57e0204b5b745594e5bc14b9b50006da722827f0b8c776949f1135677e88d0b8"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fe7502616b67b234482c3ce276ff26f39ffe88adca2acf0261df4b8454668b4"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9e78295f4144f9dacfed4f92935fbe1780021247c2fabf73a819b17f0ccfff8d"}, - {file = "coverage-7.5.1-cp38-cp38-win32.whl", hash = "sha256:1434e088b41594baa71188a17533083eabf5609e8e72f16ce8c186001e6b8c41"}, - {file = "coverage-7.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:0646599e9b139988b63704d704af8e8df7fa4cbc4a1f33df69d97f36cb0a38de"}, - {file = "coverage-7.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4cc37def103a2725bc672f84bd939a6fe4522310503207aae4d56351644682f1"}, - {file = "coverage-7.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc0b4d8bfeabd25ea75e94632f5b6e047eef8adaed0c2161ada1e922e7f7cece"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d0a0f5e06881ecedfe6f3dd2f56dcb057b6dbeb3327fd32d4b12854df36bf26"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9735317685ba6ec7e3754798c8871c2f49aa5e687cc794a0b1d284b2389d1bd5"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d21918e9ef11edf36764b93101e2ae8cc82aa5efdc7c5a4e9c6c35a48496d601"}, - {file = 
"coverage-7.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3e757949f268364b96ca894b4c342b41dc6f8f8b66c37878aacef5930db61be"}, - {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:79afb6197e2f7f60c4824dd4b2d4c2ec5801ceb6ba9ce5d2c3080e5660d51a4f"}, - {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d0d98d95dd18fe29dc66808e1accf59f037d5716f86a501fc0256455219668"}, - {file = "coverage-7.5.1-cp39-cp39-win32.whl", hash = "sha256:1cc0fe9b0b3a8364093c53b0b4c0c2dd4bb23acbec4c9240b5f284095ccf7981"}, - {file = "coverage-7.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:dde0070c40ea8bb3641e811c1cfbf18e265d024deff6de52c5950677a8fb1e0f"}, - {file = "coverage-7.5.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:6537e7c10cc47c595828b8a8be04c72144725c383c4702703ff4e42e44577312"}, - {file = "coverage-7.5.1.tar.gz", hash = "sha256:54de9ef3a9da981f7af93eafde4ede199e0846cd819eb27c88e2b712aae9708c"}, + {file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"}, + {file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83"}, + {file = 
"coverage-7.5.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c"}, + {file = "coverage-7.5.3-cp310-cp310-win32.whl", hash = "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84"}, + {file = "coverage-7.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac"}, + {file = "coverage-7.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974"}, + {file = "coverage-7.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614"}, + {file = "coverage-7.5.3-cp311-cp311-win32.whl", hash = 
"sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9"}, + {file = "coverage-7.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a"}, + {file = "coverage-7.5.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8"}, + {file = "coverage-7.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84"}, + {file = "coverage-7.5.3-cp312-cp312-win32.whl", hash = "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08"}, + {file = "coverage-7.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb"}, + {file = "coverage-7.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb"}, + {file = 
"coverage-7.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0"}, + {file = "coverage-7.5.3-cp38-cp38-win32.whl", hash = "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485"}, + {file = "coverage-7.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56"}, + {file = "coverage-7.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85"}, + {file = "coverage-7.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd"}, + {file = "coverage-7.5.3-cp39-cp39-win32.whl", hash = "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d"}, + {file = "coverage-7.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0"}, + {file = "coverage-7.5.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884"}, + {file = "coverage-7.5.3.tar.gz", hash = "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f"}, ] [package.dependencies] @@ -933,24 +945,36 @@ ssh = ["bcrypt (>=3.1.5)"] test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] +[[package]] +name = "distlib" +version = "0.3.8" +description = "Distribution utilities" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, + {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, +] + [[package]] name = "docker" -version = "7.0.0" +version = "7.1.0" description = "A Python library for the Docker Engine API." 
optional = false python-versions = ">=3.8" files = [ - {file = "docker-7.0.0-py3-none-any.whl", hash = "sha256:12ba681f2777a0ad28ffbcc846a69c31b4dfd9752b47eb425a274ee269c5e14b"}, - {file = "docker-7.0.0.tar.gz", hash = "sha256:323736fb92cd9418fc5e7133bc953e11a9da04f4483f828b527db553f1e7e5a3"}, + {file = "docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0"}, + {file = "docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c"}, ] [package.dependencies] -packaging = ">=14.0" pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} requests = ">=2.26.0" urllib3 = ">=1.26.0" [package.extras] +dev = ["coverage (==7.2.7)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.1.0)", "ruff (==0.1.8)"] +docs = ["myst-parser (==0.18.0)", "sphinx (==5.1.1)"] ssh = ["paramiko (>=2.4.3)"] websockets = ["websocket-client (>=1.3.0)"] @@ -968,6 +992,22 @@ files = [ [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "filelock" +version = "3.14.0" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.14.0-py3-none-any.whl", hash = "sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f"}, + {file = "filelock-3.14.0.tar.gz", hash = "sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] + [[package]] name = "flake8" version = "6.1.0" @@ -1079,13 +1119,13 @@ files = [ [[package]] name = "joserfc" -version = "0.9.0" +version = "0.10.0" description = "The ultimate Python library for JOSE RFCs, including JWS, JWE, JWK, JWA, JWT" optional = false python-versions = ">=3.8" files = [ - {file = "joserfc-0.9.0-py3-none-any.whl", hash = "sha256:4026bdbe2c196cd40574e916fa1e28874d99649412edaab0e373dec3077153fb"}, - {file = "joserfc-0.9.0.tar.gz", hash = "sha256:eebca7f587b1761ce43a98ffd5327f2b600b9aa5bb0a77b947687f503ad43bc0"}, + {file = "joserfc-0.10.0-py3-none-any.whl", hash = "sha256:1b39bd9078d7f0087bfe694f96c9723c7a2fd63e5974f5efd805016319f2f50d"}, + {file = "joserfc-0.10.0.tar.gz", hash = "sha256:d1c16ff2179145e248fd67dbaa47bb5a3855f8754c64902dd09e2775e63bcd63"}, ] [package.dependencies] @@ -1342,13 +1382,13 @@ files = [ [[package]] name = "moto" -version = "5.0.6" +version = "5.0.8" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "moto-5.0.6-py2.py3-none-any.whl", hash = "sha256:ca1e22831a741733b581ff2ef4d6ae2e1c6db1eab97af1b78b86ca2c6e88c609"}, - {file = "moto-5.0.6.tar.gz", hash = "sha256:ad8b23f2b555ad694da8b2432a42b6d96beaaf67a4e7d932196a72193a2eee2c"}, + {file = "moto-5.0.8-py2.py3-none-any.whl", hash = "sha256:7d1035e366434bfa9fcc0621f07d5aa724b6846408071d540137a0554c46f214"}, + {file = 
"moto-5.0.8.tar.gz", hash = "sha256:517fb808dc718bcbdda54c6ffeaca0adc34cf6e10821bfb01216ce420a31765c"}, ] [package.dependencies] @@ -1363,7 +1403,7 @@ Jinja2 = ">=2.10.1" joserfc = {version = ">=0.9.0", optional = true, markers = "extra == \"cloudformation\""} jsondiff = {version = ">=1.1.2", optional = true, markers = "extra == \"cloudformation\""} openapi-spec-validator = {version = ">=0.5.0", optional = true, markers = "extra == \"cloudformation\""} -py-partiql-parser = {version = "0.5.4", optional = true, markers = "extra == \"cloudformation\""} +py-partiql-parser = {version = "0.5.5", optional = true, markers = "extra == \"cloudformation\""} pyparsing = {version = ">=3.0.7", optional = true, markers = "extra == \"cloudformation\""} python-dateutil = ">=2.1,<3.0.0" PyYAML = {version = ">=5.1", optional = true, markers = "extra == \"cloudformation\""} @@ -1374,23 +1414,23 @@ werkzeug = ">=0.5,<2.2.0 || >2.2.0,<2.2.1 || >2.2.1" xmltodict = "*" [package.extras] -all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)", "setuptools"] +all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)", "setuptools"] apigateway = ["PyYAML (>=5.1)", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)"] apigatewayv2 = ["PyYAML (>=5.1)", "openapi-spec-validator (>=0.5.0)"] appsync = ["graphql-core"] awslambda = ["docker (>=3.0.0)"] batch = ["docker (>=3.0.0)"] -cloudformation = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc 
(>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)", "setuptools"] +cloudformation = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)", "setuptools"] cognitoidp = ["joserfc (>=0.9.0)"] -dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.4)"] -dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.4)"] +dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.5)"] +dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.5)"] glue = ["pyparsing (>=3.0.7)"] iotdata = ["jsondiff (>=1.1.2)"] -proxy = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)", "setuptools"] -resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)"] -s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.5.4)"] -s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.5.4)"] -server = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "flask (!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.4)", "pyparsing (>=3.0.7)", "setuptools"] +proxy = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff 
(>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)", "setuptools"] +resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)"] +s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.5.5)"] +s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.5.5)"] +server = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "flask (!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.5)", "pyparsing (>=3.0.7)", "setuptools"] ssm = ["PyYAML (>=5.1)"] stepfunctions = ["antlr4-python3-runtime", "jsonpath-ng"] xray = ["aws-xray-sdk (>=0.93,!=0.96)", "setuptools"] @@ -1461,13 +1501,13 @@ reports = ["lxml"] [[package]] name = "mypy-boto3-cloudformation" -version = "1.34.84" -description = "Type annotations for boto3.CloudFormation 1.34.84 service generated with mypy-boto3-builder 7.23.2" +version = "1.34.111" +description = "Type annotations for boto3.CloudFormation 1.34.111 service generated with mypy-boto3-builder 7.24.0" optional = false python-versions = ">=3.8" files = [ - {file = "mypy_boto3_cloudformation-1.34.84-py3-none-any.whl", hash = "sha256:580954031cb3650588b91f592e8f51855b2ff435d763ac0d69cf271c8433315f"}, - {file = "mypy_boto3_cloudformation-1.34.84.tar.gz", hash = "sha256:82d14df3757f30b5a1d34650839d415d265d4de41cf355d63e10221fcc67f177"}, + {file = "mypy_boto3_cloudformation-1.34.111-py3-none-any.whl", hash = "sha256:526e928c504fa2880b1774aa10629a04fe0ec70ed2864ab3d3f7772386a1a925"}, + {file = "mypy_boto3_cloudformation-1.34.111.tar.gz", hash = "sha256:a02e201d1a9d9a8fb4db5b942d5c537a4e8861c611f0d986126674ac557cb9e8"}, ] 
[package.dependencies] @@ -1609,13 +1649,13 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.1" +version = "4.2.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"}, - {file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] [package.extras] @@ -1640,13 +1680,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "py-partiql-parser" -version = "0.5.4" +version = "0.5.5" description = "Pure Python PartiQL Parser" optional = false python-versions = "*" files = [ - {file = "py_partiql_parser-0.5.4-py2.py3-none-any.whl", hash = "sha256:3dc4295a47da9587681a96b35c6e151886fdbd0a4acbe0d97c4c68e5f689d315"}, - {file = "py_partiql_parser-0.5.4.tar.gz", hash = "sha256:72e043919538fa63edae72fb59afc7e3fd93adbde656718a7d2b4666f23dd114"}, + {file = "py_partiql_parser-0.5.5-py2.py3-none-any.whl", hash = "sha256:90d278818385bd60c602410c953ee78f04ece599d8cd21c656fc5e47399577a1"}, + {file = "py_partiql_parser-0.5.5.tar.gz", hash = "sha256:ed07f8edf4b55e295cab4f5fd3e2ba3196cee48a43fe210d53ddd6ffce1cf1ff"}, ] [package.extras] @@ -1676,18 +1716,18 @@ files = [ [[package]] name = "pydantic" -version = "2.7.1" +version = "2.7.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, - {file = "pydantic-2.7.1.tar.gz", hash = 
"sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, + {file = "pydantic-2.7.2-py3-none-any.whl", hash = "sha256:834ab954175f94e6e68258537dc49402c4a5e9d0409b9f1b86b7e934a8372de7"}, + {file = "pydantic-2.7.2.tar.gz", hash = "sha256:71b2945998f9c9b7919a45bde9a50397b289937d215ae141c1d0903ba7149fd7"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.2" +pydantic-core = "2.18.3" typing-extensions = ">=4.6.1" [package.extras] @@ -1695,90 +1735,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.2" +version = "2.18.3" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, - {file = 
"pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, - {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, - {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, - {file = 
"pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, - {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, - {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, - {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, - {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, - {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, - {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, - {file = 
"pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, - {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, - {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, - {file = 
"pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, - {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, - {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, - {file = 
"pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, - {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, + {file = "pydantic_core-2.18.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:744697428fcdec6be5670460b578161d1ffe34743a5c15656be7ea82b008197c"}, + {file = "pydantic_core-2.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:37b40c05ced1ba4218b14986fe6f283d22e1ae2ff4c8e28881a70fb81fbfcda7"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544a9a75622357076efb6b311983ff190fbfb3c12fc3a853122b34d3d358126c"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e2e253af04ceaebde8eb201eb3f3e3e7e390f2d275a88300d6a1959d710539e2"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:855ec66589c68aa367d989da5c4755bb74ee92ccad4fdb6af942c3612c067e34"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d3e42bb54e7e9d72c13ce112e02eb1b3b55681ee948d748842171201a03a98a"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6ac9ffccc9d2e69d9fba841441d4259cb668ac180e51b30d3632cd7abca2b9b"}, + {file = "pydantic_core-2.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c56eca1686539fa0c9bda992e7bd6a37583f20083c37590413381acfc5f192d6"}, + {file = "pydantic_core-2.18.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:17954d784bf8abfc0ec2a633108207ebc4fa2df1a0e4c0c3ccbaa9bb01d2c426"}, + {file = "pydantic_core-2.18.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:98ed737567d8f2ecd54f7c8d4f8572ca7c7921ede93a2e52939416170d357812"}, + {file = "pydantic_core-2.18.3-cp310-none-win32.whl", hash = "sha256:9f9e04afebd3ed8c15d67a564ed0a34b54e52136c6d40d14c5547b238390e779"}, + {file = "pydantic_core-2.18.3-cp310-none-win_amd64.whl", hash = "sha256:45e4ffbae34f7ae30d0047697e724e534a7ec0a82ef9994b7913a412c21462a0"}, + {file = "pydantic_core-2.18.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b9ebe8231726c49518b16b237b9fe0d7d361dd221302af511a83d4ada01183ab"}, + {file = "pydantic_core-2.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:b8e20e15d18bf7dbb453be78a2d858f946f5cdf06c5072453dace00ab652e2b2"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0d9ff283cd3459fa0bf9b0256a2b6f01ac1ff9ffb034e24457b9035f75587cb"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f7ef5f0ebb77ba24c9970da18b771711edc5feaf00c10b18461e0f5f5949231"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73038d66614d2e5cde30435b5afdced2b473b4c77d4ca3a8624dd3e41a9c19be"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6afd5c867a74c4d314c557b5ea9520183fadfbd1df4c2d6e09fd0d990ce412cd"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd7df92f28d351bb9f12470f4c533cf03d1b52ec5a6e5c58c65b183055a60106"}, + {file = "pydantic_core-2.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:80aea0ffeb1049336043d07799eace1c9602519fb3192916ff525b0287b2b1e4"}, + {file = "pydantic_core-2.18.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:aaee40f25bba38132e655ffa3d1998a6d576ba7cf81deff8bfa189fb43fd2bbe"}, + {file = "pydantic_core-2.18.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9128089da8f4fe73f7a91973895ebf2502539d627891a14034e45fb9e707e26d"}, + {file = "pydantic_core-2.18.3-cp311-none-win32.whl", hash = "sha256:fec02527e1e03257aa25b1a4dcbe697b40a22f1229f5d026503e8b7ff6d2eda7"}, + {file = "pydantic_core-2.18.3-cp311-none-win_amd64.whl", hash = "sha256:58ff8631dbab6c7c982e6425da8347108449321f61fe427c52ddfadd66642af7"}, + {file = "pydantic_core-2.18.3-cp311-none-win_arm64.whl", hash = "sha256:3fc1c7f67f34c6c2ef9c213e0f2a351797cda98249d9ca56a70ce4ebcaba45f4"}, + {file = "pydantic_core-2.18.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:f0928cde2ae416a2d1ebe6dee324709c6f73e93494d8c7aea92df99aab1fc40f"}, + {file = "pydantic_core-2.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bee9bb305a562f8b9271855afb6ce00223f545de3d68560b3c1649c7c5295e9"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e862823be114387257dacbfa7d78547165a85d7add33b446ca4f4fae92c7ff5c"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a36f78674cbddc165abab0df961b5f96b14461d05feec5e1f78da58808b97e7"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba905d184f62e7ddbb7a5a751d8a5c805463511c7b08d1aca4a3e8c11f2e5048"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fdd362f6a586e681ff86550b2379e532fee63c52def1c666887956748eaa326"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24b214b7ee3bd3b865e963dbed0f8bc5375f49449d70e8d407b567af3222aae4"}, + {file = "pydantic_core-2.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:691018785779766127f531674fa82bb368df5b36b461622b12e176c18e119022"}, + {file = "pydantic_core-2.18.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:60e4c625e6f7155d7d0dcac151edf5858102bc61bf959d04469ca6ee4e8381bd"}, + {file = "pydantic_core-2.18.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4e651e47d981c1b701dcc74ab8fec5a60a5b004650416b4abbef13db23bc7be"}, + {file = "pydantic_core-2.18.3-cp312-none-win32.whl", hash = "sha256:ffecbb5edb7f5ffae13599aec33b735e9e4c7676ca1633c60f2c606beb17efc5"}, + {file = "pydantic_core-2.18.3-cp312-none-win_amd64.whl", hash = "sha256:2c8333f6e934733483c7eddffdb094c143b9463d2af7e6bd85ebcb2d4a1b82c6"}, + {file = "pydantic_core-2.18.3-cp312-none-win_arm64.whl", hash = 
"sha256:7a20dded653e516a4655f4c98e97ccafb13753987434fe7cf044aa25f5b7d417"}, + {file = "pydantic_core-2.18.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:eecf63195be644b0396f972c82598cd15693550f0ff236dcf7ab92e2eb6d3522"}, + {file = "pydantic_core-2.18.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c44efdd3b6125419c28821590d7ec891c9cb0dff33a7a78d9d5c8b6f66b9702"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e59fca51ffbdd1638b3856779342ed69bcecb8484c1d4b8bdb237d0eb5a45e2"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70cf099197d6b98953468461d753563b28e73cf1eade2ffe069675d2657ed1d5"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63081a49dddc6124754b32a3774331467bfc3d2bd5ff8f10df36a95602560361"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:370059b7883485c9edb9655355ff46d912f4b03b009d929220d9294c7fd9fd60"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a64faeedfd8254f05f5cf6fc755023a7e1606af3959cfc1a9285744cc711044"}, + {file = "pydantic_core-2.18.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19d2e725de0f90d8671f89e420d36c3dd97639b98145e42fcc0e1f6d492a46dc"}, + {file = "pydantic_core-2.18.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:67bc078025d70ec5aefe6200ef094576c9d86bd36982df1301c758a9fff7d7f4"}, + {file = "pydantic_core-2.18.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:adf952c3f4100e203cbaf8e0c907c835d3e28f9041474e52b651761dc248a3c0"}, + {file = "pydantic_core-2.18.3-cp38-none-win32.whl", hash = "sha256:9a46795b1f3beb167eaee91736d5d17ac3a994bf2215a996aed825a45f897558"}, + {file = "pydantic_core-2.18.3-cp38-none-win_amd64.whl", hash = "sha256:200ad4e3133cb99ed82342a101a5abf3d924722e71cd581cc113fe828f727fbc"}, + 
{file = "pydantic_core-2.18.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:304378b7bf92206036c8ddd83a2ba7b7d1a5b425acafff637172a3aa72ad7083"}, + {file = "pydantic_core-2.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c826870b277143e701c9ccf34ebc33ddb4d072612683a044e7cce2d52f6c3fef"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e201935d282707394f3668380e41ccf25b5794d1b131cdd96b07f615a33ca4b1"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5560dda746c44b48bf82b3d191d74fe8efc5686a9ef18e69bdabccbbb9ad9442"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b32c2a1f8032570842257e4c19288eba9a2bba4712af542327de9a1204faff8"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:929c24e9dea3990bc8bcd27c5f2d3916c0c86f5511d2caa69e0d5290115344a9"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1a8376fef60790152564b0eab376b3e23dd6e54f29d84aad46f7b264ecca943"}, + {file = "pydantic_core-2.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dccf3ef1400390ddd1fb55bf0632209d39140552d068ee5ac45553b556780e06"}, + {file = "pydantic_core-2.18.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:41dbdcb0c7252b58fa931fec47937edb422c9cb22528f41cb8963665c372caf6"}, + {file = "pydantic_core-2.18.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:666e45cf071669fde468886654742fa10b0e74cd0fa0430a46ba6056b24fb0af"}, + {file = "pydantic_core-2.18.3-cp39-none-win32.whl", hash = "sha256:f9c08cabff68704a1b4667d33f534d544b8a07b8e5d039c37067fceb18789e78"}, + {file = "pydantic_core-2.18.3-cp39-none-win_amd64.whl", hash = "sha256:4afa5f5973e8572b5c0dcb4e2d4fda7890e7cd63329bd5cc3263a25c92ef0026"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:77319771a026f7c7d29c6ebc623de889e9563b7087911b46fd06c044a12aa5e9"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:df11fa992e9f576473038510d66dd305bcd51d7dd508c163a8c8fe148454e059"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d531076bdfb65af593326ffd567e6ab3da145020dafb9187a1d131064a55f97c"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d33ce258e4e6e6038f2b9e8b8a631d17d017567db43483314993b3ca345dcbbb"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1f9cd7f5635b719939019be9bda47ecb56e165e51dd26c9a217a433e3d0d59a9"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cd4a032bb65cc132cae1fe3e52877daecc2097965cd3914e44fbd12b00dae7c5"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f2718430098bcdf60402136c845e4126a189959d103900ebabb6774a5d9fdb"}, + {file = "pydantic_core-2.18.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c0037a92cf0c580ed14e10953cdd26528e8796307bb8bb312dc65f71547df04d"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b95a0972fac2b1ff3c94629fc9081b16371dad870959f1408cc33b2f78ad347a"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a62e437d687cc148381bdd5f51e3e81f5b20a735c55f690c5be94e05da2b0d5c"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b367a73a414bbb08507da102dc2cde0fa7afe57d09b3240ce82a16d608a7679c"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ecce4b2360aa3f008da3327d652e74a0e743908eac306198b47e1c58b03dd2b"}, + {file = 
"pydantic_core-2.18.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd4435b8d83f0c9561a2a9585b1de78f1abb17cb0cef5f39bf6a4b47d19bafe3"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:616221a6d473c5b9aa83fa8982745441f6a4a62a66436be9445c65f241b86c94"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7e6382ce89a92bc1d0c0c5edd51e931432202b9080dc921d8d003e616402efd1"}, + {file = "pydantic_core-2.18.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff58f379345603d940e461eae474b6bbb6dab66ed9a851ecd3cb3709bf4dcf6a"}, + {file = "pydantic_core-2.18.3.tar.gz", hash = "sha256:432e999088d85c8f36b9a3f769a8e2b57aabd817bbb729a90d1fe7f18f6f1f39"}, ] [package.dependencies] @@ -1809,6 +1849,25 @@ files = [ [package.extras] diagrams = ["jinja2", "railroad-diagrams"] +[[package]] +name = "pyproject-api" +version = "1.6.1" +description = "API to interact with the python pyproject.toml based projects" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyproject_api-1.6.1-py3-none-any.whl", hash = "sha256:4c0116d60476b0786c88692cf4e325a9814965e2469c5998b830bba16b183675"}, + {file = "pyproject_api-1.6.1.tar.gz", hash = "sha256:1817dc018adc0d1ff9ca1ed8c60e1623d5aaca40814b953af14a9cf9a5cae538"}, +] + +[package.dependencies] +packaging = ">=23.1" +tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["furo (>=2023.8.19)", "sphinx (<7.2)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "setuptools (>=68.1.2)", "wheel (>=0.41.2)"] + [[package]] name = "pyrsistent" version = "0.20.0" @@ -1989,101 +2048,101 @@ files = [ [[package]] name = "regex" -version = "2024.4.28" +version = "2024.5.15" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd196d056b40af073d95a2879678585f0b74ad35190fac04ca67954c582c6b61"}, - {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8bb381f777351bd534462f63e1c6afb10a7caa9fa2a421ae22c26e796fe31b1f"}, - {file = "regex-2024.4.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:47af45b6153522733aa6e92543938e97a70ce0900649ba626cf5aad290b737b6"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99d6a550425cc51c656331af0e2b1651e90eaaa23fb4acde577cf15068e2e20f"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bf29304a8011feb58913c382902fde3395957a47645bf848eea695839aa101b7"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:92da587eee39a52c91aebea8b850e4e4f095fe5928d415cb7ed656b3460ae79a"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6277d426e2f31bdbacb377d17a7475e32b2d7d1f02faaecc48d8e370c6a3ff31"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28e1f28d07220c0f3da0e8fcd5a115bbb53f8b55cecf9bec0c946eb9a059a94c"}, - {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:aaa179975a64790c1f2701ac562b5eeb733946eeb036b5bcca05c8d928a62f10"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6f435946b7bf7a1b438b4e6b149b947c837cb23c704e780c19ba3e6855dbbdd3"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:19d6c11bf35a6ad077eb23852827f91c804eeb71ecb85db4ee1386825b9dc4db"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:fdae0120cddc839eb8e3c15faa8ad541cc6d906d3eb24d82fb041cfe2807bc1e"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e672cf9caaf669053121f1766d659a8813bd547edef6e009205378faf45c67b8"}, - {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f57515750d07e14743db55d59759893fdb21d2668f39e549a7d6cad5d70f9fea"}, - {file = "regex-2024.4.28-cp310-cp310-win32.whl", hash = "sha256:a1409c4eccb6981c7baabc8888d3550df518add6e06fe74fa1d9312c1838652d"}, - {file = "regex-2024.4.28-cp310-cp310-win_amd64.whl", hash = "sha256:1f687a28640f763f23f8a9801fe9e1b37338bb1ca5d564ddd41619458f1f22d1"}, - {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:84077821c85f222362b72fdc44f7a3a13587a013a45cf14534df1cbbdc9a6796"}, - {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b45d4503de8f4f3dc02f1d28a9b039e5504a02cc18906cfe744c11def942e9eb"}, - {file = "regex-2024.4.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:457c2cd5a646dd4ed536c92b535d73548fb8e216ebee602aa9f48e068fc393f3"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b51739ddfd013c6f657b55a508de8b9ea78b56d22b236052c3a85a675102dc6"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:459226445c7d7454981c4c0ce0ad1a72e1e751c3e417f305722bbcee6697e06a"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:670fa596984b08a4a769491cbdf22350431970d0112e03d7e4eeaecaafcd0fec"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe00f4fe11c8a521b173e6324d862ee7ee3412bf7107570c9b564fe1119b56fb"}, - {file = "regex-2024.4.28-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:36f392dc7763fe7924575475736bddf9ab9f7a66b920932d0ea50c2ded2f5636"}, - {file = 
"regex-2024.4.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:23a412b7b1a7063f81a742463f38821097b6a37ce1e5b89dd8e871d14dbfd86b"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f1d6e4b7b2ae3a6a9df53efbf199e4bfcff0959dbdb5fd9ced34d4407348e39a"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:499334ad139557de97cbc4347ee921c0e2b5e9c0f009859e74f3f77918339257"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:0940038bec2fe9e26b203d636c44d31dd8766abc1fe66262da6484bd82461ccf"}, - {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:66372c2a01782c5fe8e04bff4a2a0121a9897e19223d9eab30c54c50b2ebeb7f"}, - {file = "regex-2024.4.28-cp311-cp311-win32.whl", hash = "sha256:c77d10ec3c1cf328b2f501ca32583625987ea0f23a0c2a49b37a39ee5c4c4630"}, - {file = "regex-2024.4.28-cp311-cp311-win_amd64.whl", hash = "sha256:fc0916c4295c64d6890a46e02d4482bb5ccf33bf1a824c0eaa9e83b148291f90"}, - {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:08a1749f04fee2811c7617fdd46d2e46d09106fa8f475c884b65c01326eb15c5"}, - {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b8eb28995771c087a73338f695a08c9abfdf723d185e57b97f6175c5051ff1ae"}, - {file = "regex-2024.4.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dd7ef715ccb8040954d44cfeff17e6b8e9f79c8019daae2fd30a8806ef5435c0"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb0315a2b26fde4005a7c401707c5352df274460f2f85b209cf6024271373013"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2fc053228a6bd3a17a9b0a3f15c3ab3cf95727b00557e92e1cfe094b88cc662"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fe9739a686dc44733d52d6e4f7b9c77b285e49edf8570754b322bca6b85b4cc"}, - {file = 
"regex-2024.4.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74fcf77d979364f9b69fcf8200849ca29a374973dc193a7317698aa37d8b01c"}, - {file = "regex-2024.4.28-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:965fd0cf4694d76f6564896b422724ec7b959ef927a7cb187fc6b3f4e4f59833"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2fef0b38c34ae675fcbb1b5db760d40c3fc3612cfa186e9e50df5782cac02bcd"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bc365ce25f6c7c5ed70e4bc674f9137f52b7dd6a125037f9132a7be52b8a252f"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ac69b394764bb857429b031d29d9604842bc4cbfd964d764b1af1868eeebc4f0"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:144a1fc54765f5c5c36d6d4b073299832aa1ec6a746a6452c3ee7b46b3d3b11d"}, - {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2630ca4e152c221072fd4a56d4622b5ada876f668ecd24d5ab62544ae6793ed6"}, - {file = "regex-2024.4.28-cp312-cp312-win32.whl", hash = "sha256:7f3502f03b4da52bbe8ba962621daa846f38489cae5c4a7b5d738f15f6443d17"}, - {file = "regex-2024.4.28-cp312-cp312-win_amd64.whl", hash = "sha256:0dd3f69098511e71880fb00f5815db9ed0ef62c05775395968299cb400aeab82"}, - {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:374f690e1dd0dbdcddea4a5c9bdd97632cf656c69113f7cd6a361f2a67221cb6"}, - {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f87ae6b96374db20f180eab083aafe419b194e96e4f282c40191e71980c666"}, - {file = "regex-2024.4.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5dbc1bcc7413eebe5f18196e22804a3be1bfdfc7e2afd415e12c068624d48247"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f85151ec5a232335f1be022b09fbbe459042ea1951d8a48fef251223fc67eee1"}, - {file = 
"regex-2024.4.28-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57ba112e5530530fd175ed550373eb263db4ca98b5f00694d73b18b9a02e7185"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:224803b74aab56aa7be313f92a8d9911dcade37e5f167db62a738d0c85fdac4b"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a54a047b607fd2d2d52a05e6ad294602f1e0dec2291152b745870afc47c1397"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a2a512d623f1f2d01d881513af9fc6a7c46e5cfffb7dc50c38ce959f9246c94"}, - {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c06bf3f38f0707592898428636cbb75d0a846651b053a1cf748763e3063a6925"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1031a5e7b048ee371ab3653aad3030ecfad6ee9ecdc85f0242c57751a05b0ac4"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7a353ebfa7154c871a35caca7bfd8f9e18666829a1dc187115b80e35a29393e"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7e76b9cfbf5ced1aca15a0e5b6f229344d9b3123439ffce552b11faab0114a02"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5ce479ecc068bc2a74cb98dd8dba99e070d1b2f4a8371a7dfe631f85db70fe6e"}, - {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d77b6f63f806578c604dca209280e4c54f0fa9a8128bb8d2cc5fb6f99da4150"}, - {file = "regex-2024.4.28-cp38-cp38-win32.whl", hash = "sha256:d84308f097d7a513359757c69707ad339da799e53b7393819ec2ea36bc4beb58"}, - {file = "regex-2024.4.28-cp38-cp38-win_amd64.whl", hash = "sha256:2cc1b87bba1dd1a898e664a31012725e48af826bf3971e786c53e32e02adae6c"}, - {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:7413167c507a768eafb5424413c5b2f515c606be5bb4ef8c5dee43925aa5718b"}, - {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:108e2dcf0b53a7c4ab8986842a8edcb8ab2e59919a74ff51c296772e8e74d0ae"}, - {file = "regex-2024.4.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f1c5742c31ba7d72f2dedf7968998730664b45e38827637e0f04a2ac7de2f5f1"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecc6148228c9ae25ce403eade13a0961de1cb016bdb35c6eafd8e7b87ad028b1"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7d893c8cf0e2429b823ef1a1d360a25950ed11f0e2a9df2b5198821832e1947"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4290035b169578ffbbfa50d904d26bec16a94526071ebec3dadbebf67a26b25e"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a22ae1cfd82e4ffa2066eb3390777dc79468f866f0625261a93e44cdf6482b"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd24fd140b69f0b0bcc9165c397e9b2e89ecbeda83303abf2a072609f60239e2"}, - {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:39fb166d2196413bead229cd64a2ffd6ec78ebab83fff7d2701103cf9f4dfd26"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9301cc6db4d83d2c0719f7fcda37229691745168bf6ae849bea2e85fc769175d"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7c3d389e8d76a49923683123730c33e9553063d9041658f23897f0b396b2386f"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:99ef6289b62042500d581170d06e17f5353b111a15aa6b25b05b91c6886df8fc"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_s390x.whl", hash = 
"sha256:b91d529b47798c016d4b4c1d06cc826ac40d196da54f0de3c519f5a297c5076a"}, - {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:43548ad74ea50456e1c68d3c67fff3de64c6edb85bcd511d1136f9b5376fc9d1"}, - {file = "regex-2024.4.28-cp39-cp39-win32.whl", hash = "sha256:05d9b6578a22db7dedb4df81451f360395828b04f4513980b6bd7a1412c679cc"}, - {file = "regex-2024.4.28-cp39-cp39-win_amd64.whl", hash = "sha256:3986217ec830c2109875be740531feb8ddafe0dfa49767cdcd072ed7e8927962"}, - {file = "regex-2024.4.28.tar.gz", hash = "sha256:83ab366777ea45d58f72593adf35d36ca911ea8bd838483c1823b883a121b0e4"}, + {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"}, + {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"}, + {file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"}, + {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"}, + {file = 
"regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"}, + {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"}, + {file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"}, + {file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = "sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"}, + {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"}, + {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"}, + {file = "regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"}, + {file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"}, + {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"}, + {file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"}, + {file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"}, + {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"}, + {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"}, + {file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash 
= "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"}, + {file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"}, + {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"}, + {file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"}, + {file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"}, + {file = 
"regex-2024.5.15-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835"}, + {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850"}, + {file = "regex-2024.5.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa"}, + {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f"}, + {file = 
"regex-2024.5.15-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67"}, + {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741"}, + {file = "regex-2024.5.15-cp38-cp38-win32.whl", hash = "sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9"}, + {file = "regex-2024.5.15-cp38-cp38-win_amd64.whl", hash = "sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569"}, + {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133"}, + {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1"}, + {file = "regex-2024.5.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c"}, + {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d"}, + {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456"}, + {file = "regex-2024.5.15-cp39-cp39-win32.whl", hash = "sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694"}, + {file = "regex-2024.5.15-cp39-cp39-win_amd64.whl", hash = "sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388"}, + {file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"}, ] [[package]] name = "requests" -version = "2.31.0" +version = "2.32.2" description = "Python HTTP for Humans." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, + {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, ] [package.dependencies] @@ -2163,19 +2222,18 @@ pbr = "*" [[package]] name = "setuptools" -version = "69.5.1" +version = "70.0.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"}, - {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"}, + {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, + {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home 
(>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "six" @@ -2213,6 +2271,33 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "tox" +version = "4.15.0" +description = "tox is a generic virtualenv management and test command line tool" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tox-4.15.0-py3-none-any.whl", hash = "sha256:300055f335d855b2ab1b12c5802de7f62a36d4fd53f30bd2835f6a201dda46ea"}, + {file = "tox-4.15.0.tar.gz", hash = "sha256:7a0beeef166fbe566f54f795b4906c31b428eddafc0102ac00d20998dd1933f6"}, +] + +[package.dependencies] +cachetools = ">=5.3.2" +chardet = ">=5.2" +colorama = ">=0.4.6" +filelock = ">=3.13.1" 
+packaging = ">=23.2" +platformdirs = ">=4.1" +pluggy = ">=1.3" +pyproject-api = ">=1.6.1" +tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} +virtualenv = ">=20.25" + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-argparse-cli (>=1.11.1)", "sphinx-autodoc-typehints (>=1.25.2)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.11)"] +testing = ["build[virtualenv] (>=1.0.3)", "covdefaults (>=2.3)", "detect-test-pollution (>=1.2)", "devpi-process (>=1)", "diff-cover (>=8.0.2)", "distlib (>=0.3.8)", "flaky (>=3.7)", "hatch-vcs (>=0.4)", "hatchling (>=1.21)", "psutil (>=5.9.7)", "pytest (>=7.4.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-xdist (>=3.5)", "re-assert (>=1.1)", "time-machine (>=2.13)", "wheel (>=0.42)"] + [[package]] name = "types-awscrt" version = "0.20.9" @@ -2235,6 +2320,17 @@ files = [ {file = "types_jmespath-1.0.2.20240106-py3-none-any.whl", hash = "sha256:c3e715fcaae9e5f8d74e14328fdedc4f2b3f0e18df17f3e457ae0a18e245bde0"}, ] +[[package]] +name = "types-pyyaml" +version = "6.0.12.20240311" +description = "Typing stubs for PyYAML" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"}, + {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, +] + [[package]] name = "types-requests" version = "2.31.0.6" @@ -2273,13 +2369,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = 
"typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.0-py3-none-any.whl", hash = "sha256:b349c66bea9016ac22978d800cfff206d5f9816951f12a7d0ec5578b0a819594"}, + {file = "typing_extensions-4.12.0.tar.gz", hash = "sha256:8cbcdc8606ebcb0d95453ad7dc5065e6237b6aa230a31e81d0f440c30fed5fd8"}, ] [[package]] @@ -2315,6 +2411,26 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "virtualenv" +version = "20.26.2" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.26.2-py3-none-any.whl", hash = "sha256:a624db5e94f01ad993d476b9ee5346fdf7b9de43ccaee0e0197012dc838a0e9b"}, + {file = "virtualenv-20.26.2.tar.gz", hash = "sha256:82bf0f4eebbb78d36ddaee0283d43fe5736b53880b8a8cdcd37390a07ac3741c"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + [[package]] name = "werkzeug" version = "3.0.3" @@ -2424,20 +2540,20 @@ files = [ [[package]] name = "zipp" -version = "3.18.1" +version = "3.19.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"}, - {file = "zipp-3.18.1.tar.gz", hash = 
"sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"}, + {file = "zipp-3.19.0-py3-none-any.whl", hash = "sha256:96dc6ad62f1441bcaccef23b274ec471518daf4fbbc580341204936a5a3dddec"}, + {file = "zipp-3.19.0.tar.gz", hash = "sha256:952df858fb3164426c976d9338d3961e8e8b3758e2e059e0f754b8c4262625ee"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [metadata] lock-version = "2.0" python-versions = "^3.8.1" -content-hash = "5a40e3b393f109e3f011031b3e21d9d3353415d5da2e4f360e4644d394186c71" +content-hash = "c867f1d50ffc77ecb8932709c072160991ab9a485f42fff44dc0160807ee58f0" diff --git a/source/cli/pyproject.toml b/source/cli/pyproject.toml index 0d9fd10c..a0c9e4ad 100644 --- a/source/cli/pyproject.toml +++ b/source/cli/pyproject.toml @@ -1,38 +1,43 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +# ~~ Generated by projen. To modify, edit .projenrc.js and run "npx projen". 
[tool.poetry] name = "instance_scheduler_cli" -version = "1.5.3" +version = "3.0.0" description = "Instance Scheduler on AWS CLI" license = "Apache-2.0" -authors = ["Amazon Web Services"] +authors = [ "Amazon Web Services" ] homepage = "https://aws.amazon.com/solutions/implementations/instance-scheduler-on-aws/" -repository = "https://github.com/aws-solutions/instance-scheduler-on-aws" -documentation = "https://docs.aws.amazon.com/solutions/latest/instance-scheduler-on-aws/solution-overview.html" - -[tool.poetry.scripts] -scheduler-cli = "instance_scheduler_cli:__main__" +readme = "README.md" -[tool.poetry.dependencies] -python = "^3.8.1" -boto3 = "^1.26.90" -jmespath = "^1.0.1" + [tool.poetry.dependencies] + boto3 = "^1.34.1" + jmespath = "^1.0.1" + python = "^3.8.1" [tool.poetry.group.dev.dependencies] black = "^24.3.0" -boto3-stubs-lite = {extras = ["cloudformation", "lambda"], version = "^1.26.90"} -cli-test-helpers = "^3.4.0" flake8 = "^6.1.0" isort = "^5.12.0" -# held back, 4.18.0 is a breaking change jsonschema = "~4.17.3" -moto = {extras = ["cloudformation", "lambda"], version = "^5.0.2"} -mypy = "^1.5.1" -pytest = "^7.4.2" +mypy = "^1.7.1" pytest-cov = "^4.1.0" +pytest = "^7.4.3" +tox = "^4.11.4" types-jmespath = "^1.0.1" +types-PyYAML = "^6.0.12.12" types-requests = "2.31.0.6" + + [tool.poetry.group.dev.dependencies.boto3-stubs-lite] + version = "^1.34.1" + extras = [ "cloudformation", "lambda" ] + + [tool.poetry.group.dev.dependencies.moto] + version = "^5.0.2" + extras = [ "cloudformation", "lambda" ] + + [tool.poetry.scripts] + scheduler-cli = "instance_scheduler_cli:__main__" + +[build-system] +requires = [ "poetry-core" ] +build-backend = "poetry.core.masonry.api" diff --git a/source/cli/tests/test_cli.py b/source/cli/tests/test_cli.py index 71d13425..0fd5c91f 100644 --- a/source/cli/tests/test_cli.py +++ b/source/cli/tests/test_cli.py @@ -1,18 +1,23 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -from cli_test_helpers import shell +import subprocess -def test_cli_entrypoint_exists() -> None: - result = shell("scheduler-cli --help") - assert result.exit_code == 0 +def test_run_as_module() -> None: + result = subprocess.run( + ["python", "-m", "instance_scheduler_cli", "--version"], + stdout=subprocess.DEVNULL, + ) + assert result.stderr is None def test_calling_with_no_args_exits_gracefully() -> None: - result = shell("scheduler-cli") - assert result.exit_code == 0 - - -def test_run_as_module() -> None: - result = shell("python -m instance_scheduler_cli --version") - assert result.exit_code == 0 + result = subprocess.run( + [ + "python", + "-m", + "instance_scheduler_cli", + ], + stdout=subprocess.DEVNULL, + ) + assert result.stderr is None diff --git a/source/cli/tox.ini b/source/cli/tox.ini index 23ba185c..77850307 100644 --- a/source/cli/tox.ini +++ b/source/cli/tox.ini @@ -1,14 +1,14 @@ ; Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. ; SPDX-License-Identifier: Apache-2.0 [tox] -env_list = format, lint, py310-report, py3{8,9,11}-noreport +env_list = format, lint, py311-report, py3{8,9,10,12}-noreport minversion = 4.0.13 isolated_build = true [testenv:format] skip_install = true deps = - black + black~=24.1.0 isort commands = isort --profile black --check . @@ -29,7 +29,7 @@ commands = poetry run mypy . poetry run flake8 . -[testenv:py3{8,9,10,11}-{report, noreport}] +[testenv:py3{8,9,10,11,12}-{report, noreport}] allowlist_externals = poetry deps = poetry pass_env = PYTHON_VERSION diff --git a/source/instance-scheduler.ts b/source/instance-scheduler.ts index 08363c01..60025420 100644 --- a/source/instance-scheduler.ts +++ b/source/instance-scheduler.ts @@ -2,10 +2,10 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 import { App, Aspects, DefaultStackSynthesizer, StackSynthesizer } from "aws-cdk-lib"; -import { AwsSolutionsChecks, NagSuppressions } from "cdk-nag"; +import { AwsSolutionsChecks } from "cdk-nag"; import { getSolutionContext } from "./instance-scheduler/lib/cdk-context"; import { InstanceSchedulerStack } from "./instance-scheduler/lib/instance-scheduler-stack"; -import { InstanceSchedulerRemoteStack } from "./instance-scheduler/lib/remote-stack"; +import { SpokeStack } from "./instance-scheduler/lib/remote-stack"; import { SourceProvider, TestingPipelineStack } from "./pipeline/lib/testing-pipeline-stack"; import { E2eTestStack } from "./pipeline/lib/e2e-test-stack"; import { PipelineBootstrapStack } from "./pipeline/lib/pipeline-bootstrap-stack"; @@ -20,7 +20,7 @@ interface AppProps { function addAppStacks(app: App, props: AppProps): void { const solutionDetails = getSolutionContext(app); - const hubStack = new InstanceSchedulerStack(app, "instance-scheduler-on-aws", { + new InstanceSchedulerStack(app, "instance-scheduler-on-aws", { synthesizer: props.synthesizer, description: `(${solutionDetails.solutionId}) ${solutionDetails.solutionName} ${props.solutionVersion}`, solutionId: solutionDetails.solutionId, @@ -28,9 +28,10 @@ function addAppStacks(app: App, props: AppProps): void { solutionVersion: props.solutionVersion, appregApplicationName: solutionDetails.appRegAppName, appregSolutionName: solutionDetails.appRegSolutionName, + analyticsReporting: false, }); - new InstanceSchedulerRemoteStack(app, "instance-scheduler-on-aws-remote", { + new SpokeStack(app, "instance-scheduler-on-aws-remote", { synthesizer: props.synthesizer, description: `(${solutionDetails.solutionId}S) ${solutionDetails.solutionName} remote ${props.solutionVersion}`, solutionId: solutionDetails.solutionId, @@ -38,18 +39,8 @@ function addAppStacks(app: App, props: AppProps): void { solutionVersion: props.solutionVersion, appregApplicationName: 
solutionDetails.appRegAppName, appregSolutionName: solutionDetails.appRegSolutionName, + analyticsReporting: false, }); - - NagSuppressions.addResourceSuppressionsByPath( - hubStack, - "/instance-scheduler-on-aws/SchedulerRole/DefaultPolicy/Resource", - [ - { - id: "AwsSolutions-IAM5", - reason: "The scheduling lambda must access multiple resources across services", - }, - ], - ); } function getSourceProvider(sourceType: string): SourceProvider { diff --git a/source/instance-scheduler/lib/anonymized-metrics-environment.ts b/source/instance-scheduler/lib/anonymized-metrics-environment.ts new file mode 100644 index 00000000..4dfb684c --- /dev/null +++ b/source/instance-scheduler/lib/anonymized-metrics-environment.ts @@ -0,0 +1,12 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +export interface AnonymizedMetricsEnvironment { + // environment variables for the metrics.py singleton service + // omitting these variables will disable metrics reporting + SEND_METRICS: string; + METRICS_URL: string; + SOLUTION_ID: string; + SOLUTION_VERSION: string; + SCHEDULING_INTERVAL_MINUTES: string; + METRICS_UUID: string; +} diff --git a/source/instance-scheduler/lib/app-registry.ts b/source/instance-scheduler/lib/app-registry.ts index f166b7b9..c6c3d051 100644 --- a/source/instance-scheduler/lib/app-registry.ts +++ b/source/instance-scheduler/lib/app-registry.ts @@ -4,6 +4,8 @@ import * as cdk from "aws-cdk-lib"; import { Construct } from "constructs"; import { Aws, Stack, Tags } from "aws-cdk-lib"; import * as appreg from "@aws-cdk/aws-servicecatalogappregistry-alpha"; +import { CfnResourceAssociation } from "aws-cdk-lib/aws-servicecatalogappregistry"; +import { ConditionAspect } from "./cfn"; export interface AppRegistryForInstanceSchedulerProps extends cdk.StackProps { readonly solutionId: string; @@ -17,6 +19,10 @@ export class AppRegistryForInstanceScheduler extends Construct { constructor(scope: Stack, id: 
string, props: AppRegistryForInstanceSchedulerProps) { super(scope, id); + const shouldDeploy = new cdk.CfnCondition(this, "ShouldDeploy", { + expression: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(Aws.PARTITION, "aws-cn")), + }); + const map = new cdk.CfnMapping(this, "Solution"); map.setValue("Data", "ID", props.solutionId); map.setValue("Data", "Version", props.solutionVersion); @@ -36,7 +42,12 @@ export class AppRegistryForInstanceScheduler extends Construct { "SolutionName", )}`, }); + + cdk.Aspects.of(application).add(new ConditionAspect(shouldDeploy)); + application.associateApplicationWithStack(scope); + cdk.Aspects.of(scope).add(new ConditionAspect(shouldDeploy, CfnResourceAssociation)); + Tags.of(application).add("Solutions:SolutionID", map.findInMap("Data", "ID")); Tags.of(application).add("Solutions:SolutionName", map.findInMap("Data", "SolutionName")); Tags.of(application).add("Solutions:SolutionVersion", map.findInMap("Data", "Version")); diff --git a/source/instance-scheduler/lib/asg-scheduler.ts b/source/instance-scheduler/lib/asg-scheduler.ts new file mode 100644 index 00000000..1af535c7 --- /dev/null +++ b/source/instance-scheduler/lib/asg-scheduler.ts @@ -0,0 +1,92 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import { Aspects, CfnCondition, Duration } from "aws-cdk-lib"; +import { Table } from "aws-cdk-lib/aws-dynamodb"; +import { Rule, RuleTargetInput, Schedule } from "aws-cdk-lib/aws-events"; +import { LambdaFunction as LambdaFunctionTarget } from "aws-cdk-lib/aws-events-targets"; +import { Key } from "aws-cdk-lib/aws-kms"; +import { Function as LambdaFunction } from "aws-cdk-lib/aws-lambda"; +import { RetentionDays } from "aws-cdk-lib/aws-logs"; +import { Topic } from "aws-cdk-lib/aws-sns"; +import { Construct } from "constructs"; +import { AnonymizedMetricsEnvironment } from "./anonymized-metrics-environment"; +import { AsgSchedulingRole } from "./iam/asg-scheduling-role"; +import { AsgHandler } from "./lambda-functions/asg-handler"; +import { AsgOrchestrator } from "./lambda-functions/asg-orchestrator"; +import { FunctionFactory } from "./lambda-functions/function-factory"; +import { ScheduleUpdateHandler } from "./lambda-functions/schedule-update-handler"; +import { ConditionAspect } from "./cfn"; + +interface AsgSchedulerProps { + readonly USER_AGENT_EXTRA: string; + readonly asgHandler: AsgHandler; + readonly configTable: Table; + readonly enableAsgs: CfnCondition; + readonly enableDebugLogging: CfnCondition; + readonly enableSchedulingHubAccount: CfnCondition; + readonly encryptionKey: Key; + readonly factory: FunctionFactory; + readonly logRetentionDays: RetentionDays; + readonly metricsEnv: AnonymizedMetricsEnvironment; + readonly namespace: string; + readonly regions: string[]; + readonly snsErrorReportingTopic: Topic; + readonly solutionVersion: string; +} + +export class AsgScheduler extends Construct { + public asgOrchestratorLambdaFunction: LambdaFunction; + + constructor(scope: Construct, id: string, props: AsgSchedulerProps) { + super(scope, id); + + const asgOrchestrator = new AsgOrchestrator(this, { + USER_AGENT_EXTRA: props.USER_AGENT_EXTRA, + asgHandler: props.asgHandler.lambdaFunction, + configTable: 
props.configTable, + enableDebugLogging: props.enableDebugLogging, + enableSchedulingHubAccount: props.enableSchedulingHubAccount, + encryptionKey: props.encryptionKey, + factory: props.factory, + logRetentionDays: props.logRetentionDays, + metricsEnv: props.metricsEnv, + regions: props.regions, + snsErrorReportingTopic: props.snsErrorReportingTopic, + }); + this.asgOrchestratorLambdaFunction = asgOrchestrator.lambdaFunction; + + new ScheduleUpdateHandler(this, { + USER_AGENT_EXTRA: props.USER_AGENT_EXTRA, + asgHandler: props.asgHandler.lambdaFunction, + configTable: props.configTable, + enableDebugLogging: props.enableDebugLogging, + enableSchedulingHubAccount: props.enableSchedulingHubAccount, + encryptionKey: props.encryptionKey, + factory: props.factory, + logRetentionDays: props.logRetentionDays, + metricsEnv: props.metricsEnv, + regions: props.regions, + snsErrorReportingTopic: props.snsErrorReportingTopic, + }); + + new Rule(this, "ASGOrchRule", { + description: `Instance Scheduler - Rule to trigger scheduling for AutoScaling Groups version ${props.solutionVersion}`, + schedule: Schedule.rate(Duration.hours(1)), + targets: [ + new LambdaFunctionTarget(asgOrchestrator.lambdaFunction, { + event: RuleTargetInput.fromObject({}), + retryAttempts: 5, + }), + ], + }); + + new AsgSchedulingRole(this, "AsgSchedulingRole", { + assumedBy: props.asgHandler.role.grantPrincipal, + namespace: props.namespace, + }); + + const conditionAspect = new ConditionAspect(props.enableAsgs); + Aspects.of(this).add(conditionAspect); + } +} diff --git a/source/instance-scheduler/lib/cfn-nag.ts b/source/instance-scheduler/lib/cfn-nag.ts new file mode 100644 index 00000000..6229f24e --- /dev/null +++ b/source/instance-scheduler/lib/cfn-nag.ts @@ -0,0 +1,26 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { CfnResource } from "aws-cdk-lib"; +import { IConstruct } from "constructs"; + +export interface CfnNagSuppression { + readonly id: string; + readonly reason: string; +} + +export function addCfnNagSuppressions(resource: IConstruct, ...suppressions: CfnNagSuppression[]): void { + const cfnResource = resource.node.defaultChild as CfnResource; + if (!cfnResource?.cfnOptions) { + throw new Error(`Resource ${cfnResource?.logicalId} has no cfnOptions, unable to add cfn-nag suppression`); + } + const existingSuppressions: CfnNagSuppression[] = cfnResource.cfnOptions.metadata?.cfn_nag?.rules_to_suppress; + if (existingSuppressions) { + existingSuppressions.push(...suppressions); + } else { + cfnResource.cfnOptions.metadata = { + cfn_nag: { + rules_to_suppress: [...suppressions], + }, + }; + } +} diff --git a/source/instance-scheduler/lib/cfn.ts b/source/instance-scheduler/lib/cfn.ts new file mode 100644 index 00000000..c4ba8434 --- /dev/null +++ b/source/instance-scheduler/lib/cfn.ts @@ -0,0 +1,183 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { Aws, CfnCondition, CfnParameter, CfnParameterProps, CfnResource, Fn, IAspect, Stack } from "aws-cdk-lib"; +import { Construct, IConstruct } from "constructs"; + +export const UniqueStackIdPart = Fn.select(2, Fn.split("/", `${Aws.STACK_ID}`)); + +export function overrideLogicalId(construct: IConstruct, logicalId: string) { + const cfnResource = construct.node.defaultChild as CfnResource; + if (!cfnResource) { + throw new Error("Unable to override logical ID, not a CfnResource"); + } + + cfnResource.overrideLogicalId(logicalId); +} + +export function overrideRetentionPolicies(construct: IConstruct, value: unknown) { + const cfnResource = construct.node.defaultChild as CfnResource; + if (!cfnResource) { + throw new Error("Unable to override retention policies, not a CfnResource"); + } + cfnResource.addOverride("DeletionPolicy", value); + cfnResource.addOverride("UpdateReplacePolicy", value); +} + +export function overrideProperty(construct: IConstruct, propertyPath: string, value: unknown) { + const cfnResource = construct.node.defaultChild as CfnResource; + if (!cfnResource) { + throw new Error("Unable to override property, not a CfnResource"); + } + + cfnResource.addPropertyOverride(propertyPath, value); +} + +export const YesNoType = { + Yes: "Yes", + No: "No", +} as const; + +export const yesNoValues: (keyof typeof YesNoType)[] = [YesNoType.Yes, YesNoType.No]; + +export function yesNoCondition(scope: Construct, id: string, value: string): CfnCondition { + return new CfnCondition(scope, id, { expression: Fn.conditionEquals(value, YesNoType.Yes) }); +} + +export const EnabledDisabledType = { + Enabled: "Enabled", + Disabled: "Disabled", +} as const; + +export const enabledDisabledValues: (keyof typeof EnabledDisabledType)[] = [ + EnabledDisabledType.Enabled, + EnabledDisabledType.Disabled, +]; + +export function enabledDisabledCondition(scope: Construct, id: string, value: string): CfnCondition { + return new 
CfnCondition(scope, id, { expression: Fn.conditionEquals(value, EnabledDisabledType.Enabled) }); +} + +export function trueCondition(scope: Construct, id: string): CfnCondition { + return new CfnCondition(scope, id, { expression: Fn.conditionEquals(true, true) }); +} + +export function cfnConditionToTrueFalse(condition: CfnCondition): string { + return Fn.conditionIf(condition.logicalId, "True", "False").toString(); +} + +const cfnInterfaceKey = "AWS::CloudFormation::Interface"; +const parameterGroupsKey = "ParameterGroups"; +const parameterLabelsKey = "ParameterLabels"; + +function initCfnInterface(scope: Construct): void { + const stack = Stack.of(scope); + if (!stack.templateOptions.metadata) { + stack.templateOptions.metadata = {}; + } + const metadata = stack.templateOptions.metadata; + if (!(cfnInterfaceKey in metadata)) { + metadata[cfnInterfaceKey] = {}; + } + const cfnInterface = metadata[cfnInterfaceKey]; + if (!(parameterLabelsKey in cfnInterface)) { + cfnInterface[parameterLabelsKey] = {}; + } + if (!(parameterGroupsKey in cfnInterface)) { + cfnInterface[parameterGroupsKey] = []; + } +} + +export function addParameterLabel(parameter: CfnParameter, label: string): void { + const stack = Stack.of(parameter); + initCfnInterface(stack); + stack.templateOptions.metadata![cfnInterfaceKey][parameterLabelsKey][parameter.logicalId] = { default: label }; +} + +export interface ParameterGroup { + readonly label: string; + readonly parameters: CfnParameter[]; +} + +export function addParameterGroup(scope: Construct, group: ParameterGroup): void { + initCfnInterface(scope); + const stack = Stack.of(scope); + stack.templateOptions.metadata![cfnInterfaceKey][parameterGroupsKey].push({ + Label: { default: group.label }, + Parameters: group.parameters.map((parameter: CfnParameter) => parameter.logicalId), + }); +} + +export class ConditionAspect CfnResource> implements IAspect { + constructor( + private condition: CfnCondition, + private resourceType?: T, + ) {} + + 
visit(node: IConstruct): void { + if (node instanceof (this.resourceType ?? CfnResource)) { + node.cfnOptions.condition = this.condition; + } + } +} + +export interface ParameterWithLabelProps extends CfnParameterProps { + label?: string; +} + +export class ParameterWithLabel extends CfnParameter { + constructor(scope: Construct, id: string, props: ParameterWithLabelProps) { + super(scope, id, props); + + if (props.label) { + addParameterLabel(this, props.label); + } + } +} + +export interface YesNoParameterProps extends ParameterWithLabelProps { + default?: keyof typeof YesNoType; +} + +export class YesNoParameter extends ParameterWithLabel { + private condition?: CfnCondition; + private conditionId: string; + + constructor(scope: Construct, id: string, props?: ParameterWithLabelProps) { + super(scope, id, { + allowedValues: [YesNoType.Yes, YesNoType.No], + ...props, + }); + this.conditionId = `${id}Condition`; + } + + getCondition(): CfnCondition { + if (!this.condition) { + this.condition = yesNoCondition(this.stack, this.conditionId, this.valueAsString); + } + return this.condition; + } +} + +export interface EnabledDisabledParameterProps extends ParameterWithLabelProps { + default?: keyof typeof EnabledDisabledType; +} + +export class EnabledDisabledParameter extends ParameterWithLabel { + private condition?: CfnCondition; + private conditionId: string; + + constructor(scope: Construct, id: string, props?: ParameterWithLabelProps) { + super(scope, id, { + allowedValues: [EnabledDisabledType.Enabled, EnabledDisabledType.Disabled], + ...props, + }); + this.conditionId = `${id}Condition`; + } + + getCondition(): CfnCondition { + if (!this.condition) { + this.condition = enabledDisabledCondition(this.stack, this.conditionId, this.valueAsString); + } + return this.condition; + } +} diff --git a/source/instance-scheduler/lib/core-scheduler.ts b/source/instance-scheduler/lib/core-scheduler.ts index fe53352c..49954ea0 100644 --- 
a/source/instance-scheduler/lib/core-scheduler.ts +++ b/source/instance-scheduler/lib/core-scheduler.ts @@ -1,141 +1,392 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +import { Aws, CfnCondition, Fn, RemovalPolicy, Stack } from "aws-cdk-lib"; +import { AttributeType, BillingMode, StreamViewType, Table, TableEncryption } from "aws-cdk-lib/aws-dynamodb"; +import { CfnRule, Rule, RuleTargetInput, Schedule } from "aws-cdk-lib/aws-events"; +import { LambdaFunction as LambdaFunctionTarget } from "aws-cdk-lib/aws-events-targets"; +import { Role } from "aws-cdk-lib/aws-iam"; +import { Alias, Key } from "aws-cdk-lib/aws-kms"; +import { Function as LambdaFunction } from "aws-cdk-lib/aws-lambda/lib/function"; +import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs"; +import { Topic } from "aws-cdk-lib/aws-sns"; +import { NagSuppressions } from "cdk-nag"; +import { AnonymizedMetricsEnvironment } from "./anonymized-metrics-environment"; +import { AppRegistryForInstanceScheduler } from "./app-registry"; +import { AsgScheduler } from "./asg-scheduler"; +import { cfnConditionToTrueFalse, overrideRetentionPolicies, overrideLogicalId, overrideProperty } from "./cfn"; +import { addCfnNagSuppressions } from "./cfn-nag"; +import { OperationalInsightsDashboard } from "./dashboard/ops-insights-dashboard"; +import { AsgSchedulingRole } from "./iam/asg-scheduling-role"; +import { SchedulerRole } from "./iam/scheduler-role"; +import { AsgHandler } from "./lambda-functions/asg-handler"; +import { FunctionFactory } from "./lambda-functions/function-factory"; +import { MainLambda } from "./lambda-functions/main"; +import { MetricsUuidGenerator } from "./lambda-functions/metrics-uuid-generator"; +import { SchedulingOrchestrator } from "./lambda-functions/scheduling-orchestrator"; +import { SchedulingRequestHandlerLambda } from "./lambda-functions/scheduling-request-handler"; +import { SpokeRegistrationLambda } from 
"./lambda-functions/spoke-registration"; +import { SchedulingIntervalToCron } from "./scheduling-interval-mappings"; -import { LambdaToDynamoDB } from "@aws-solutions-constructs/aws-lambda-dynamodb"; -import { Aws, RemovalPolicy, Stack } from "aws-cdk-lib"; - -import * as iam from "aws-cdk-lib/aws-iam"; -import * as lambda from "aws-cdk-lib/aws-lambda"; -import * as cdk from "aws-cdk-lib"; -import * as dynamodb from "aws-cdk-lib/aws-dynamodb"; -import * as kms from "aws-cdk-lib/aws-kms"; -import * as python from "@aws-cdk/aws-lambda-python-alpha"; -export interface InstanceSchedulerLambdaProps { +export interface CoreSchedulerProps { + readonly solutionName: string; readonly solutionVersion: string; - readonly memorySize: number; - readonly schedulerRole: iam.Role; - readonly kmsEncryptionKey: kms.Key; - /** - * Lambda Function environment variables - */ - readonly environment?: { - [key: string]: string; - }; + readonly solutionId: string; + readonly memorySizeMB: number; + readonly principals: string[]; + readonly logRetentionDays: RetentionDays; + readonly schedulingEnabled: CfnCondition; + readonly schedulingIntervalMinutes: number; + readonly namespace: string; + readonly sendAnonymizedMetrics: CfnCondition; + readonly enableDebugLogging: CfnCondition; + readonly tagKey: string; + readonly defaultTimezone: string; + readonly enableEc2: CfnCondition; + readonly enableRds: CfnCondition; + readonly enableRdsClusters: CfnCondition; + readonly enableNeptune: CfnCondition; + readonly enableDocdb: CfnCondition; + readonly enableRdsSnapshots: CfnCondition; + readonly regions: string[]; + readonly enableSchedulingHubAccount: CfnCondition; + readonly enableEc2SsmMaintenanceWindows: CfnCondition; + readonly startTags: string; + readonly stopTags: string; + readonly enableAwsOrganizations: CfnCondition; + readonly appregSolutionName: string; + readonly appregApplicationName: string; + readonly enableOpsInsights: CfnCondition; + readonly kmsKeyArns: string[]; + readonly 
factory: FunctionFactory; + readonly enableDdbDeletionProtection: CfnCondition; + readonly enableAsgs: CfnCondition; + readonly scheduledTagKey: string; + readonly rulePrefix: string; } + export class CoreScheduler { - public readonly lambdaFunction: lambda.Function; - public readonly configTable: dynamodb.Table; - private readonly stateTable: dynamodb.Table; - private readonly maintenanceWindowTable: dynamodb.Table; - - constructor(scope: Stack, props: InstanceSchedulerLambdaProps) { - this.lambdaFunction = new python.PythonFunction(scope, "scheduler-lambda", { - functionName: Aws.STACK_NAME + "-InstanceSchedulerMain", - description: "EC2 and RDS instance scheduler, version " + props.solutionVersion, - entry: `${__dirname}/../../app`, - index: "instance_scheduler/main.py", - handler: "lambda_handler", - runtime: lambda.Runtime.PYTHON_3_10, - role: props.schedulerRole, - memorySize: props.memorySize, - timeout: cdk.Duration.seconds(300), - environment: props.environment, - tracing: lambda.Tracing.ACTIVE, - bundling: { - assetExcludes: [".mypy_cache", ".tox", "__pycache__"], - }, + public readonly cfnScheduleCustomResourceHandler: LambdaFunction; + public readonly hubSchedulerRole: Role; + public readonly configTable: Table; + public readonly topic: Topic; + public readonly asgOrch: LambdaFunction; + + constructor(scope: Stack, props: CoreSchedulerProps) { + new AppRegistryForInstanceScheduler(scope, "AppRegistryForInstanceScheduler", { + solutionId: props.solutionId, + solutionName: props.solutionName, + solutionVersion: props.solutionVersion, + appregSolutionName: props.appregSolutionName, + appregAppName: props.appregApplicationName, }); - const lambdaToDynamoDbConstruct = new LambdaToDynamoDB(scope, "instance-scheduler-lambda", { - existingLambdaObj: this.lambdaFunction, - dynamoTableProps: { - partitionKey: { - name: "service", - type: dynamodb.AttributeType.STRING, - }, - sortKey: { - name: "account-region", - type: dynamodb.AttributeType.STRING, - }, - 
billingMode: dynamodb.BillingMode.PAY_PER_REQUEST, - removalPolicy: RemovalPolicy.DESTROY, - pointInTimeRecovery: true, - }, - tablePermissions: "ReadWrite", + const USER_AGENT_EXTRA = `AwsSolution/${props.solutionId}/${props.solutionVersion}`; + + const metricsUuidGenerator = new MetricsUuidGenerator(scope, { + solutionName: props.solutionName, + logRetentionDays: props.logRetentionDays, + USER_AGENT_EXTRA, + STACK_ID: Aws.STACK_ID, + UUID_KEY: `/Solutions/${props.solutionName}/UUID/`, + factory: props.factory, }); - this.stateTable = lambdaToDynamoDbConstruct.dynamoTable; + const key = new Key(scope, "InstanceSchedulerEncryptionKey", { + description: "Key for SNS", + enabled: true, + enableKeyRotation: true, + removalPolicy: RemovalPolicy.DESTROY, + }); + overrideLogicalId(key, "InstanceSchedulerEncryptionKey"); - const cfnStateTable = this.stateTable.node.defaultChild as dynamodb.CfnTable; - cfnStateTable.overrideLogicalId("StateTable"); - cfnStateTable.addPropertyOverride("SSESpecification", { - KMSMasterKeyId: props.kmsEncryptionKey.keyId, - SSEEnabled: true, - SSEType: "KMS", + const keyAlias = new Alias(scope, "InstanceSchedulerEncryptionKeyAlias", { + aliasName: `alias/${Aws.STACK_NAME}-instance-scheduler-encryption-key`, + targetKey: key, }); - this.configTable = new dynamodb.Table(scope, "ConfigTable", { - sortKey: { - name: "name", - type: dynamodb.AttributeType.STRING, - }, - partitionKey: { - name: "type", - type: dynamodb.AttributeType.STRING, - }, - billingMode: dynamodb.BillingMode.PAY_PER_REQUEST, + overrideLogicalId(keyAlias, "InstanceSchedulerEncryptionKeyAlias"); + + this.topic = new Topic(scope, "InstanceSchedulerSnsTopic", { + masterKey: key, + }); + overrideLogicalId(this.topic, "InstanceSchedulerSnsTopic"); + + const schedulerLogGroup = new LogGroup(scope, "SchedulerLogGroup", { + logGroupName: Aws.STACK_NAME + "-logs", removalPolicy: RemovalPolicy.DESTROY, + retention: props.logRetentionDays, + }); + overrideLogicalId(schedulerLogGroup, 
"SchedulerLogGroup"); + // todo: this may not be true anymore + addCfnNagSuppressions(schedulerLogGroup, { + id: "W84", + reason: + "CloudWatch log groups only have transactional data from the Lambda function, this template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group.", + }); + + const stateTable = new Table(scope, "StateTable", { + partitionKey: { name: "service", type: AttributeType.STRING }, + sortKey: { name: "account-region", type: AttributeType.STRING }, + billingMode: BillingMode.PAY_PER_REQUEST, pointInTimeRecovery: true, + encryption: TableEncryption.CUSTOMER_MANAGED, + encryptionKey: key, }); + overrideLogicalId(stateTable, "StateTable"); + overrideRetentionPolicies( + stateTable, + Fn.conditionIf(props.enableDdbDeletionProtection.logicalId, "Retain", "Delete"), + ); + overrideProperty( + stateTable, + "DeletionProtectionEnabled", + Fn.conditionIf(props.enableDdbDeletionProtection.logicalId, "True", "False"), + ); - const cfnConfigTable = this.configTable.node.defaultChild as dynamodb.CfnTable; - cfnConfigTable.overrideLogicalId("ConfigTable"); - cfnConfigTable.addPropertyOverride("SSESpecification", { - KMSMasterKeyId: props.kmsEncryptionKey.keyId, - SSEEnabled: true, - SSEType: "KMS", + this.configTable = new Table(scope, "ConfigTable", { + sortKey: { name: "name", type: AttributeType.STRING }, + partitionKey: { name: "type", type: AttributeType.STRING }, + billingMode: BillingMode.PAY_PER_REQUEST, + removalPolicy: RemovalPolicy.DESTROY, + pointInTimeRecovery: true, + encryption: TableEncryption.CUSTOMER_MANAGED, + encryptionKey: key, + stream: StreamViewType.KEYS_ONLY, }); + overrideLogicalId(this.configTable, "ConfigTable"); + overrideRetentionPolicies( + this.configTable, + Fn.conditionIf(props.enableDdbDeletionProtection.logicalId, "Retain", "Delete"), + ); + overrideProperty( + this.configTable, + "DeletionProtectionEnabled", + 
Fn.conditionIf(props.enableDdbDeletionProtection.logicalId, "True", "False"), + ); - this.maintenanceWindowTable = new dynamodb.Table(scope, "MaintenanceWindowTable", { - partitionKey: { - name: "Name", - type: dynamodb.AttributeType.STRING, - }, - sortKey: { - name: "account-region", - type: dynamodb.AttributeType.STRING, - }, - billingMode: dynamodb.BillingMode.PAY_PER_REQUEST, + const mwTable = new Table(scope, "MaintenanceWindowTable", { + partitionKey: { name: "account-region", type: AttributeType.STRING }, + sortKey: { name: "name-id", type: AttributeType.STRING }, + billingMode: BillingMode.PAY_PER_REQUEST, removalPolicy: RemovalPolicy.DESTROY, pointInTimeRecovery: true, + encryption: TableEncryption.CUSTOMER_MANAGED, + encryptionKey: key, }); + overrideLogicalId(mwTable, "MaintenanceWindowTable"); + overrideRetentionPolicies(mwTable, Fn.conditionIf(props.enableDdbDeletionProtection.logicalId, "Retain", "Delete")); + overrideProperty( + mwTable, + "DeletionProtectionEnabled", + Fn.conditionIf(props.enableDdbDeletionProtection.logicalId, "True", "False"), + ); - const cfnMaintenanceWindowTable = this.maintenanceWindowTable.node.defaultChild as dynamodb.CfnTable; - cfnMaintenanceWindowTable.overrideLogicalId("MaintenanceWindowTable"); - cfnMaintenanceWindowTable.addPropertyOverride("SSESpecification", { - KMSMasterKeyId: props.kmsEncryptionKey.keyId, - SSEEnabled: true, - SSEType: "KMS", + new SpokeRegistrationLambda(scope, { + snsErrorReportingTopic: this.topic, + scheduleLogGroup: schedulerLogGroup, + logRetentionDays: props.logRetentionDays, + USER_AGENT_EXTRA: USER_AGENT_EXTRA, + configTable: this.configTable, + solutionVersion: props.solutionVersion, + enableDebugLogging: props.enableDebugLogging, + principals: props.principals, + namespace: props.namespace, + enableAwsOrganizations: props.enableAwsOrganizations, + factory: props.factory, }); - this.lambdaFunction.addEnvironment("CONFIG_TABLE", cfnConfigTable.ref); - 
this.lambdaFunction.addEnvironment("MAINTENANCE_WINDOW_TABLE", cfnMaintenanceWindowTable.ref); - this.lambdaFunction.addEnvironment("STATE_TABLE", cfnStateTable.ref); + const metricsEnv: AnonymizedMetricsEnvironment = { + METRICS_URL: "https://metrics.awssolutionsbuilder.com/generic", + SEND_METRICS: cfnConditionToTrueFalse(props.sendAnonymizedMetrics), + SOLUTION_ID: props.solutionId, + SOLUTION_VERSION: props.solutionVersion, + SCHEDULING_INTERVAL_MINUTES: props.schedulingIntervalMinutes.toString(), + METRICS_UUID: metricsUuidGenerator.metricsUuid, + }; - const dynamodbPolicy = new iam.PolicyStatement({ - actions: [ - "dynamodb:DeleteItem", - "dynamodb:GetItem", - "dynamodb:PutItem", - "dynamodb:Query", - "dynamodb:Scan", - "dynamodb:BatchWriteItem", - "dynamodb:UpdateItem", + const mainFunction = new MainLambda(scope, { + description: "EC2 and RDS instance scheduler, version " + props.solutionVersion, + configTable: this.configTable, + scheduleLogGroup: schedulerLogGroup, + snsErrorReportingTopic: this.topic, + principals: props.principals, + logRetentionDays: props.logRetentionDays, + appenv: { + SCHEDULER_FREQUENCY: props.schedulingIntervalMinutes.toString(), + STACK_NAME: Aws.STACK_NAME, + SEND_METRICS: cfnConditionToTrueFalse(props.sendAnonymizedMetrics), + SOLUTION_ID: props.solutionId, + SOLUTION_VERSION: props.solutionVersion, + TRACE: cfnConditionToTrueFalse(props.enableDebugLogging), + USER_AGENT_EXTRA: USER_AGENT_EXTRA, + METRICS_URL: "https://metrics.awssolutionsbuilder.com/generic", + STACK_ID: Aws.STACK_ID, + UUID_KEY: `/Solutions/${props.solutionName}/UUID/`, + START_EC2_BATCH_SIZE: "5", + SCHEDULE_TAG_KEY: props.tagKey, + DEFAULT_TIMEZONE: props.defaultTimezone, + ENABLE_EC2_SERVICE: cfnConditionToTrueFalse(props.enableEc2), + ENABLE_RDS_SERVICE: cfnConditionToTrueFalse(props.enableRds), + ENABLE_RDS_CLUSTERS: cfnConditionToTrueFalse(props.enableRdsClusters), + ENABLE_NEPTUNE_SERVICE: cfnConditionToTrueFalse(props.enableNeptune), + 
ENABLE_DOCDB_SERVICE: cfnConditionToTrueFalse(props.enableDocdb), + ENABLE_RDS_SNAPSHOTS: cfnConditionToTrueFalse(props.enableRdsSnapshots), + SCHEDULE_REGIONS: Fn.join(",", props.regions), + APP_NAMESPACE: props.namespace, + SCHEDULER_ROLE_NAME: "Scheduler-Role", + ENABLE_SCHEDULE_HUB_ACCOUNT: cfnConditionToTrueFalse(props.enableSchedulingHubAccount), + ENABLE_EC2_SSM_MAINTENANCE_WINDOWS: cfnConditionToTrueFalse(props.enableEc2SsmMaintenanceWindows), + START_TAGS: props.startTags, + STOP_TAGS: props.stopTags, + ENABLE_AWS_ORGANIZATIONS: cfnConditionToTrueFalse(props.enableAwsOrganizations), + LOG_GROUP: schedulerLogGroup.logGroupName, + ISSUES_TOPIC_ARN: this.topic.topicArn, + METRICS_UUID: metricsUuidGenerator.metricsUuid, + STATE_TABLE: stateTable.tableName, + CONFIG_TABLE: this.configTable.tableName, + MAINTENANCE_WINDOW_TABLE: mwTable.tableName, + }, + factory: props.factory, + }); + + const schedulingRequestHandler = new SchedulingRequestHandlerLambda(scope, { + description: "Handles scheduling requests for Instance Scheduler on AWS, version " + props.solutionVersion, + namespace: props.namespace, + logRetentionDays: props.logRetentionDays, + memorySizeMB: props.memorySizeMB, + schedulerRoleName: SchedulerRole.roleName(props.namespace), + DEFAULT_TIMEZONE: props.defaultTimezone, + STACK_NAME: Aws.STACK_NAME, + scheduleLogGroup: schedulerLogGroup, + snsErrorReportingTopic: this.topic, + USER_AGENT_EXTRA: USER_AGENT_EXTRA, + configTable: this.configTable, + stateTable: stateTable, + maintWindowTable: mwTable, + startTags: props.startTags, + stopTags: props.stopTags, + enableRds: props.enableRds, + enableRdsClusters: props.enableRdsClusters, + enableNeptune: props.enableNeptune, + enableDocdb: props.enableDocdb, + enableRdsSnapshots: props.enableRdsSnapshots, + enableOpsMonitoring: props.enableOpsInsights, + enableDebugLogging: props.enableDebugLogging, + enableEc2SsmMaintenanceWindows: props.enableEc2SsmMaintenanceWindows, + tagKey: props.tagKey, + 
schedulingIntervalMinutes: props.schedulingIntervalMinutes, + metricsEnv: metricsEnv, + solutionName: props.solutionName, + factory: props.factory, + }); + + const orchestratorLambda = new SchedulingOrchestrator(scope, { + description: "scheduling orchestrator for Instance Scheduler on AWS, version " + props.solutionVersion, + logRetentionDays: props.logRetentionDays, + schedulingRequestHandlerLambda: schedulingRequestHandler.lambdaFunction, + enableDebugLogging: props.enableDebugLogging, + configTable: this.configTable, + snsErrorReportingTopic: this.topic, + snsKmsKey: key, + scheduleLogGroup: schedulerLogGroup, + USER_AGENT_EXTRA: USER_AGENT_EXTRA, + enableSchedulingHubAccount: props.enableSchedulingHubAccount, + enableEc2: props.enableEc2, + enableRds: props.enableRds, + enableRdsClusters: props.enableRdsClusters, + enableNeptune: props.enableNeptune, + enableDocdb: props.enableDocdb, + enableAsgs: props.enableAsgs, + regions: props.regions, + defaultTimezone: props.defaultTimezone, + enableRdsSnapshots: props.enableRdsSnapshots, + enableAwsOrganizations: props.enableAwsOrganizations, + enableEc2SsmMaintenanceWindows: props.enableEc2SsmMaintenanceWindows, + opsDashboardEnabled: props.enableOpsInsights, + startTags: props.startTags, + stopTags: props.stopTags, + metricsEnv: metricsEnv, + factory: props.factory, + }); + + const asgHandler = new AsgHandler(scope, { + namespace: props.namespace, + logRetentionDays: props.logRetentionDays, + configTable: this.configTable, + snsErrorReportingTopic: this.topic, + encryptionKey: key, + enableDebugLogging: props.enableDebugLogging, + metricsEnv, + tagKey: props.tagKey, + asgSchedulingRoleName: AsgSchedulingRole.roleName(props.namespace), + scheduledTagKey: props.scheduledTagKey, + rulePrefix: props.rulePrefix, + USER_AGENT_EXTRA, + DEFAULT_TIMEZONE: props.defaultTimezone, + factory: props.factory, + }); + + const asgScheduler = new AsgScheduler(scope, "ASGScheduler", { + USER_AGENT_EXTRA, + asgHandler, + configTable: 
this.configTable, + enableAsgs: props.enableAsgs, + enableDebugLogging: props.enableDebugLogging, + enableSchedulingHubAccount: props.enableSchedulingHubAccount, + encryptionKey: key, + factory: props.factory, + logRetentionDays: props.logRetentionDays, + metricsEnv, + namespace: props.namespace, + regions: props.regions, + snsErrorReportingTopic: this.topic, + solutionVersion: props.solutionVersion, + }); + this.asgOrch = asgScheduler.asgOrchestratorLambdaFunction; + + const schedulingIntervalToCron = new SchedulingIntervalToCron(scope, "CronExpressionsForSchedulingIntervals", {}); + + const schedulerRule = new Rule(scope, "SchedulerEventRule", { + description: `Instance Scheduler - Rule to trigger instance for scheduler function version ${props.solutionVersion}`, + schedule: Schedule.expression(schedulingIntervalToCron.getMapping(props.schedulingIntervalMinutes.toString())), + targets: [ + new LambdaFunctionTarget(orchestratorLambda.lambdaFunction, { + event: RuleTargetInput.fromObject({ + scheduled_action: "run_orchestrator", + }), + retryAttempts: 5, + }), ], - effect: iam.Effect.ALLOW, - resources: [cfnConfigTable.attrArn, cfnMaintenanceWindowTable.attrArn], }); - this.lambdaFunction.addToRolePolicy(dynamodbPolicy); + //local scheduling roles + this.hubSchedulerRole = new SchedulerRole(scope, "SchedulerRole", { + assumedBy: schedulingRequestHandler.lambdaFunction.grantPrincipal, + namespace: props.namespace, + kmsKeys: props.kmsKeyArns, + }); + + new OperationalInsightsDashboard(scope, { + enabled: props.enableOpsInsights, + schedulingRequestHandler: schedulingRequestHandler, + asgHandler: asgHandler, + orchestrator: orchestratorLambda, + schedulingIntervalMinutes: props.schedulingIntervalMinutes, + }); + + const cfnSchedulerRule = schedulerRule.node.defaultChild as CfnRule; + cfnSchedulerRule.addPropertyOverride( + "State", + Fn.conditionIf(props.schedulingEnabled.logicalId, "ENABLED", "DISABLED"), + ); + + this.cfnScheduleCustomResourceHandler = 
mainFunction.lambdaFunction; + + NagSuppressions.addStackSuppressions(Stack.of(scope), [ + { + id: "AwsSolutions-L1", + reason: "Python 3.11 is the newest available runtime. This finding is a false positive.", + }, + ]); } } diff --git a/source/instance-scheduler/lib/dashboard/metrics.ts b/source/instance-scheduler/lib/dashboard/metrics.ts new file mode 100644 index 00000000..f5893e67 --- /dev/null +++ b/source/instance-scheduler/lib/dashboard/metrics.ts @@ -0,0 +1,184 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { MathExpression, Metric } from "aws-cdk-lib/aws-cloudwatch"; +import { Aws, Stack } from "aws-cdk-lib"; +import { SchedulingRequestHandlerLambda } from "../lambda-functions/scheduling-request-handler"; +import { AsgHandler } from "../lambda-functions/asg-handler"; +import { SchedulingOrchestrator } from "../lambda-functions/scheduling-orchestrator"; +import { SchedulingIntervalToSeconds } from "../scheduling-interval-mappings"; + +export interface MetricProps { + readonly schedulingRequestHandler: SchedulingRequestHandlerLambda; + readonly asgHandler: AsgHandler; + readonly orchestrator: SchedulingOrchestrator; + readonly schedulingIntervalMinutes: number; +} +export class Metrics { + /* + helper class for defining the underlying metrics available to the solution for ingestion into dashboard widgets + + */ + public static readonly metricNamespace = `${Aws.STACK_NAME}:InstanceScheduler`; + + public readonly schedulingIntervalMinutes; + public readonly schedulingIntervalSeconds; + private readonly props; + + constructor(scope: Stack, props: MetricProps) { + this.props = props; + this.schedulingIntervalMinutes = props.schedulingIntervalMinutes; + // use a mapping to translate interval minutes to seconds + this.schedulingIntervalSeconds = new SchedulingIntervalToSeconds( + scope, + "MetricsSchedulingIntervalToSeconds", + {}, + 
).getMapping(this.props.schedulingIntervalMinutes.toString()); + } + + TotalEc2InstancesControlled() { + return new MathExpression({ + expression: `SUM(SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} "Service"="ec2" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName=ManagedInstances', 'Sum', ${this.schedulingIntervalSeconds}))`, + }); + } + + TotalEc2HoursSaved() { + const searchExpr = `SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} Service="ec2" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="StoppedInstances"', 'Sum', ${this.schedulingIntervalSeconds})`; + return new MathExpression({ + expression: `SUM(${searchExpr}) * ${this.props.schedulingIntervalMinutes} / 60`, + }); + } + + TotalRDSInstancesControlled() { + return new MathExpression({ + expression: `SUM(SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} "Service"="rds" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName=ManagedInstances', 'Sum', ${this.schedulingIntervalSeconds}))`, + }); + } + + TotalRDSHoursSaved() { + const searchExpr = `SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} Service="rds" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="StoppedInstances"', 'Sum', ${this.schedulingIntervalSeconds})`; + return new MathExpression({ + expression: `SUM(${searchExpr}) * ${this.props.schedulingIntervalMinutes} / 60`, + }); + } + Ec2InstancesControlledByType() { + return new MathExpression({ + expression: `SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} "Service"="ec2" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName=ManagedInstances', 'Sum', ${this.schedulingIntervalSeconds})`, + }); + } + + Ec2InstancesControlledBySchedule() { + return new MathExpression({ + expression: `SEARCH('{"${Metrics.metricNamespace}",Service,Schedule,SchedulingInterval} Service="ec2" 
"SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="ManagedInstances"', 'Sum', ${this.schedulingIntervalSeconds})`, + }); + } + + Ec2InstancesRunningByType() { + return new MathExpression({ + expression: `SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} Service="ec2" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="RunningInstances"', 'Sum', ${this.schedulingIntervalSeconds})`, + }); + } + + Ec2InstancesRunningBySchedule() { + return new MathExpression({ + expression: `SEARCH('{"${Metrics.metricNamespace}",Service,Schedule,SchedulingInterval} Service="ec2" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="RunningInstances"', 'Sum', ${this.schedulingIntervalSeconds})`, + }); + } + + Ec2HoursSaved() { + const searchExpr = `SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} Service="ec2" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="StoppedInstances"', 'Sum', ${this.schedulingIntervalSeconds})`; + return new MathExpression({ + expression: `${searchExpr} * ${this.props.schedulingIntervalMinutes} / 60`, + }); + } + + RdsInstancesControlledByType() { + return new MathExpression({ + expression: `SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} "Service"="rds" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="ManagedInstances"', 'Sum', ${this.schedulingIntervalSeconds})`, + }); + } + + RdsInstancesControlledBySchedule() { + return new MathExpression({ + expression: `SEARCH('{"${Metrics.metricNamespace}",Service,Schedule,SchedulingInterval} Service="rds" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="ManagedInstances"', 'Sum', ${this.schedulingIntervalSeconds})`, + }); + } + + RdsInstancesRunningByType() { + return new MathExpression({ + expression: `SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} Service="rds" 
"SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="RunningInstances"', 'Sum', ${this.schedulingIntervalSeconds})`, + }); + } + + RdsInstancesRunningBySchedule() { + return new MathExpression({ + expression: `SEARCH('{"${Metrics.metricNamespace}",Service,Schedule,SchedulingInterval} Service="rds" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="RunningInstances"', 'Sum', ${this.schedulingIntervalSeconds})`, + }); + } + + RdsHoursSaved() { + const searchExpr = `SEARCH('{"${Metrics.metricNamespace}",Service,InstanceType,SchedulingInterval} Service="rds" "SchedulingInterval"="${this.schedulingIntervalMinutes}" MetricName="StoppedInstances"', 'Sum', ${this.schedulingIntervalSeconds})`; + return new MathExpression({ + expression: `${searchExpr} * ${this.props.schedulingIntervalMinutes} / 60`, + }); + } + + OrchestratorLambdaErrors() { + return new Metric({ + namespace: "AWS/Lambda", + metricName: "Errors", + dimensionsMap: { + FunctionName: this.props.orchestrator.lambdaFunction.functionName, + }, + }); + } + + SchedulingRequestHandlerLambdaErrors() { + return new Metric({ + namespace: "AWS/Lambda", + metricName: "Errors", + dimensionsMap: { + FunctionName: this.props.schedulingRequestHandler.lambdaFunction.functionName, + }, + }); + } + + AsgHandlerLambdaErrors() { + return new Metric({ + namespace: "AWS/Lambda", + metricName: "Errors", + dimensionsMap: { + FunctionName: this.props.asgHandler.lambdaFunction.functionName, + }, + }); + } + + OrchestratorLambdaDuration() { + return new Metric({ + namespace: "AWS/Lambda", + metricName: "Duration", + dimensionsMap: { + FunctionName: this.props.orchestrator.lambdaFunction.functionName, + }, + }); + } + + SchedulingRequestHandlerLambdaDuration() { + return new Metric({ + namespace: "AWS/Lambda", + metricName: "Duration", + dimensionsMap: { + FunctionName: this.props.schedulingRequestHandler.lambdaFunction.functionName, + }, + }); + } + + AsgHandlerLambdaDuration() { + return new Metric({ + 
namespace: "AWS/Lambda", + metricName: "Duration", + dimensionsMap: { + FunctionName: this.props.asgHandler.lambdaFunction.functionName, + }, + }); + } +} diff --git a/source/instance-scheduler/lib/dashboard/ops-insights-dashboard.ts b/source/instance-scheduler/lib/dashboard/ops-insights-dashboard.ts new file mode 100644 index 00000000..3ba531fc --- /dev/null +++ b/source/instance-scheduler/lib/dashboard/ops-insights-dashboard.ts @@ -0,0 +1,174 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { Aspects, Aws, CfnCondition, Duration, Stack } from "aws-cdk-lib"; +import { Dashboard, PeriodOverride, TextWidget } from "aws-cdk-lib/aws-cloudwatch"; +import { + ControlledEC2InstancesByScheduleLineChart, + ControlledEC2InstancesByTypeLineChart, + ControlledEc2InstancesPieChart, + ControlledRdsInstancesByScheduleLineChart, + ControlledRDSInstancesByTypeLineChart, + ControlledRDSInstancesPieChart, + EC2HoursSavedPieChart, + LambdaDurationLineChart, + LambdaErrorRateLineChart, + RdsHoursSavedPieChart, + RunningEC2InstancesByScheduleLineChart, + RunningRdsInstancesByScheduleLineChart, + Size, + RunningEC2InstancesByTypeLineChart, + RunningRDSInstancesByTypeLineChart, + TotalControlledEc2InstancesKPI, + TotalControlledRdsInstancesKPI, + TotalEc2HoursSavedInstancesKPI, + TotalRdsHoursSavedInstancesKPI, +} from "./widgets"; +import { Metrics } from "./metrics"; +import { ConditionAspect } from "../cfn"; +import { SchedulingRequestHandlerLambda } from "../lambda-functions/scheduling-request-handler"; +import { AsgHandler } from "../lambda-functions/asg-handler"; +import { SchedulingOrchestrator } from "../lambda-functions/scheduling-orchestrator"; + +export interface OperationalInsightsDashboardProps { + readonly enabled: CfnCondition; + readonly schedulingRequestHandler: SchedulingRequestHandlerLambda; + readonly asgHandler: AsgHandler; + readonly orchestrator: SchedulingOrchestrator; + readonly 
schedulingIntervalMinutes: number; +} +export class OperationalInsightsDashboard { + constructor(scope: Stack, props: OperationalInsightsDashboardProps) { + const dashboard = new Dashboard(scope, "OperationalInsightsDashboard", { + dashboardName: Aws.STACK_NAME + "-Operational-Insights-Dashboard", + defaultInterval: Duration.days(7), + periodOverride: PeriodOverride.INHERIT, + }); + + const metrics = new Metrics(scope, { + schedulingRequestHandler: props.schedulingRequestHandler, + asgHandler: props.asgHandler, + orchestrator: props.orchestrator, + schedulingIntervalMinutes: props.schedulingIntervalMinutes, + }); + + dashboard.addWidgets( + new TextWidget({ + markdown: "# EC2", + width: Size.FULL_WIDTH, + height: 1, + }), + ); + + dashboard.addWidgets( + new TotalControlledEc2InstancesKPI(metrics, { + width: Size.QUARTER_WIDTH, + height: Size.QUARTER_WIDTH, + }), + new ControlledEc2InstancesPieChart(metrics, { + width: Size.QUARTER_WIDTH, + height: Size.QUARTER_WIDTH, + }), + new TotalEc2HoursSavedInstancesKPI(metrics, { + width: Size.QUARTER_WIDTH, + height: Size.QUARTER_WIDTH, + }), + new EC2HoursSavedPieChart(metrics, { + width: Size.QUARTER_WIDTH, + height: Size.QUARTER_WIDTH, + }), + ); + + dashboard.addWidgets( + new ControlledEC2InstancesByTypeLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + new RunningEC2InstancesByTypeLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + ); + + dashboard.addWidgets( + new ControlledEC2InstancesByScheduleLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + new RunningEC2InstancesByScheduleLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + ); + + dashboard.addWidgets( + new TextWidget({ + markdown: "# RDS", + width: Size.FULL_WIDTH, + height: 1, + }), + ); + + dashboard.addWidgets( + new TotalControlledRdsInstancesKPI(metrics, { + width: Size.QUARTER_WIDTH, + height: Size.QUARTER_WIDTH, + }), + new ControlledRDSInstancesPieChart(metrics, { + width: 
Size.QUARTER_WIDTH, + height: Size.QUARTER_WIDTH, + }), + new TotalRdsHoursSavedInstancesKPI(metrics, { + width: Size.QUARTER_WIDTH, + height: Size.QUARTER_WIDTH, + }), + new RdsHoursSavedPieChart(metrics, { + width: Size.QUARTER_WIDTH, + height: Size.QUARTER_WIDTH, + }), + ); + + dashboard.addWidgets( + new ControlledRDSInstancesByTypeLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + new RunningRDSInstancesByTypeLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + ); + + dashboard.addWidgets( + new ControlledRdsInstancesByScheduleLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + new RunningRdsInstancesByScheduleLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + ); + + dashboard.addWidgets( + new TextWidget({ + markdown: "# Lambda", + width: Size.FULL_WIDTH, + height: 1, + }), + ); + + dashboard.addWidgets( + new LambdaDurationLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + new LambdaErrorRateLineChart(metrics, { + width: Size.HALF_WIDTH, + height: 6, + }), + ); + + const dashboardConditionAspect = new ConditionAspect(props.enabled); + Aspects.of(dashboard).add(dashboardConditionAspect); + } +} diff --git a/source/instance-scheduler/lib/dashboard/widgets.ts b/source/instance-scheduler/lib/dashboard/widgets.ts new file mode 100644 index 00000000..543b7382 --- /dev/null +++ b/source/instance-scheduler/lib/dashboard/widgets.ts @@ -0,0 +1,409 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { + Color, + GraphWidget, + GraphWidgetView, + LegendPosition, + Shading, + SingleValueWidget, + Stats, +} from "aws-cdk-lib/aws-cloudwatch"; +import { Duration, Token } from "aws-cdk-lib"; +import { Metrics } from "./metrics"; + +export enum Size { + FULL_WIDTH = 24, + HALF_WIDTH = 12, + QUARTER_WIDTH = 6, + SMALL = 3, +} +export interface WidgetProps { + width: number; + height: number; +} +export class TotalEc2HoursSavedInstancesKPI extends SingleValueWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Total EC2 Hours Saved", + width: props.width, + height: props.height, + metrics: [ + metrics.TotalEc2HoursSaved().with({ + label: "Hours Saved", + }), + ], + setPeriodToTimeRange: true, + }); + } +} + +export class TotalRdsHoursSavedInstancesKPI extends SingleValueWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Total RDS Hours Saved", + width: props.width, + height: props.height, + metrics: [ + metrics.TotalRDSHoursSaved().with({ + label: "Hours Saved", + }), + ], + setPeriodToTimeRange: true, + }); + } +} + +export class TotalControlledEc2InstancesKPI extends SingleValueWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Total EC2 Instances Controlled", + width: props.width, + height: props.height, + metrics: [ + metrics.TotalEc2InstancesControlled().with({ + label: "EC2 Instances", + }), + ], + period: Duration.seconds(Token.asNumber(metrics.schedulingIntervalSeconds)), + }); + } +} + +export class TotalControlledRdsInstancesKPI extends SingleValueWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Total RDS Instances Controlled", + width: props.width, + height: props.height, + metrics: [ + metrics.TotalRDSInstancesControlled().with({ + label: "RDS Instances", + }), + ], + period: Duration.seconds(Token.asNumber(metrics.schedulingIntervalSeconds)), + }); + } +} + +export class 
EC2HoursSavedPieChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "EC2 Hours Saved", + view: GraphWidgetView.PIE, + left: [ + metrics.Ec2HoursSaved().with({ + label: "[${SUM}]", + }), + ], + legendPosition: LegendPosition.RIGHT, + statistic: Stats.SUM, + width: props.width, + height: props.height, + setPeriodToTimeRange: true, + }); + } +} + +export class RdsHoursSavedPieChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "RDS Hours Saved", + view: GraphWidgetView.PIE, + left: [ + metrics.RdsHoursSaved().with({ + period: Duration.days(30), + label: "[${SUM}]", + }), + ], + legendPosition: LegendPosition.RIGHT, + statistic: Stats.SUM, + width: props.width, + height: props.height, + setPeriodToTimeRange: true, + }); + } +} + +export class ControlledEc2InstancesPieChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "EC2 Instances Controlled", + view: GraphWidgetView.PIE, + width: props.width, + height: props.height, + left: [ + metrics.Ec2InstancesControlledByType().with({ + label: "[${LAST}]", + }), + ], + legendPosition: LegendPosition.RIGHT, + period: Duration.seconds(Token.asNumber(metrics.schedulingIntervalSeconds)), + }); + } +} + +export class ControlledRDSInstancesPieChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "RDS Instances Controlled", + view: GraphWidgetView.PIE, + width: props.width, + height: props.height, + left: [ + metrics.RdsInstancesControlledByType().with({ + label: "[${LAST}]", + }), + ], + legendPosition: LegendPosition.RIGHT, + period: Duration.seconds(Token.asNumber(metrics.schedulingIntervalSeconds)), + }); + } +} + +export class ControlledEC2InstancesByTypeLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Controlled EC2 Instances by Type", + width: props.width, + height: props.height, + 
left: [ + metrics.Ec2InstancesControlledByType().with({ + label: "", + }), + ], + leftYAxis: { + label: "EC2 Instances", + showUnits: false, + min: 0, + }, + legendPosition: LegendPosition.BOTTOM, + }); + } +} + +export class ControlledEC2InstancesByScheduleLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Controlled EC2 Instances by Schedule", + width: props.width, + height: props.height, + left: [ + metrics.Ec2InstancesControlledBySchedule().with({ + label: "", + }), + ], + leftYAxis: { + label: "EC2 Instances", + showUnits: false, + min: 0, + }, + legendPosition: LegendPosition.BOTTOM, + }); + } +} + +export class RunningEC2InstancesByScheduleLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Running EC2 Instances by Schedule", + width: props.width, + height: props.height, + left: [ + metrics.Ec2InstancesRunningBySchedule().with({ + label: "", + }), + ], + leftYAxis: { + label: "Running EC2 Instances", + showUnits: false, + min: 0, + }, + legendPosition: LegendPosition.BOTTOM, + }); + } +} + +export class RunningEC2InstancesByTypeLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Running EC2 Instances by Type", + width: props.width, + height: props.height, + left: [ + metrics.Ec2InstancesRunningByType().with({ + label: "", + }), + ], + leftYAxis: { + label: "Running EC2 Instances", + showUnits: false, + min: 0, + }, + legendPosition: LegendPosition.BOTTOM, + }); + } +} + +export class ControlledRDSInstancesByTypeLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Controlled RDS Instances by Type", + width: props.width, + height: props.height, + left: [ + metrics.RdsInstancesControlledByType().with({ + label: "", + }), + ], + leftYAxis: { + label: "Controlled RDS Instances", + showUnits: false, + min: 0, + }, + legendPosition: 
LegendPosition.BOTTOM, + }); + } +} + +export class RunningRDSInstancesByTypeLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Running RDS Instances By Type", + width: props.width, + height: props.height, + left: [ + metrics.RdsInstancesRunningByType().with({ + label: "", + }), + ], + leftYAxis: { + label: "Running RDS Instances", + showUnits: false, + min: 0, + }, + legendPosition: LegendPosition.BOTTOM, + }); + } +} + +export class ControlledRdsInstancesByScheduleLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Controlled RDS Instances By Schedule", + width: props.width, + height: props.height, + left: [ + metrics.RdsInstancesControlledBySchedule().with({ + label: "", + }), + ], + leftYAxis: { + label: "Controlled RDS Instances", + showUnits: false, + min: 0, + }, + legendPosition: LegendPosition.BOTTOM, + }); + } +} + +export class RunningRdsInstancesByScheduleLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Running RDS Instances by Schedule", + width: props.width, + height: props.height, + left: [ + metrics.RdsInstancesRunningBySchedule().with({ + label: "", + }), + ], + leftYAxis: { + label: "Running RDS Instances", + showUnits: false, + min: 0, + }, + legendPosition: LegendPosition.BOTTOM, + }); + } +} + +export class LambdaErrorRateLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Lambda Errors", + width: props.width, + height: props.height, + view: GraphWidgetView.TIME_SERIES, + period: Duration.minutes(30), + liveData: true, + left: [ + metrics.OrchestratorLambdaErrors().with({ + label: "Orchestrator", + }), + metrics.SchedulingRequestHandlerLambdaErrors().with({ + label: "SchedulingRequestHandler", + }), + metrics.AsgHandlerLambdaErrors().with({ + label: "AsgHandler", + }), + ], + leftYAxis: { + label: "Errors", + showUnits: false, + 
}, + legendPosition: LegendPosition.BOTTOM, + statistic: Stats.SUM, + }); + } +} + +export class LambdaDurationLineChart extends GraphWidget { + constructor(metrics: Metrics, props: WidgetProps) { + super({ + title: "Lambda Duration (P99)", + width: props.width, + height: props.height, + view: GraphWidgetView.TIME_SERIES, + period: Duration.minutes(30), + liveData: true, + left: [ + metrics.OrchestratorLambdaDuration().with({ + label: "Orchestrator", + }), + metrics.SchedulingRequestHandlerLambdaDuration().with({ + label: "SchedulingRequestHandler", + }), + metrics.AsgHandlerLambdaDuration().with({ + label: "AsgHandler", + }), + ], + leftYAxis: { + label: "duration (ms)", + showUnits: false, + }, + leftAnnotations: [ + // lambda times out after 5 minutes, runtime < 3 mins is healthy, 4 mins is iffy, close to 5 is a warning + { + value: 5 * 60 * 1000, + fill: Shading.BELOW, + color: Color.RED, + label: "Timeout Threshold (5 minutes)", + }, + { + value: 4 * 60 * 1000, + fill: Shading.BELOW, + color: Color.ORANGE, + }, + { + value: 3 * 60 * 1000, + fill: Shading.BELOW, + color: Color.GREEN, + }, + ], + legendPosition: LegendPosition.BOTTOM, + statistic: Stats.p(99), + }); + } +} diff --git a/source/instance-scheduler/lib/event-bus-resources.ts b/source/instance-scheduler/lib/event-bus-resources.ts deleted file mode 100644 index 23ee11ff..00000000 --- a/source/instance-scheduler/lib/event-bus-resources.ts +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -import * as events from "aws-cdk-lib/aws-events"; -import * as cdk from "aws-cdk-lib"; - -export interface SchedulerEventBusProps { - organizationId: string[]; - namespace: string; - lambdaFunctionArn: string; - eventBusName: string; - isMemberOfOrganizationsCondition: cdk.CfnCondition; -} - -export class SchedulerEventBusResources { - readonly eventRuleCrossAccount: events.CfnRule; - constructor(scope: cdk.Stack, props: SchedulerEventBusProps) { - const schedulerEventBus = new events.CfnEventBus(scope, "scheduler-event-bus", { - name: props.namespace + "-" + props.eventBusName, - }); - - const eventBusPolicy = new events.CfnEventBusPolicy(scope, "scheduler-event-bus-policy", { - eventBusName: schedulerEventBus.attrName, - statementId: schedulerEventBus.attrName, - action: "events:PutEvents", - principal: "*", - condition: { - type: "StringEquals", - key: "aws:PrincipalOrgID", - value: cdk.Fn.select(0, props.organizationId), - }, - }); - - this.eventRuleCrossAccount = new events.CfnRule(scope, "scheduler-ssm-parameter-cross-account-events", { - description: - "Event rule to invoke Instance Scheduler lambda function to store spoke account id(s) in configuration.", - eventBusName: schedulerEventBus.attrName, - state: "ENABLED", - targets: [ - { - arn: props.lambdaFunctionArn, - id: "Scheduler-Lambda-Function", - }, - ], - eventPattern: { - source: ["aws.ssm"], - "detail-type": ["Parameter Store Change"], - detail: { - name: ["/instance-scheduler/do-not-delete-manually"], - operation: ["Create", "Delete"], - type: ["String"], - }, - }, - }); - - schedulerEventBus.cfnOptions.condition = props.isMemberOfOrganizationsCondition; - eventBusPolicy.cfnOptions.condition = props.isMemberOfOrganizationsCondition; - this.eventRuleCrossAccount.cfnOptions.condition = props.isMemberOfOrganizationsCondition; - } -} diff --git a/source/instance-scheduler/lib/iam/asg-scheduling-permissions-policy.ts 
b/source/instance-scheduler/lib/iam/asg-scheduling-permissions-policy.ts new file mode 100644 index 00000000..f5acc2dd --- /dev/null +++ b/source/instance-scheduler/lib/iam/asg-scheduling-permissions-policy.ts @@ -0,0 +1,48 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { Aws } from "aws-cdk-lib"; +import { Policy, PolicyStatement } from "aws-cdk-lib/aws-iam"; +import { NagSuppressions } from "cdk-nag"; +import { addCfnNagSuppressions } from "../cfn-nag"; +import { Construct } from "constructs"; + +export class AsgSchedulingPermissionsPolicy extends Policy { + constructor(scope: Construct, id: string) { + super(scope, id); + + this.addStatements( + new PolicyStatement({ + actions: [ + "autoscaling:BatchPutScheduledUpdateGroupAction", + "autoscaling:BatchDeleteScheduledAction", + "autoscaling:CreateOrUpdateTags", + ], + resources: [`arn:${Aws.PARTITION}:autoscaling:*:${Aws.ACCOUNT_ID}:autoScalingGroup:*:autoScalingGroupName/*`], + }), + new PolicyStatement({ + actions: ["autoscaling:DescribeAutoScalingGroups", "autoscaling:DescribeScheduledActions"], + resources: ["*"], + }), + ); + + addCfnNagSuppressions(this, { + id: "W12", + reason: "DescribeAutoScalingGroups and autoscaling:DescribeScheduledActions actions require wildcard permissions", + }); + + NagSuppressions.addResourceSuppressions(this, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::*"], + reason: "Required permissions to describe AutoScaling Groups", + }, + { + id: "AwsSolutions-IAM5", + appliesTo: [ + "Resource::arn::autoscaling:*::autoScalingGroup:*:autoScalingGroupName/*", + ], + reason: "Required permissions to modify scheduled scaling actions on AutoScaling Groups", + }, + ]); + } +} diff --git a/source/instance-scheduler/lib/iam/asg-scheduling-role.ts b/source/instance-scheduler/lib/iam/asg-scheduling-role.ts new file mode 100644 index 00000000..74fbf0d8 --- /dev/null +++ 
b/source/instance-scheduler/lib/iam/asg-scheduling-role.ts @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { IPrincipal, Role } from "aws-cdk-lib/aws-iam"; +import { Construct } from "constructs"; +import { addCfnNagSuppressions } from "../cfn-nag"; +import { AsgSchedulingPermissionsPolicy } from "./asg-scheduling-permissions-policy"; + +export interface AsgSchedulingRoleProps { + assumedBy: IPrincipal; + namespace: string; +} +export class AsgSchedulingRole extends Role { + static roleName(namespace: string) { + return `${namespace}-ASG-Scheduling-Role`; + } + constructor(scope: Construct, id: string, props: AsgSchedulingRoleProps) { + super(scope, id, { + assumedBy: props.assumedBy, + roleName: AsgSchedulingRole.roleName(props.namespace), + }); + + new AsgSchedulingPermissionsPolicy(this, `ASGSchedulingPermissions`).attachToRole(this); + + addCfnNagSuppressions(this, { + id: "W28", + reason: "The role name is defined to allow cross account access from the hub account.", + }); + } +} diff --git a/source/instance-scheduler/lib/iam/ec2-kms-permissions-policy.ts b/source/instance-scheduler/lib/iam/ec2-kms-permissions-policy.ts new file mode 100644 index 00000000..b6bb2e6b --- /dev/null +++ b/source/instance-scheduler/lib/iam/ec2-kms-permissions-policy.ts @@ -0,0 +1,44 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { Effect, Policy, PolicyStatement } from "aws-cdk-lib/aws-iam"; +import { Construct } from "constructs"; +import { NagSuppressions } from "cdk-nag"; +export class Ec2KmsPermissionsPolicy extends Policy { + constructor(scope: Construct, id: string, kmsKeyArns: string[]) { + super(scope, id); + + this.addStatements( + new PolicyStatement({ + actions: ["kms:CreateGrant"], + resources: kmsKeyArns, + effect: Effect.ALLOW, + conditions: { + Bool: { + "kms:GrantIsForAWSResource": true, + }, + StringLike: { + "kms:ViaService": "ec2.*.amazonaws.com", + }, + "ForAllValues:StringEquals": { + "kms:GrantOperations": ["Decrypt"], + "kms:EncryptionContextKeys": ["aws:ebs:id"], + }, + Null: { + "kms:EncryptionContextKeys": false, + "kms:GrantOperations": false, + }, + }, + }), + ); + + NagSuppressions.addResourceSuppressions(this, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::*"], + reason: + "Specific kms keys are unknown until runtime, for security, access is instead restricted to only granting decryption" + + " permissions to the ec2 service for encrypted EBS volumes", + }, + ]); + } +} diff --git a/source/instance-scheduler/lib/iam/roles.ts b/source/instance-scheduler/lib/iam/roles.ts new file mode 100644 index 00000000..e0f9d854 --- /dev/null +++ b/source/instance-scheduler/lib/iam/roles.ts @@ -0,0 +1,8 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Aws } from "aws-cdk-lib"; + +export function roleArnFor(accountId: string, roleName: string) { + return `arn:${Aws.PARTITION}:iam::${accountId}:role/${roleName}`; +} diff --git a/source/instance-scheduler/lib/iam/scheduler-role.ts b/source/instance-scheduler/lib/iam/scheduler-role.ts new file mode 100644 index 00000000..7e2353a1 --- /dev/null +++ b/source/instance-scheduler/lib/iam/scheduler-role.ts @@ -0,0 +1,42 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { IPrincipal, Role } from "aws-cdk-lib/aws-iam"; +import { Construct } from "constructs"; +import { Aspects, CfnCondition, Fn } from "aws-cdk-lib"; +import { ConditionAspect } from "../cfn"; +import { Ec2KmsPermissionsPolicy } from "./ec2-kms-permissions-policy"; +import { SchedulingPermissionsPolicy } from "./scheduling-permissions-policy"; +import { addCfnNagSuppressions } from "../cfn-nag"; + +export interface ScheduleRoleProps { + assumedBy: IPrincipal; + namespace: string; + kmsKeys: string[]; +} +export class SchedulerRole extends Role { + static roleName(namespace: string) { + return `${namespace}-Scheduler-Role`; + } + constructor(scope: Construct, id: string, props: ScheduleRoleProps) { + super(scope, id, { + assumedBy: props.assumedBy, + roleName: SchedulerRole.roleName(props.namespace), + }); + + new SchedulingPermissionsPolicy(this, `SchedulingPermissions`).attachToRole(this); + + //optional KMS permissions + const kmsCondition = new CfnCondition(this, "kmsAccessCondition", { + expression: Fn.conditionNot(Fn.conditionEquals(Fn.select(0, props.kmsKeys), "")), + }); + const kmsConditionAspect = new ConditionAspect(kmsCondition); + const kmsAccess = new Ec2KmsPermissionsPolicy(this, `KmsPermissions`, props.kmsKeys); + kmsAccess.attachToRole(this); + Aspects.of(kmsAccess).add(kmsConditionAspect); + + addCfnNagSuppressions(this, { + id: "W28", + reason: "The role name is defined to allow cross account access from the hub account.", + }); + } +} diff --git a/source/instance-scheduler/lib/iam/scheduling-permissions-policy.ts b/source/instance-scheduler/lib/iam/scheduling-permissions-policy.ts new file mode 100644 index 00000000..5577733d --- /dev/null +++ b/source/instance-scheduler/lib/iam/scheduling-permissions-policy.ts @@ -0,0 +1,86 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { Effect, Policy, PolicyStatement } from "aws-cdk-lib/aws-iam"; +import { Construct } from "constructs"; +import { Fn } from "aws-cdk-lib"; +import { NagSuppressions } from "cdk-nag"; +import { addCfnNagSuppressions } from "../cfn-nag"; + +export class SchedulingPermissionsPolicy extends Policy { + constructor(scope: Construct, id: string) { + super(scope, id); + + // describe ec2 instances for scheduling (cannot be scoped to tagged instances) + this.addStatements( + new PolicyStatement({ + actions: ["ec2:DescribeInstances"], + effect: Effect.ALLOW, + resources: ["*"], + }), + + // start/stop/resize permissions scoped to tagged EC2 instances only + new PolicyStatement({ + actions: [ + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:ModifyInstanceAttribute", + ], + effect: Effect.ALLOW, + resources: [Fn.sub("arn:${AWS::Partition}:ec2:*:${AWS::AccountId}:instance/*")], + }), + + // describe ec2 maintenance windows + new PolicyStatement({ + actions: ["ssm:DescribeMaintenanceWindows"], + effect: Effect.ALLOW, + resources: ["*"], + }), + + // describe rds instances and clusters + new PolicyStatement({ + actions: ["rds:DescribeDBClusters", "rds:DescribeDBInstances", "tag:GetResources"], + effect: Effect.ALLOW, + resources: ["*"], + }), + + new PolicyStatement({ + //StopDBInstance here is required + actions: ["rds:DeleteDBSnapshot", "rds:DescribeDBSnapshots", "rds:StopDBInstance"], + effect: Effect.ALLOW, + resources: [Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:snapshot:*")], + }), + + // start/stop/tag for rds instances + new PolicyStatement({ + actions: ["rds:AddTagsToResource", "rds:RemoveTagsFromResource", "rds:StartDBInstance", "rds:StopDBInstance"], + effect: Effect.ALLOW, + resources: [Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:db:*")], + }), + + // start/stop/tag for rds clusters + new PolicyStatement({ + actions: ["rds:AddTagsToResource", 
"rds:RemoveTagsFromResource", "rds:StartDBCluster", "rds:StopDBCluster"], + effect: Effect.ALLOW, + resources: [Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:cluster:*")], + }), + ); + + addCfnNagSuppressions(this, { id: "W12", reason: "required scheduling permissions" }); + + NagSuppressions.addResourceSuppressions(this, [ + { + id: "AwsSolutions-IAM5", + appliesTo: [ + "Resource::arn::rds:*::db:*", + "Resource::arn::rds:*::cluster:*", + "Resource::arn::ec2:*::instance/*", + "Resource::arn::rds:*::snapshot:*", + "Resource::*", + ], + reason: "required scheduling permissions", + }, + ]); + } +} diff --git a/source/instance-scheduler/lib/instance-scheduler-stack.ts b/source/instance-scheduler/lib/instance-scheduler-stack.ts index f20c5ef0..5028a754 100644 --- a/source/instance-scheduler/lib/instance-scheduler-stack.ts +++ b/source/instance-scheduler/lib/instance-scheduler-stack.ts @@ -1,868 +1,350 @@ -#!/usr/bin/env node // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 - -import * as cdk from "aws-cdk-lib"; -import { Aws, CfnOutput, RemovalPolicy } from "aws-cdk-lib"; -import * as kms from "aws-cdk-lib/aws-kms"; -import * as iam from "aws-cdk-lib/aws-iam"; -import { ArnPrincipal, Effect, PolicyStatement } from "aws-cdk-lib/aws-iam"; -import * as logs from "aws-cdk-lib/aws-logs"; -import * as lambda from "aws-cdk-lib/aws-lambda"; -import * as sns from "aws-cdk-lib/aws-sns"; -import * as events from "aws-cdk-lib/aws-events"; +import { Aws, CfnMapping, CfnOutput, Stack, StackProps } from "aws-cdk-lib"; +import { RetentionDays } from "aws-cdk-lib/aws-logs"; import { Construct } from "constructs"; -import * as EBEventTarget from "aws-cdk-lib/aws-events-targets"; -import * as dynamodb from "aws-cdk-lib/aws-dynamodb"; -import { SUPPORTED_TIME_ZONES } from "./time-zones"; -import { AppRegistryForInstanceScheduler } from "./app-registry"; -import { NagSuppressions } from "cdk-nag"; +import { + EnabledDisabledParameter, + EnabledDisabledType, + ParameterWithLabel, + YesNoParameter, + YesNoType, + addParameterGroup, + yesNoCondition, +} from "./cfn"; import { CoreScheduler } from "./core-scheduler"; -import { SchedulerEventBusResources, SchedulerEventBusProps } from "./event-bus-resources"; +import { FunctionFactory, PythonFunctionFactory } from "./lambda-functions/function-factory"; +import { SUPPORTED_TIME_ZONES } from "./time-zones"; +import { schedulerIntervalValues } from "./scheduling-interval-mappings"; -export interface InstanceSchedulerStackProps extends cdk.StackProps { - readonly description: string; +export interface InstanceSchedulerStackProps extends StackProps { readonly solutionId: string; readonly solutionName: string; readonly solutionVersion: string; readonly appregApplicationName: string; readonly appregSolutionName: string; - - readonly paramOverrides?: InstanceSchedulerParameterDefaultOverrides; - readonly disableOpMetrics?: boolean; + readonly factory?: FunctionFactory; } -export 
interface InstanceSchedulerParameterDefaultOverrides { - readonly schedulingActive?: "Yes" | "No"; - readonly scheduledServices?: "EC2" | "RDS" | "Both"; - readonly scheduleRdsClusters?: "Yes" | "No"; - readonly createRdsSnapshot?: "Yes" | "No"; - readonly memorySize?: "128" | "384" | "512" | "640" | "768" | "896" | "1024" | "1152" | "1280" | "1408" | "1536"; - readonly useCloudWatchMetrics?: "Yes" | "No"; - readonly logRetention?: - | "1" - | "3" - | "5" - | "7" - | "14" - | "30" - | "60" - | "90" - | "120" - | "150" - | "180" - | "365" - | "400" - | "545" - | "731" - | "1827" - | "3653"; - readonly trace?: "Yes" | "No"; - readonly enableSSMMaintenanceWindows?: "Yes" | "No"; - readonly tagName?: string; - readonly defaultTimezone?: string; - readonly regions?: string; - readonly principals?: string; - readonly startedTags?: string; - readonly stoppedTags?: string; - readonly schedulerFrequency?: "1" | "2" | "5" | "10" | "15" | "30" | "60"; - readonly scheduleLambdaAccount?: "Yes" | "No"; - readonly namespace?: string; -} - -/* - * Instance Scheduler on AWS stack - * The stack has three dynamoDB tables defined for storing the state, configuration and maintenance information. - * The stack also includes one lambda, which is scheduled using an AWS CloudWatch Event Rule. - * The stack also includes a cloudwatch log group for the entire solution, encryption key, encryption key alias and SNS topic, - * and the necessary AWS IAM Policies and IAM Roles. 
For more information on the architecture, refer to the documentation at - * https://aws.amazon.com/solutions/implementations/instance-scheduler-on-aws/ - */ -export class InstanceSchedulerStack extends cdk.Stack { - readonly configurationTableOutput: CfnOutput; - readonly issueSnsTopicArn: CfnOutput; - readonly schedulerRoleArn: CfnOutput; - +export class InstanceSchedulerStack extends Stack { constructor(scope: Construct, id: string, props: InstanceSchedulerStackProps) { super(scope, id, props); - //Start CFN Parameters for instance scheduler. + const scheduleTagKey = new ParameterWithLabel(this, "TagName", { + label: "Schedule tag key", + description: + "The tag key Instance Scheduler will read to determine the schedule for a resource. The value of the tag with this key on a resource specifies the name of the schedule.", + default: "Schedule", + minLength: 1, + maxLength: 127, + }); + + const schedulerIntervalMinutes = new ParameterWithLabel(this, "SchedulerFrequency", { + label: "Scheduling interval (minutes)", + type: "Number", + description: "Interval in minutes between scheduler executions. For EC2 and RDS", + allowedValues: schedulerIntervalValues, + default: "5", + }); - const schedulingActive = new cdk.CfnParameter(this, "SchedulingActive", { - description: "Activate or deactivate scheduling.", - type: "String", - allowedValues: ["Yes", "No"], - default: props.paramOverrides?.schedulingActive ?? "Yes", + const defaultTimezone = new ParameterWithLabel(this, "DefaultTimezone", { + label: "Default time zone", + description: "Default IANA time zone identifier used by schedules that do not specify a time zone.", + default: "UTC", + allowedValues: SUPPORTED_TIME_ZONES, }); - const scheduledServices = new cdk.CfnParameter(this, "ScheduledServices", { - description: "Scheduled Services.", - type: "String", - allowedValues: ["EC2", "RDS", "Both"], - default: props.paramOverrides?.scheduledServices ?? 
"EC2", + const enableScheduling = new YesNoParameter(this, "SchedulingActive", { + label: "Enable scheduling", + description: 'Set to "No" to disable scheduling for all services.', + default: YesNoType.Yes, }); - const scheduleRdsClusters = new cdk.CfnParameter(this, "ScheduleRdsClusters", { - description: "Enable scheduling of Aurora clusters for RDS Service.", - type: "String", - allowedValues: ["Yes", "No"], - default: props.paramOverrides?.scheduleRdsClusters ?? "No", + addParameterGroup(this, { + label: `Scheduler (${props.solutionVersion})`, + parameters: [scheduleTagKey, schedulerIntervalMinutes, defaultTimezone, enableScheduling], }); - const createRdsSnapshot = new cdk.CfnParameter(this, "CreateRdsSnapshot", { - description: "Create snapshot before stopping RDS instances (does not apply to Aurora Clusters).", - type: "String", - allowedValues: ["Yes", "No"], - default: props.paramOverrides?.createRdsSnapshot ?? "No", + const enableEc2 = new EnabledDisabledParameter(this, "ScheduleEC2", { + label: "Enable EC2 scheduling", + description: "Enable scheduling EC2 instances.", + default: EnabledDisabledType.Enabled, }); - const memorySize = new cdk.CfnParameter(this, "MemorySize", { - description: - "Size of the Lambda function running the scheduler, increase size when processing large numbers of instances.", - type: "Number", - allowedValues: ["128", "384", "512", "640", "768", "896", "1024", "1152", "1280", "1408", "1536"], - default: props.paramOverrides?.memorySize ?? 
128, + const enableRds = new EnabledDisabledParameter(this, "ScheduleRds", { + label: "Enable RDS instance scheduling", + description: "Enable scheduling individual RDS instances (not clusters).", + default: EnabledDisabledType.Enabled, }); - const useCloudWatchMetrics = new cdk.CfnParameter(this, "UseCloudWatchMetrics", { - description: "Collect instance scheduling data using CloudWatch metrics.", - type: "String", - allowedValues: ["Yes", "No"], - default: props.paramOverrides?.useCloudWatchMetrics ?? "No", + const enableRdsClusters = new EnabledDisabledParameter(this, "EnableRdsClusterScheduling", { + label: "Enable RDS cluster scheduling", + description: "Enable scheduling RDS clusters (multi-AZ and Aurora).", + default: EnabledDisabledType.Enabled, }); - const logRetention = new cdk.CfnParameter(this, "LogRetentionDays", { - description: "Retention days for scheduler logs.", - type: "Number", - allowedValues: [ - "1", - "3", - "5", - "7", - "14", - "30", - "60", - "90", - "120", - "150", - "180", - "365", - "400", - "545", - "731", - "1827", - "3653", - ], - default: props.paramOverrides?.logRetention ?? 30, - }); - - const trace = new cdk.CfnParameter(this, "Trace", { - description: "Enable debug-level logging in CloudWatch logs.", - type: "String", - allowedValues: ["Yes", "No"], - default: props.paramOverrides?.trace ?? 
"No", - }); - - const enableSSMMaintenanceWindows = new cdk.CfnParameter(this, "EnableSSMMaintenanceWindows", { + const enableNeptune = new EnabledDisabledParameter(this, "ScheduleNeptune", { + label: "Enable Neptune cluster scheduling", + description: "Enable scheduling Neptune clusters.", + default: EnabledDisabledType.Enabled, + }); + + const enableDocDb = new EnabledDisabledParameter(this, "ScheduleDocDb", { + label: "Enable DocumentDB cluster scheduling", + description: "Enable scheduling DocumentDB clusters.", + default: EnabledDisabledType.Enabled, + }); + + const enableAsgs = new EnabledDisabledParameter(this, "ScheduleASGs", { + label: "Enable AutoScaling Group scheduling", + description: "Enable scheduling AutoScaling Groups", + default: EnabledDisabledType.Enabled, + }); + + addParameterGroup(this, { + label: "Services", + parameters: [enableEc2, enableRds, enableRdsClusters, enableNeptune, enableDocDb, enableAsgs], + }); + + const startTags = new ParameterWithLabel(this, "StartedTags", { + label: "Start tags", description: - "Enable the solution to load SSM Maintenance Windows, so that they can be used for EC2 instance Scheduling.", - type: "String", - allowedValues: ["Yes", "No"], - default: props.paramOverrides?.enableSSMMaintenanceWindows ?? "No", + "Comma-separated list of tag keys and values of the format key=value, key=value,... that are set on started instances. Leave blank to disable.", + default: "InstanceScheduler-LastAction=Started By {scheduler} {year}-{month}-{day} {hour}:{minute} {timezone}", }); - const tagName = new cdk.CfnParameter(this, "TagName", { - description: "Name of tag to use for associating instance schedule schemas with service instances.", - type: "String", - default: props.paramOverrides?.tagName ?? 
"Schedule", - minLength: 1, - maxLength: 127, + const stopTags = new ParameterWithLabel(this, "StoppedTags", { + label: "Stop tags", + description: + "Comma-separated list of tag keys and values of the format key=value, key=value,... that are set on stopped instances. Leave blank to disable.", + default: "InstanceScheduler-LastAction=Stopped By {scheduler} {year}-{month}-{day} {hour}:{minute} {timezone}", }); - const defaultTimezone = new cdk.CfnParameter(this, "DefaultTimezone", { - description: "Choose the default Time Zone. Default is 'UTC'.", - type: "String", - default: props.paramOverrides?.defaultTimezone ?? "UTC", - allowedValues: SUPPORTED_TIME_ZONES, + addParameterGroup(this, { label: "Tagging", parameters: [startTags, stopTags] }); + + const enableEc2SsmMaintenanceWindows = new YesNoParameter(this, "EnableSSMMaintenanceWindows", { + label: "Enable EC2 SSM Maintenance Windows", + description: + "Allow schedules to specify a maintenance window name. Instance Scheduler will ensure the instance is running during that maintenance window.", + default: YesNoType.No, }); - const regions = new cdk.CfnParameter(this, "Regions", { + const kmsKeyArns = new ParameterWithLabel(this, "KmsKeyArns", { + label: "Kms Key Arns for EC2", + description: + "comma-separated list of kms arns to grant Instance Scheduler kms:CreateGrant permissions to provide the EC2 " + + " service with Decrypt permissions for encrypted EBS volumes." + + " This allows the scheduler to start EC2 instances with attached encrypted EBS volumes." + + " provide just (*) to give limited access to all kms keys, leave blank to disable." + + " For details on the exact policy created, refer to security section of the implementation guide" + + " (https://aws.amazon.com/solutions/implementations/instance-scheduler-on-aws/)", type: "CommaDelimitedList", - description: "List of regions in which instances should be scheduled, leave blank for current region only.", - default: props.paramOverrides?.regions ?? 
"", + default: "", + }); + + const createRdsSnapshots = new YesNoParameter(this, "CreateRdsSnapshot", { + label: "Create RDS instance snapshots on stop", + description: "Create snapshots before stopping RDS instances (not clusters).", + default: YesNoType.No, + }); + + const scheduledTagKey = new ParameterWithLabel(this, "AsgScheduledTagKey", { + label: "ASG scheduled tag key", + description: "Key for the tag Instance Scheduler will add to scheduled AutoScaling Groups", + default: "scheduled", }); - const usingAWSOrganizations = new cdk.CfnParameter(this, "UsingAWSOrganizations", { - type: "String", - description: "Use AWS Organizations to automate spoke account registration.", - allowedValues: ["Yes", "No"], - default: "No", + const rulePrefix = new ParameterWithLabel(this, "AsgRulePrefix", { + label: "ASG action name prefix", + description: + "The prefix Instance Scheduler will use when naming Scheduled Scaling actions for AutoScaling Groups. Actions with this prefix will be added and removed by Instance Scheduler as needed.", + default: "is-", }); - const principals = new cdk.CfnParameter(this, "Principals", { + addParameterGroup(this, { + label: "Service-specific", + parameters: [enableEc2SsmMaintenanceWindows, kmsKeyArns, createRdsSnapshots, scheduledTagKey, rulePrefix], + }); + + const usingAWSOrganizations = new YesNoParameter(this, "UsingAWSOrganizations", { + label: "Use AWS Organizations", + description: "Deploy resources to enable automatic spoke stack registration using AWS Organizations.", + default: YesNoType.No, + }); + + const namespace = new ParameterWithLabel(this, "Namespace", { + label: "Namespace", + description: "Unique identifier per deployment. Cannot contain spaces.", + default: "default", + }); + + const principals = new ParameterWithLabel(this, "Principals", { + label: "Organization ID/remote account IDs", type: "CommaDelimitedList", description: "(Required) If using AWS Organizations, provide the Organization ID. Eg. o-xxxxyyy. 
" + - "Else, provide a comma separated list of spoke account ids to schedule. Eg.: 1111111111, 2222222222 or {param: ssm-param-name}", - default: props.paramOverrides?.principals ?? "", + "Else, provide a comma-separated list of spoke account ids to schedule. Eg.: 1111111111, 2222222222 or {param: ssm-param-name}", + default: "", }); - const namespace = new cdk.CfnParameter(this, "Namespace", { - type: "String", + const regions = new ParameterWithLabel(this, "Regions", { + label: "Region(s)", + type: "CommaDelimitedList", description: - "Provide unique identifier to differentiate between multiple solution deployments (No Spaces). Example: Dev", - default: props.paramOverrides?.namespace ?? "", + "Comma-separated List of regions in which resources should be scheduled. Leave blank for current region only.", + default: "", + }); + + const scheduleLambdaAccount = new YesNoParameter(this, "ScheduleLambdaAccount", { + label: "Enable hub account scheduling", + description: "Enable scheduling in this account.", + default: YesNoType.Yes, + }); + + addParameterGroup(this, { + label: "Account structure", + parameters: [usingAWSOrganizations, namespace, principals, regions, scheduleLambdaAccount], + }); + + const logRetentionValues: RetentionDays[] = [ + RetentionDays.ONE_DAY, + RetentionDays.THREE_DAYS, + RetentionDays.FIVE_DAYS, + RetentionDays.ONE_WEEK, + RetentionDays.TWO_WEEKS, + RetentionDays.ONE_MONTH, + RetentionDays.TWO_MONTHS, + RetentionDays.THREE_MONTHS, + RetentionDays.FOUR_MONTHS, + RetentionDays.FIVE_MONTHS, + RetentionDays.SIX_MONTHS, + RetentionDays.ONE_YEAR, + RetentionDays.THIRTEEN_MONTHS, + RetentionDays.EIGHTEEN_MONTHS, + RetentionDays.TWO_YEARS, + RetentionDays.FIVE_YEARS, + RetentionDays.TEN_YEARS, + ]; + const logRetentionDays = new ParameterWithLabel(this, "LogRetentionDays", { + label: "Log retention period (days)", + description: "Retention period in days for logs.", + type: "Number", + allowedValues: logRetentionValues.map((value: number) => 
value.toString()), + default: RetentionDays.ONE_MONTH, + }); + + const enableDebugLogging = new YesNoParameter(this, "Trace", { + label: "Enable CloudWatch debug Logs", + description: "Enable debug-level logging in CloudWatch Logs.", + default: YesNoType.No, + }); + + const enableOpsMonitoring = new EnabledDisabledParameter(this, "OpsMonitoring", { + label: "Operational Monitoring", + description: "Deploy operational metrics and an Ops Monitoring Dashboard to Cloudwatch", + default: EnabledDisabledType.Enabled, }); - const startedTags = new cdk.CfnParameter(this, "StartedTags", { - type: "String", + addParameterGroup(this, { + label: "Monitoring", + parameters: [logRetentionDays, enableDebugLogging, enableOpsMonitoring], + }); + + const memorySizeValues = ["128", "384", "512", "640", "768", "896", "1024", "1152", "1280", "1408", "1536"]; + const memorySize = new ParameterWithLabel(this, "MemorySize", { + label: "Memory size (MB)", description: - "Comma separated list of tag keys and values of the format key=value, key=value,... that are set on started instances. Leave blank to disable.", - default: - props.paramOverrides?.startedTags ?? - "InstanceScheduler-LastAction=Started By {scheduler} {year}/{month}/{day} {hour}:{minute}{timezone}, ", + "Memory size of the Lambda function that schedules EC2 and RDS resources. Increase if you are experiencing high memory usage or timeouts.", + type: "Number", + allowedValues: memorySizeValues, + default: 128, }); - const stoppedTags = new cdk.CfnParameter(this, "StoppedTags", { - type: "String", + const enableDdbDeletionProtection = new EnabledDisabledParameter(this, "ddbDeletionProtection", { + label: "Protect DynamoDB Tables", description: - "Comma separated list of tag keys and values of the format key=value, key=value,... that are set on stopped instances. Leave blank to disable.", - default: - props.paramOverrides?.stoppedTags ?? 
- "InstanceScheduler-LastAction=Stopped By {scheduler} {year}/{month}/{day} {hour}:{minute}{timezone}, ", - }); - - const schedulerFrequency = new cdk.CfnParameter(this, "SchedulerFrequency", { - type: "String", - description: "Scheduler running frequency in minutes.", - allowedValues: ["1", "2", "5", "10", "15", "30", "60"], - default: props.paramOverrides?.schedulerFrequency ?? "5", - }); - - const scheduleLambdaAccount = new cdk.CfnParameter(this, "ScheduleLambdaAccount", { - type: "String", - allowedValues: ["Yes", "No"], - default: props.paramOverrides?.scheduleLambdaAccount ?? "Yes", - description: "Schedule instances in this account.", - }); - - //End CFN parameters for instance scheduler. - - // CFN Conditions - const isMemberOfOrganization = new cdk.CfnCondition(this, "IsMemberOfOrganization", { - expression: cdk.Fn.conditionEquals(usingAWSOrganizations, "Yes"), - }); - - //Start Mappings for instance scheduler. - - const mappings = new cdk.CfnMapping(this, "mappings"); - mappings.setValue("TrueFalse", "Yes", "True"); - mappings.setValue("TrueFalse", "No", "False"); - mappings.setValue("EnabledDisabled", "Yes", "ENABLED"); - mappings.setValue("EnabledDisabled", "No", "DISABLED"); - mappings.setValue("Services", "EC2", "ec2"); - mappings.setValue("Services", "RDS", "rds"); - mappings.setValue("Services", "Both", "ec2,rds"); - mappings.setValue("Timeouts", "1", "cron(0/1 * * * ? *)"); - mappings.setValue("Timeouts", "2", "cron(0/2 * * * ? *)"); - mappings.setValue("Timeouts", "5", "cron(0/5 * * * ? *)"); - mappings.setValue("Timeouts", "10", "cron(0/10 * * * ? *)"); - mappings.setValue("Timeouts", "15", "cron(0/15 * * * ? *)"); - mappings.setValue("Timeouts", "30", "cron(0/30 * * * ? *)"); - mappings.setValue("Timeouts", "60", "cron(0 0/1 * * ? 
*)"); - mappings.setValue("Settings", "MetricsUrl", "https://metrics.awssolutionsbuilder.com/generic"); - mappings.setValue("Settings", "MetricsSolutionId", "S00030"); - mappings.setValue("SchedulerRole", "Name", "Scheduler-Role"); - mappings.setValue("SchedulerEventBusName", "Name", "scheduler-event-bus"); - - const send = new cdk.CfnMapping(this, "Send"); - if (props.disableOpMetrics) { - send.setValue("AnonymousUsage", "Data", "No"); - } else { - send.setValue("AnonymousUsage", "Data", "Yes"); - } - send.setValue("ParameterKey", "UniqueId", `/Solutions/${props.solutionName}/UUID/`); - - //End Mappings for instance scheduler. - - new AppRegistryForInstanceScheduler(this, "AppRegistryForInstanceScheduler", { - solutionId: props.solutionId, - solutionName: props.solutionName, - solutionVersion: props.solutionVersion, - appregSolutionName: props.appregSolutionName, - appregAppName: props.appregApplicationName, - }); - - /* - * Instance Scheduler solutions log group reference. - */ - const schedulerLogGroup = new logs.LogGroup(this, "SchedulerLogGroup", { - logGroupName: Aws.STACK_NAME + "-logs", - removalPolicy: RemovalPolicy.DESTROY, - }); - - const schedulerLogGroup_ref = schedulerLogGroup.node.defaultChild as logs.CfnLogGroup; - schedulerLogGroup_ref.addPropertyOverride("RetentionInDays", logRetention.valueAsNumber); - schedulerLogGroup_ref.cfnOptions.metadata = { - cfn_nag: { - rules_to_suppress: [ - { - id: "W84", - reason: - "CloudWatch log groups only have transactional data from the Lambda function, this template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group.", - }, - ], - }, - }; - - //Start scheduler role reference and related references of principle, policy statement, and policy document. 
- const compositePrincipal = new iam.CompositePrincipal( - new iam.ServicePrincipal("events.amazonaws.com"), - new iam.ServicePrincipal("lambda.amazonaws.com"), + "Enable deletion protection for DynamoDB tables used by the solution. This will cause the tables to be retained" + + " when deleting this stack. To delete the tables when deleting this stack, first disable this parameter.", + default: EnabledDisabledType.Enabled, + }); + + addParameterGroup(this, { + label: "Other", + parameters: [memorySize, enableDdbDeletionProtection], + }); + + const sendAnonymizedUsageMetricsMapping = new CfnMapping(this, "Send"); + const anonymizedUsageKey1 = "AnonymousUsage"; + const anonymizedUsageKey2 = "Data"; + sendAnonymizedUsageMetricsMapping.setValue(anonymizedUsageKey1, anonymizedUsageKey2, YesNoType.Yes); + const sendAnonymizedMetrics = yesNoCondition( + this, + "AnonymizedMetricsEnabled", + sendAnonymizedUsageMetricsMapping.findInMap(anonymizedUsageKey1, anonymizedUsageKey2), ); - const schedulerRole = new iam.Role(this, "SchedulerRole", { - assumedBy: compositePrincipal, - path: "/", - }); - //End scheduler role reference - - //Start instance scheduler encryption key and encryption key alias. 
- const instanceSchedulerEncryptionKey = new kms.Key(this, "InstanceSchedulerEncryptionKey", { - description: "Key for SNS", - enabled: true, - enableKeyRotation: true, - policy: new iam.PolicyDocument({ - statements: [ - new iam.PolicyStatement({ - actions: ["kms:*"], - effect: Effect.ALLOW, - resources: ["*"], - principals: [new ArnPrincipal("arn:" + this.partition + ":iam::" + this.account + ":root")], - sid: "default", - }), - new iam.PolicyStatement({ - sid: "Allows use of key", - effect: Effect.ALLOW, - actions: ["kms:GenerateDataKey*", "kms:Decrypt"], - resources: ["*"], - principals: [new ArnPrincipal(schedulerRole.roleArn)], - }), - ], - }), - removalPolicy: RemovalPolicy.DESTROY, - }); - - const keyAlias = new kms.Alias(this, "InstanceSchedulerEncryptionKeyAlias", { - aliasName: `alias/${Aws.STACK_NAME}-instance-scheduler-encryption-key`, - targetKey: instanceSchedulerEncryptionKey, - }); - //End instance scheduler encryption key and encryption key alias. - - /* - * Instance scheduler SNS Topic reference. - */ - const snsTopic = new sns.Topic(this, "InstanceSchedulerSnsTopic", { - masterKey: instanceSchedulerEncryptionKey, - }); - - const conditionScheduleEc2 = new cdk.CfnCondition(this, "ScheduleEC2", { - expression: cdk.Fn.conditionOr( - cdk.Fn.conditionEquals(scheduledServices.valueAsString, "EC2"), - cdk.Fn.conditionEquals(scheduledServices.valueAsString, "Both"), - ), - }); - - const conditionScheduleRds = new cdk.CfnCondition(this, "ScheduleRDS", { - expression: cdk.Fn.conditionOr( - cdk.Fn.conditionEquals(scheduledServices.valueAsString, "RDS"), - cdk.Fn.conditionEquals(scheduledServices.valueAsString, "Both"), - ), - }); - - //instance scheduler core scheduler construct reference. + const factory = props.factory ?? 
new PythonFunctionFactory(); + const coreScheduler = new CoreScheduler(this, { + solutionName: props.solutionName, solutionVersion: props.solutionVersion, - memorySize: memorySize.valueAsNumber, - schedulerRole: schedulerRole, - kmsEncryptionKey: instanceSchedulerEncryptionKey, - environment: { - SCHEDULER_FREQUENCY: schedulerFrequency.valueAsString, - LOG_GROUP: schedulerLogGroup.logGroupName, - ACCOUNT: this.account, - ISSUES_TOPIC_ARN: snsTopic.topicArn, - STACK_NAME: Aws.STACK_NAME, - SEND_METRICS: mappings.findInMap("TrueFalse", send.findInMap("AnonymousUsage", "Data")), - SOLUTION_ID: mappings.findInMap("Settings", "MetricsSolutionId"), - SOLUTION_VERSION: props.solutionVersion, - TRACE: mappings.findInMap("TrueFalse", trace.valueAsString), - USER_AGENT_EXTRA: `AwsSolution/${props.solutionId}/${props.solutionVersion}`, - METRICS_URL: mappings.findInMap("Settings", "MetricsUrl"), - STACK_ID: `${cdk.Aws.STACK_ID}`, - UUID_KEY: send.findInMap("ParameterKey", "UniqueId"), - START_EC2_BATCH_SIZE: "5", - SCHEDULE_TAG_KEY: tagName.valueAsString, - DEFAULT_TIMEZONE: defaultTimezone.valueAsString, - ENABLE_CLOUDWATCH_METRICS: mappings.findInMap("TrueFalse", useCloudWatchMetrics.valueAsString), - ENABLE_EC2_SERVICE: cdk.Fn.conditionIf(conditionScheduleEc2.logicalId, "True", "False").toString(), - ENABLE_RDS_SERVICE: cdk.Fn.conditionIf(conditionScheduleRds.logicalId, "True", "False").toString(), - ENABLE_RDS_CLUSTERS: mappings.findInMap("TrueFalse", scheduleRdsClusters.valueAsString), - ENABLE_RDS_SNAPSHOTS: mappings.findInMap("TrueFalse", createRdsSnapshot.valueAsString), - SCHEDULE_REGIONS: cdk.Fn.join(",", regions.valueAsList), - APP_NAMESPACE: namespace.valueAsString, - SCHEDULER_ROLE_NAME: mappings.findInMap("SchedulerRole", "Name"), - ENABLE_SCHEDULE_HUB_ACCOUNT: mappings.findInMap("TrueFalse", scheduleLambdaAccount.valueAsString), - ENABLE_EC2_SSM_MAINTENANCE_WINDOWS: mappings.findInMap("TrueFalse", enableSSMMaintenanceWindows.valueAsString), - START_TAGS: 
startedTags.valueAsString, - STOP_TAGS: stoppedTags.valueAsString, - ENABLE_AWS_ORGANIZATIONS: mappings.findInMap("TrueFalse", usingAWSOrganizations.valueAsString), - }, - }); - - // Support AWS Organizations - Cross account event bus - const eventBusProps: SchedulerEventBusProps = { - organizationId: principals.valueAsList, + solutionId: props.solutionId, + memorySizeMB: memorySize.valueAsNumber, + logRetentionDays: logRetentionDays.valueAsNumber, + principals: principals.valueAsList, + schedulingEnabled: enableScheduling.getCondition(), + schedulingIntervalMinutes: schedulerIntervalMinutes.valueAsNumber, namespace: namespace.valueAsString, - lambdaFunctionArn: coreScheduler.lambdaFunction.functionArn, - eventBusName: mappings.findInMap("SchedulerEventBusName", "Name"), - isMemberOfOrganizationsCondition: isMemberOfOrganization, - }; - - const eventBusResource = new SchedulerEventBusResources(this, eventBusProps); + sendAnonymizedMetrics, + enableDebugLogging: enableDebugLogging.getCondition(), + tagKey: scheduleTagKey.valueAsString, + defaultTimezone: defaultTimezone.valueAsString, + enableEc2: enableEc2.getCondition(), + enableRds: enableRds.getCondition(), + enableRdsClusters: enableRdsClusters.getCondition(), + enableNeptune: enableNeptune.getCondition(), + enableDocdb: enableDocDb.getCondition(), + enableRdsSnapshots: createRdsSnapshots.getCondition(), + regions: regions.valueAsList, + enableSchedulingHubAccount: scheduleLambdaAccount.getCondition(), + enableEc2SsmMaintenanceWindows: enableEc2SsmMaintenanceWindows.getCondition(), + startTags: startTags.valueAsString, + stopTags: stopTags.valueAsString, + enableAwsOrganizations: usingAWSOrganizations.getCondition(), + appregSolutionName: props.appregSolutionName, + appregApplicationName: props.appregApplicationName, + enableOpsInsights: enableOpsMonitoring.getCondition(), + kmsKeyArns: kmsKeyArns.valueAsList, + factory, + enableDdbDeletionProtection: enableDdbDeletionProtection.getCondition(), + enableAsgs: 
enableAsgs.getCondition(), + scheduledTagKey: scheduledTagKey.valueAsString, + rulePrefix: rulePrefix.valueAsString, + }); - const eventBusRuleLambdaPermission = new lambda.CfnPermission(this, "EventBusRuleLambdaPermission", { - functionName: coreScheduler.lambdaFunction.functionName, - action: "lambda:InvokeFunction", - principal: "events.amazonaws.com", - sourceArn: eventBusResource.eventRuleCrossAccount.attrArn, + new CfnOutput(this, "AccountId", { + value: Aws.ACCOUNT_ID, + description: "Hub Account ID - for use in corresponding spoke stack parameter", }); - eventBusRuleLambdaPermission.cfnOptions.condition = isMemberOfOrganization; + new CfnOutput(this, "ConfigurationTable", { + value: coreScheduler.configTable.tableArn, + description: "DynamoDB Configuration table ARN", + }); - //PolicyStatement for SSM Get and Put Parameters - const ssmParameterPolicyStatement = new PolicyStatement({ - actions: ["ssm:PutParameter", "ssm:GetParameter"], - effect: Effect.ALLOW, - resources: [ - cdk.Fn.sub( - `arn:\${AWS::Partition}:ssm:\${AWS::Region}:\${AWS::AccountId}:parameter/Solutions/${props.solutionName}/UUID/*`, - ), - ], + new CfnOutput(this, "IssueSnsTopicArn", { + value: coreScheduler.topic.topicArn, + description: "Notification SNS Topic ARN", }); - coreScheduler.lambdaFunction.addToRolePolicy(ssmParameterPolicyStatement); - //End instance scheduler database policy statement for lambda. 
- const schedulerRule = new events.Rule(this, "SchedulerEventRule", { - description: - "Instance Scheduler - Rule to trigger instance for scheduler function version " + props["solutionVersion"], - schedule: events.Schedule.expression(mappings.findInMap("Timeouts", schedulerFrequency.valueAsString)), - targets: [ - new EBEventTarget.LambdaFunction(coreScheduler.lambdaFunction, { - event: events.RuleTargetInput.fromObject({ - scheduled_action: "run_orchestrator", - }), - }), - ], - }); - - const cfnSchedulerRule = schedulerRule.node.defaultChild as events.CfnRule; - cfnSchedulerRule.addPropertyOverride( - "State", - mappings.findInMap("EnabledDisabled", schedulingActive.valueAsString), - ); + new CfnOutput(this, "SchedulerRoleArn", { + value: coreScheduler.hubSchedulerRole.roleArn, + description: "Scheduler role ARN", + }); - //End instance scheduler aws-event-lambda construct reference. - - /* - * Instance scheduler custom resource reference. - */ - const customService = new cdk.CustomResource(this, "ServiceSetup", { - serviceToken: coreScheduler.lambdaFunction.functionArn, - resourceType: "Custom::ServiceSetup", - properties: { - timeout: 120, - tagname: tagName, - default_timezone: defaultTimezone, - use_metrics: mappings.findInMap("TrueFalse", useCloudWatchMetrics.valueAsString), - scheduled_services: cdk.Fn.split(",", mappings.findInMap("Services", scheduledServices.valueAsString)), - schedule_clusters: mappings.findInMap("TrueFalse", scheduleRdsClusters.valueAsString), - create_rds_snapshot: mappings.findInMap("TrueFalse", createRdsSnapshot.valueAsString), - regions: regions, - remote_account_ids: principals, - namespace: namespace.valueAsString, - aws_partition: cdk.Fn.sub("${AWS::Partition}"), - scheduler_role_name: mappings.findInMap("SchedulerRole", "Name"), - schedule_lambda_account: mappings.findInMap("TrueFalse", scheduleLambdaAccount.valueAsString), - trace: mappings.findInMap("TrueFalse", trace.valueAsString), - enable_ssm_maintenance_windows: 
mappings.findInMap("TrueFalse", enableSSMMaintenanceWindows.valueAsString), - log_retention_days: logRetention.valueAsNumber, - started_tags: startedTags.valueAsString, - stopped_tags: stoppedTags.valueAsString, - stack_version: props.solutionVersion, - use_aws_organizations: mappings.findInMap("TrueFalse", usingAWSOrganizations.valueAsString), - }, - }); - - const customServiceCfn = customService.node.defaultChild as cdk.CfnCustomResource; - customServiceCfn.addDependency(schedulerLogGroup_ref); - - //Instance scheduler Cloudformation Output references. - new cdk.CfnOutput(this, "AccountId", { - value: this.account, - description: "Account to give access to when creating cross-account access role for cross account scenario ", - }); - - this.configurationTableOutput = new cdk.CfnOutput(this, "ConfigurationTable", { - value: (coreScheduler.configTable.node.defaultChild as dynamodb.CfnTable).attrArn, - description: "Name of the DynamoDB configuration table", - }); - - this.issueSnsTopicArn = new cdk.CfnOutput(this, "IssueSnsTopicArn", { - value: snsTopic.topicArn, - description: "Topic to subscribe to for notifications of errors and warnings", - }); - - this.schedulerRoleArn = new cdk.CfnOutput(this, "SchedulerRoleArn", { - value: schedulerRole.roleArn, - description: "Role for the instance scheduler lambda function", - }); - - new cdk.CfnOutput(this, "ServiceInstanceScheduleServiceToken", { - value: coreScheduler.lambdaFunction.functionArn, - description: "Arn to use as ServiceToken property for custom resource type Custom::ServiceInstanceSchedule", - }); - - //Instance scheduler ec2 policy statement, policy documents and role references. 
- const ec2PolicyStatementForLogs = new PolicyStatement({ - actions: ["logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", "logs:PutRetentionPolicy"], - resources: [ - cdk.Fn.sub("arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/*"), - schedulerLogGroup.logGroupArn, - ], - effect: Effect.ALLOW, - }); - - const ec2PolicyStatementforMisc = new PolicyStatement({ - actions: [ - "rds:DescribeDBClusters", - "rds:DescribeDBInstances", - "ec2:DescribeInstances", - "cloudwatch:PutMetricData", - "ssm:DescribeMaintenanceWindows", - "tag:GetResources", - ], - effect: Effect.ALLOW, - resources: ["*"], - }); - - const ec2PolicyAssumeRoleStatement = new PolicyStatement({ - actions: ["sts:AssumeRole"], - resources: [ - cdk.Fn.sub("arn:${AWS::Partition}:iam::*:role/${Namespace}-${Name}", { - Name: mappings.findInMap("SchedulerRole", "Name"), - }), - ], - effect: Effect.ALLOW, - }); - - const ec2PolicySSMStatement = new PolicyStatement({ - actions: ["ssm:GetParameter", "ssm:GetParameters"], - resources: [cdk.Fn.sub("arn:${AWS::Partition}:ssm:*:${AWS::AccountId}:parameter/*")], - effect: Effect.ALLOW, - }); - - const ec2Permissions = new iam.Policy(this, "Ec2Permissions", { - statements: [ - new PolicyStatement({ - actions: ["ec2:ModifyInstanceAttribute"], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:ec2:*:${AWS::AccountId}:instance/*")], - }), - ec2PolicyAssumeRoleStatement, - ], - roles: [schedulerRole], - }); - - NagSuppressions.addResourceSuppressions(ec2Permissions, [ - { - id: "AwsSolutions-IAM5", - reason: "This Lambda function needs to be able to modify ec2 instances for scheduling purposes.", - }, - ]); - - const ec2DynamoDBPolicy = new iam.Policy(this, "EC2DynamoDBPolicy", { - roles: [schedulerRole], - policyName: "EC2DynamoDBPolicy", - statements: [ec2PolicySSMStatement, ec2PolicyStatementforMisc, ec2PolicyStatementForLogs], - }); - - //Instance scheduler, scheduling policy statement, policy 
documents and role references. - const schedulerPolicyStatement1 = new PolicyStatement({ - actions: ["rds:DeleteDBSnapshot", "rds:DescribeDBSnapshots", "rds:StopDBInstance"], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:snapshot:*")], - }); - - const schedulerPolicyStatement2 = new PolicyStatement({ - actions: [ - "rds:AddTagsToResource", - "rds:RemoveTagsFromResource", - "rds:DescribeDBSnapshots", - "rds:StartDBInstance", - "rds:StopDBInstance", - ], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:db:*")], - }); - - const schedulerPolicyStatement3 = new PolicyStatement({ - actions: ["ec2:StartInstances", "ec2:StopInstances", "ec2:CreateTags", "ec2:DeleteTags"], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:ec2:*:${AWS::AccountId}:instance/*")], - }); - - const schedulerPolicyStatement4 = new PolicyStatement({ - actions: ["sns:Publish"], - effect: Effect.ALLOW, - resources: [snsTopic.topicArn], - }); - - const schedulerPolicyStatement5 = new PolicyStatement({ - actions: ["lambda:InvokeFunction"], - effect: Effect.ALLOW, - resources: [ - cdk.Fn.sub( - "arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:${AWS::StackName}-InstanceSchedulerMain", - ), - ], - }); - - const schedulerPolicyStatement6 = new PolicyStatement({ - actions: ["kms:GenerateDataKey*", "kms:Decrypt"], - effect: Effect.ALLOW, - resources: [instanceSchedulerEncryptionKey.keyArn], - }); - - const schedulerPolicyStatement7 = new PolicyStatement({ - actions: ["rds:AddTagsToResource", "rds:RemoveTagsFromResource", "rds:StartDBCluster", "rds:StopDBCluster"], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:cluster:*")], - }); - - const schedulerPolicy = new iam.Policy(this, "SchedulerPolicy", { - roles: [schedulerRole], - policyName: "SchedulerPolicy", - statements: [ - schedulerPolicyStatement2, - 
schedulerPolicyStatement3, - schedulerPolicyStatement4, - schedulerPolicyStatement5, - schedulerPolicyStatement6, - ], - }); - NagSuppressions.addResourceSuppressions(schedulerPolicy, [ - { - id: "AwsSolutions-IAM5", - reason: - "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", - }, - ]); - - const schedulerRDSPolicy = new iam.Policy(this, "SchedulerRDSPolicy", { - roles: [schedulerRole], - statements: [schedulerPolicyStatement1, schedulerPolicyStatement7], - }); - NagSuppressions.addResourceSuppressions(schedulerRDSPolicy, [ - { - id: "AwsSolutions-IAM5", - reason: - "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", - }, - ]); - - //Adding the EC2 and scheduling policy dependencies to the lambda. - const lambdaFunction = coreScheduler.lambdaFunction.node.defaultChild as lambda.CfnFunction; - lambdaFunction.addDependency(ec2DynamoDBPolicy.node.defaultChild as iam.CfnPolicy); - lambdaFunction.addDependency(ec2Permissions.node.defaultChild as iam.CfnPolicy); - lambdaFunction.addDependency(schedulerPolicy.node.defaultChild as iam.CfnPolicy); - lambdaFunction.addDependency(schedulerRDSPolicy.node.defaultChild as iam.CfnPolicy); - lambdaFunction.cfnOptions.metadata = { - cfn_nag: { - rules_to_suppress: [ - { - id: "W89", - reason: "This Lambda function does not need to access any resource provisioned within a VPC.", - }, - { - id: "W58", - reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", - }, - { - id: "W92", - reason: "Lambda function is only used by the event rule periodically, concurrent calls are very limited.", - }, - ], - }, - }; - - NagSuppressions.addResourceSuppressions(lambdaFunction, [ - { - id: "AwsSolutions-L1", - reason: "Lambda runtime held back to the newest supported by all partitions", - }, - ]); - - //Cloud Formation cfn 
references for ensuring the resource names are similar to earlier releases, and additional metadata for the cfn nag rules. - const instanceSchedulerEncryptionKey_cfn_ref = instanceSchedulerEncryptionKey.node.defaultChild as kms.CfnKey; - instanceSchedulerEncryptionKey_cfn_ref.overrideLogicalId("InstanceSchedulerEncryptionKey"); - - const keyAlias_cfn_ref = keyAlias.node.defaultChild as kms.CfnAlias; - keyAlias_cfn_ref.overrideLogicalId("InstanceSchedulerEncryptionKeyAlias"); - - const ec2DynamoDBPolicy_cfn_ref = ec2DynamoDBPolicy.node.defaultChild as iam.CfnPolicy; - ec2DynamoDBPolicy_cfn_ref.overrideLogicalId("EC2DynamoDBPolicy"); - - ec2DynamoDBPolicy_cfn_ref.cfnOptions.metadata = { - cfn_nag: { - rules_to_suppress: [ - { - id: "W12", - reason: - "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", - }, - ], - }, - }; - - NagSuppressions.addResourceSuppressions(ec2DynamoDBPolicy_cfn_ref, [ - { - id: "AwsSolutions-IAM5", - reason: - "All policies have been scoped to be as restrictive as possible. 
This solution needs to access ec2/rds resources across all regions.", - }, - ]); - - const schedulerPolicy_cfn_Ref = schedulerPolicy.node.defaultChild as iam.CfnPolicy; - schedulerPolicy_cfn_Ref.overrideLogicalId("SchedulerPolicy"); - - const schedulerRole_cfn_ref = schedulerRole.node.defaultChild as iam.CfnRole; - schedulerRole_cfn_ref.overrideLogicalId("SchedulerRole"); - - schedulerLogGroup_ref.overrideLogicalId("SchedulerLogGroup"); - - const snsTopic_cfn_ref = snsTopic.node.defaultChild as sns.CfnTopic; - snsTopic_cfn_ref.overrideLogicalId("InstanceSchedulerSnsTopic"); - - lambdaFunction.overrideLogicalId("Main"); - - const rule_cfn_ref = schedulerRule.node.defaultChild as events.CfnRule; - rule_cfn_ref.overrideLogicalId("SchedulerRule"); - - customServiceCfn.overrideLogicalId("SchedulerConfigHelper"); - - const stack = cdk.Stack.of(this); - - stack.templateOptions.metadata = { - "AWS::CloudFormation::Interface": { - ParameterGroups: [ - { - Label: { - default: "Scheduler (version " + props["solutionVersion"] + ")", - }, - Parameters: [ - "TagName", - "ScheduledServices", - "ScheduleRdsClusters", - "CreateRdsSnapshot", - "SchedulingActive", - "DefaultTimezone", - "ScheduleLambdaAccount", - "SchedulerFrequency", - "MemorySize", - ], - }, - { - Label: { default: "Namespace Configuration" }, - Parameters: ["Namespace"], - }, - { - Label: { default: "Account Structure" }, - Parameters: ["UsingAWSOrganizations", "Principals", "Regions"], - }, - { - Label: { - default: "Options", - }, - Parameters: ["UseCloudWatchMetrics", "Trace", "EnableSSMMaintenanceWindows"], - }, - { - Label: { - default: "Other parameters", - }, - Parameters: ["LogRetentionDays", "StartedTags", "StoppedTags"], - }, - ], - ParameterLabels: { - Namespace: { - default: "Namespace", - }, - LogRetentionDays: { - default: "Log retention days", - }, - StartedTags: { - default: "Started tags", - }, - StoppedTags: { - default: "Stopped tags", - }, - SchedulingActive: { - default: "Scheduling enabled", 
- }, - UsingAWSOrganizations: { - default: "Use AWS Organizations", - }, - Principals: { - default: "Organization Id/Remote Account Ids", - }, - ScheduleLambdaAccount: { - default: "This account", - }, - UseCloudWatchMetrics: { - default: "Enable CloudWatch Metrics", - }, - Trace: { - default: "Enable CloudWatch Debug Logs", - }, - EnableSSMMaintenanceWindows: { - default: "Enable SSM Maintenance windows", - }, - TagName: { - default: "Instance Scheduler tag name", - }, - ScheduledServices: { - default: "Service(s) to schedule", - }, - ScheduleRdsClusters: { - default: "Schedule Aurora Clusters", - }, - CreateRdsSnapshot: { - default: "Create RDS instance snapshot", - }, - DefaultTimezone: { - default: "Default time zone", - }, - SchedulerFrequency: { - default: "Frequency", - }, - Regions: { - default: "Region(s)", - }, - MemorySize: { - default: "Memory size", - }, - }, - }, - }; - stack.templateOptions.templateFormatVersion = "2010-09-09"; + new CfnOutput(this, "ServiceInstanceScheduleServiceToken", { + value: coreScheduler.cfnScheduleCustomResourceHandler.functionArn, + description: "Custom resource provider ARN - use as ServiceToken property value for CloudFormation Schedules", + }); } } diff --git a/source/instance-scheduler/lib/lambda-functions/asg-handler.ts b/source/instance-scheduler/lib/lambda-functions/asg-handler.ts new file mode 100644 index 00000000..488b7049 --- /dev/null +++ b/source/instance-scheduler/lib/lambda-functions/asg-handler.ts @@ -0,0 +1,160 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { Aws, CfnCondition, Duration, Fn, RemovalPolicy } from "aws-cdk-lib"; +import { Table } from "aws-cdk-lib/aws-dynamodb"; +import { Effect, IRole, Policy, PolicyStatement, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam"; +import { Key } from "aws-cdk-lib/aws-kms"; +import { Function as LambdaFunction } from "aws-cdk-lib/aws-lambda"; +import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs"; +import { Topic } from "aws-cdk-lib/aws-sns"; +import { NagSuppressions } from "cdk-nag"; +import { Construct } from "constructs"; +import { AnonymizedMetricsEnvironment } from "../anonymized-metrics-environment"; +import { addCfnNagSuppressions } from "../cfn-nag"; +import { FunctionFactory } from "./function-factory"; + +export interface AsgHandlerProps { + readonly DEFAULT_TIMEZONE: string; + readonly USER_AGENT_EXTRA: string; + readonly asgSchedulingRoleName: string; + readonly configTable: Table; + readonly encryptionKey: Key; + readonly enableDebugLogging: CfnCondition; + readonly factory: FunctionFactory; + readonly logRetentionDays: RetentionDays; + readonly metricsEnv: AnonymizedMetricsEnvironment; + readonly namespace: string; + readonly rulePrefix: string; + readonly scheduledTagKey: string; + readonly snsErrorReportingTopic: Topic; + readonly tagKey: string; +} + +export class AsgHandler { + readonly role: IRole; + readonly lambdaFunction: LambdaFunction; + + static roleName(namespace: string) { + return `${namespace}-AsgRequestHandler-Role`; + } + + constructor(scope: Construct, props: AsgHandlerProps) { + this.role = new Role(scope, "ASGRole", { + assumedBy: new ServicePrincipal("lambda.amazonaws.com"), + roleName: AsgHandler.roleName(props.namespace), + }); + + this.lambdaFunction = props.factory.createFunction(scope, "ASGHandler", { + description: `Instance Scheduler for AutoScaling Groups version ${props.metricsEnv.SOLUTION_VERSION}`, + index: "instance_scheduler/handler/asg.py", + handler: 
"lambda_handler", + memorySize: 128, + role: this.role, + timeout: Duration.minutes(5), + environment: { + CONFIG_TABLE: props.configTable.tableName, + USER_AGENT_EXTRA: props.USER_AGENT_EXTRA, + ISSUES_TOPIC_ARN: props.snsErrorReportingTopic.topicArn, + POWERTOOLS_LOG_LEVEL: Fn.conditionIf(props.enableDebugLogging.logicalId, "DEBUG", "INFO").toString(), + POWERTOOLS_SERVICE_NAME: "asg", + ASG_SCHEDULING_ROLE_NAME: props.asgSchedulingRoleName, + DEFAULT_TIMEZONE: props.DEFAULT_TIMEZONE, + SCHEDULE_TAG_KEY: props.tagKey, + SCHEDULED_TAG_KEY: props.scheduledTagKey, + RULE_PREFIX: props.rulePrefix, + ...props.metricsEnv, + }, + }); + + const lambdaDefaultLogGroup = new LogGroup(scope, "ASGHandlerLogGroup", { + logGroupName: `/aws/lambda/${this.lambdaFunction.functionName}`, + removalPolicy: RemovalPolicy.RETAIN, + retention: props.logRetentionDays, + }); + + if (!this.lambdaFunction.role) { + throw new Error("lambdaFunction role is missing"); + } + + const schedulingRequestHandlerPolicy = new Policy(scope, "ASGPolicy", { + roles: [this.lambdaFunction.role], + }); + + lambdaDefaultLogGroup.grantWrite(schedulingRequestHandlerPolicy); + props.configTable.grantReadData(schedulingRequestHandlerPolicy); + props.snsErrorReportingTopic.grantPublish(schedulingRequestHandlerPolicy); + props.encryptionKey.grantEncryptDecrypt(schedulingRequestHandlerPolicy); + + schedulingRequestHandlerPolicy.addStatements( + new PolicyStatement({ + actions: ["sts:AssumeRole"], + effect: Effect.ALLOW, + resources: [`arn:${Aws.PARTITION}:iam::*:role/${props.asgSchedulingRoleName}`], + }), + ); + + const defaultPolicy = this.lambdaFunction.role.node.tryFindChild("DefaultPolicy"); + + if (!defaultPolicy) { + throw Error("Unable to find default policy on lambda role"); + } + + addCfnNagSuppressions(defaultPolicy, { + id: "W12", + reason: "Wildcard required for xray", + }); + + NagSuppressions.addResourceSuppressions(defaultPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::*"], + 
reason: "required for xray", + }, + ]); + + addCfnNagSuppressions(schedulingRequestHandlerPolicy, { + id: "W76", + reason: "Acknowledged IAM policy document SPCM > 25", + }); + + NagSuppressions.addResourceSuppressions(schedulingRequestHandlerPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Action::kms:GenerateDataKey*", "Action::kms:ReEncrypt*"], + reason: "Permission to use solution CMK with dynamo/sns", + }, + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::arn::iam::*:role/-ASG-Scheduling-Role"], + reason: "This handler's primary purpose is to assume role into spoke accounts for scheduling purposes", + }, + ]); + + addCfnNagSuppressions( + this.lambdaFunction, + { + id: "W89", + reason: "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + id: "W58", + reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + id: "W92", + reason: "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", + }, + ); + + addCfnNagSuppressions(lambdaDefaultLogGroup, { + id: "W84", + reason: + "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }); + + addCfnNagSuppressions(this.role, { + id: "W28", + reason: "Explicit role name required for assumedBy arn principle in spoke stack", + }); + } +} diff --git a/source/instance-scheduler/lib/lambda-functions/asg-orchestrator.ts b/source/instance-scheduler/lib/lambda-functions/asg-orchestrator.ts new file mode 100644 index 00000000..17c96249 --- /dev/null +++ b/source/instance-scheduler/lib/lambda-functions/asg-orchestrator.ts @@ -0,0 +1,133 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { CfnCondition, Duration, Fn, RemovalPolicy } from "aws-cdk-lib"; +import { Table } from "aws-cdk-lib/aws-dynamodb"; +import { Policy, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam"; +import { Key } from "aws-cdk-lib/aws-kms"; +import { Function as LambdaFunction } from "aws-cdk-lib/aws-lambda"; +import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs"; +import { Topic } from "aws-cdk-lib/aws-sns"; +import { NagSuppressions } from "cdk-nag"; +import { Construct } from "constructs"; +import { AnonymizedMetricsEnvironment } from "../anonymized-metrics-environment"; +import { cfnConditionToTrueFalse } from "../cfn"; +import { addCfnNagSuppressions } from "../cfn-nag"; +import { FunctionFactory } from "./function-factory"; + +export interface AsgOrchestratorProps { + readonly USER_AGENT_EXTRA: string; + readonly asgHandler: LambdaFunction; + readonly configTable: Table; + readonly enableDebugLogging: CfnCondition; + readonly enableSchedulingHubAccount: CfnCondition; + readonly encryptionKey: Key; + readonly factory: FunctionFactory; + readonly logRetentionDays: RetentionDays; + readonly metricsEnv: AnonymizedMetricsEnvironment; + readonly regions: string[]; + readonly snsErrorReportingTopic: Topic; +} + +export class AsgOrchestrator { + readonly lambdaFunction: LambdaFunction; + + constructor(scope: Construct, props: AsgOrchestratorProps) { + const role = new Role(scope, "ASGOrchRole", { + assumedBy: new ServicePrincipal("lambda.amazonaws.com"), + }); + + this.lambdaFunction = props.factory.createFunction(scope, "ASGOrchestrator", { + description: `Instance Scheduler orchestrator for AutoScaling Groups version ${props.metricsEnv.SOLUTION_VERSION}`, + index: "instance_scheduler/handler/asg_orchestrator.py", + handler: "lambda_handler", + memorySize: 128, + role, + timeout: Duration.minutes(1), + environment: { + USER_AGENT_EXTRA: props.USER_AGENT_EXTRA, + CONFIG_TABLE: props.configTable.tableName, + 
ISSUES_TOPIC_ARN: props.snsErrorReportingTopic.topicArn, + ENABLE_SCHEDULE_HUB_ACCOUNT: cfnConditionToTrueFalse(props.enableSchedulingHubAccount), + SCHEDULE_REGIONS: Fn.join(",", props.regions), + ASG_SCHEDULER_NAME: props.asgHandler.functionName, + POWERTOOLS_LOG_LEVEL: Fn.conditionIf(props.enableDebugLogging.logicalId, "DEBUG", "INFO").toString(), + POWERTOOLS_SERVICE_NAME: "asg_orch", + ...props.metricsEnv, + }, + }); + + const lambdaDefaultLogGroup = new LogGroup(scope, "ASGOrchLogGroup", { + logGroupName: `/aws/lambda/${this.lambdaFunction.functionName}`, + removalPolicy: RemovalPolicy.RETAIN, + retention: props.logRetentionDays, + }); + + if (!this.lambdaFunction.role) { + throw new Error("lambdaFunction role is missing"); + } + + const asgOrchPolicy = new Policy(scope, "ASGOrchPolicy", { + roles: [this.lambdaFunction.role], + }); + + lambdaDefaultLogGroup.grantWrite(asgOrchPolicy); + props.configTable.grantReadData(asgOrchPolicy); + props.snsErrorReportingTopic.grantPublish(asgOrchPolicy); + props.encryptionKey.grantEncryptDecrypt(asgOrchPolicy); + props.asgHandler.grantInvoke(this.lambdaFunction.role); + + const defaultPolicy = this.lambdaFunction.role.node.tryFindChild("DefaultPolicy"); + + if (!defaultPolicy) { + throw Error("Unable to find default policy on lambda role"); + } + + addCfnNagSuppressions(defaultPolicy, { + id: "W12", + reason: "Wildcard required for xray", + }); + + NagSuppressions.addResourceSuppressions(defaultPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::*"], + reason: "required for xray", + }, + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource:::*"], + reason: "permissions to invoke all versions of the ASG scheduling request handler", + }, + ]); + + NagSuppressions.addResourceSuppressions(asgOrchPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Action::kms:GenerateDataKey*", "Action::kms:ReEncrypt*"], + reason: "Permission to use solution CMK with dynamo/sns", + }, + ]); + + addCfnNagSuppressions( + 
this.lambdaFunction, + { + id: "W89", + reason: "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + id: "W58", + reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + id: "W92", + reason: "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", + }, + ); + + addCfnNagSuppressions(lambdaDefaultLogGroup, { + id: "W84", + reason: + "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }); + } +} diff --git a/source/instance-scheduler/lib/lambda-functions/function-factory.ts b/source/instance-scheduler/lib/lambda-functions/function-factory.ts new file mode 100644 index 00000000..7393c342 --- /dev/null +++ b/source/instance-scheduler/lib/lambda-functions/function-factory.ts @@ -0,0 +1,63 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { PythonFunction } from "@aws-cdk/aws-lambda-python-alpha"; +import { Duration } from "aws-cdk-lib"; +import { IRole } from "aws-cdk-lib/aws-iam"; +import { Code, Function as LambdaFunction, Runtime, Tracing } from "aws-cdk-lib/aws-lambda"; +import { ILogGroup } from "aws-cdk-lib/aws-logs"; +import { Construct } from "constructs"; +import path from "path"; + +export interface FunctionProps { + readonly functionName?: string; + readonly description: string; + readonly index: string; + readonly handler: string; + readonly role: IRole; + readonly memorySize: number; + readonly timeout: Duration; + readonly logGroup?: ILogGroup; + environment: { [_: string]: string }; +} + +export abstract class FunctionFactory { + abstract createFunction(scope: Construct, id: string, props: FunctionProps): LambdaFunction; +} + +export class PythonFunctionFactory extends FunctionFactory { + override createFunction(scope: Construct, id: string, props: FunctionProps): LambdaFunction 
{ + return new PythonFunction(scope, id, { + functionName: props.functionName, + description: props.description, + entry: path.join(__dirname, "..", "..", "..", "app"), + index: props.index, + handler: props.handler, + runtime: Runtime.PYTHON_3_11, + role: props.role, + memorySize: props.memorySize, + timeout: props.timeout, + logGroup: props.logGroup, + environment: props.environment, + tracing: Tracing.ACTIVE, + bundling: { assetExcludes: [".mypy_cache", ".tox", "__pycache__", "tests"] }, + }); + } +} + +export class TestFunctionFactory extends FunctionFactory { + override createFunction(scope: Construct, id: string, props: FunctionProps): LambdaFunction { + return new LambdaFunction(scope, id, { + code: Code.fromAsset(path.join(__dirname, "..", "..", "tests", "test_function")), + runtime: Runtime.PYTHON_3_11, + functionName: props.functionName, + description: props.description, + handler: props.handler, + role: props.role, + memorySize: props.memorySize, + timeout: props.timeout, + logGroup: props.logGroup, + environment: props.environment, + tracing: Tracing.ACTIVE, + }); + } +} diff --git a/source/instance-scheduler/lib/lambda-functions/main.ts b/source/instance-scheduler/lib/lambda-functions/main.ts new file mode 100644 index 00000000..7afe413f --- /dev/null +++ b/source/instance-scheduler/lib/lambda-functions/main.ts @@ -0,0 +1,151 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { Aws, CustomResource, Duration, Stack } from "aws-cdk-lib"; +import { Table } from "aws-cdk-lib/aws-dynamodb"; +import { Effect, PolicyStatement, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam"; +import { Function as LambdaFunction } from "aws-cdk-lib/aws-lambda"; +import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs"; +import { Topic } from "aws-cdk-lib/aws-sns"; +import { NagSuppressions } from "cdk-nag"; +import { overrideLogicalId } from "../cfn"; +import { FunctionFactory } from "./function-factory"; +import { addCfnNagSuppressions } from "../cfn-nag"; + +export interface MainLambdaProps { + readonly description: string; + readonly appenv: { [p: string]: string }; + readonly configTable: Table; + readonly snsErrorReportingTopic: Topic; + readonly scheduleLogGroup: LogGroup; + readonly principals: string[]; + readonly logRetentionDays: RetentionDays; + readonly factory: FunctionFactory; +} +export class MainLambda { + /* + For backwards compatibility with <1.5.x this function encapsulates the CFN, CLI, and ServiceSetup handlers + */ + + readonly lambdaFunction: LambdaFunction; + + constructor(scope: Stack, props: MainLambdaProps) { + const role = new Role(scope, "MainLambdaRole", { + assumedBy: new ServicePrincipal("lambda.amazonaws.com"), + }); + + const functionName = Aws.STACK_NAME + "-InstanceSchedulerMain"; + this.lambdaFunction = props.factory.createFunction(scope, "scheduler-lambda", { + functionName: functionName, + description: props.description, + index: "instance_scheduler/main.py", + handler: "lambda_handler", + role: role, + memorySize: 128, + timeout: Duration.seconds(300), + environment: props.appenv, + }); + + //backwards compatibility (<1.5.x) override + overrideLogicalId(this.lambdaFunction, "Main"); + + const customService = new CustomResource(scope, "ServiceSetup", { + serviceToken: this.lambdaFunction.functionArn, + resourceType: "Custom::ServiceSetup", + properties: { + timeout: 
120, + remote_account_ids: props.principals, + log_retention_days: props.logRetentionDays, + }, + }); + overrideLogicalId(customService, "SchedulerConfigHelper"); + customService.node.addDependency(props.scheduleLogGroup); + + if (!this.lambdaFunction.role) { + throw new Error("lambdaFunction role is missing"); + } + + props.configTable.grantReadWriteData(this.lambdaFunction.role); + props.scheduleLogGroup.grantWrite(this.lambdaFunction.role); + props.snsErrorReportingTopic.grantPublish(this.lambdaFunction.role); + + // basic logging permissions and permission to modify retention policy + // https://docs.aws.amazon.com/lambda/latest/operatorguide/access-logs.html + this.lambdaFunction.role.addToPrincipalPolicy( + new PolicyStatement({ + actions: ["logs:CreateLogGroup"], + effect: Effect.ALLOW, + resources: [`arn:${Aws.PARTITION}:logs:${Aws.REGION}:${Aws.ACCOUNT_ID}:*`], + }), + ); + + // specifying the function in the following two statements directly creates a circular dependency + // these should go into a separate policy, but custom resources need to be sure to depend on it + this.lambdaFunction.role.addToPrincipalPolicy( + new PolicyStatement({ + actions: ["logs:CreateLogStream", "logs:PutLogEvents", "logs:PutRetentionPolicy"], + effect: Effect.ALLOW, + resources: [ + `arn:${Aws.PARTITION}:logs:${Aws.REGION}:${Aws.ACCOUNT_ID}:log-group:/aws/lambda/${functionName}:*`, + ], + }), + ); + + const defaultPolicy = this.lambdaFunction.role.node.tryFindChild("DefaultPolicy"); + if (!defaultPolicy) { + throw Error("Unable to find default policy on lambda role"); + } + + addCfnNagSuppressions( + defaultPolicy, + { + id: "W12", + reason: "Wildcard required for xray", + }, + { + id: "W76", + reason: "Acknowledged IAM policy document SPCM > 25", + }, + ); + + NagSuppressions.addResourceSuppressions(defaultPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Action::kms:GenerateDataKey*", "Action::kms:ReEncrypt*"], + reason: "Permission to use solution CMK with 
dynamo/sns", + }, + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::*"], + reason: "required for xray", + }, + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::arn::logs:::*"], + reason: "Permission to use the solution's custom log group", + }, + { + id: "AwsSolutions-IAM5", + appliesTo: [ + "Resource::arn::logs:::log-group:/aws/lambda/-InstanceSchedulerMain:*", + ], + reason: "Permission to modify own log group retention policy", + }, + ]); + + addCfnNagSuppressions( + this.lambdaFunction, + { + id: "W89", + reason: "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + id: "W58", + reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + id: "W92", + reason: "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", + }, + ); + } +} diff --git a/source/instance-scheduler/lib/lambda-functions/metrics-uuid-generator.ts b/source/instance-scheduler/lib/lambda-functions/metrics-uuid-generator.ts new file mode 100644 index 00000000..3a86b296 --- /dev/null +++ b/source/instance-scheduler/lib/lambda-functions/metrics-uuid-generator.ts @@ -0,0 +1,128 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { Aws, CustomResource, Duration, RemovalPolicy } from "aws-cdk-lib"; +import { Effect, Policy, PolicyStatement, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam"; +import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs"; +import { NagSuppressions } from "cdk-nag"; +import { Construct } from "constructs"; +import { addCfnNagSuppressions } from "../cfn-nag"; +import { FunctionFactory } from "./function-factory"; + +export interface MetricsUuidGeneratorProps { + readonly solutionName: string; + readonly logRetentionDays: RetentionDays; + readonly USER_AGENT_EXTRA: string; + readonly UUID_KEY: string; + readonly STACK_ID: string; + readonly factory: FunctionFactory; +} +export class MetricsUuidGenerator { + readonly metricsUuidCustomResource: CustomResource; + readonly metricsUuid: string; + + constructor(scope: Construct, props: MetricsUuidGeneratorProps) { + //todo: ensure custom resource depends on the policy that provides logging access + const role = new Role(scope, "MetricsGeneratorRole", { + assumedBy: new ServicePrincipal("lambda.amazonaws.com"), + }); + + const lambdaResourceProvider = props.factory.createFunction(scope, "MetricsUuidGenerator", { + description: "Custom Resource Provider used to generate unique UUIDs for solution deployments", + index: "instance_scheduler/handler/metrics_uuid_custom_resource.py", + handler: "handle_metrics_uuid_request", + memorySize: 128, + role: role, + timeout: Duration.minutes(1), + environment: { + USER_AGENT_EXTRA: props.USER_AGENT_EXTRA, + UUID_KEY: props.UUID_KEY, + STACK_ID: props.STACK_ID, + }, + }); + + const lambdaDefaultLogGroup = new LogGroup(scope, "MetricsUuidHandlerLogGroup", { + logGroupName: `/aws/lambda/${lambdaResourceProvider.functionName}`, + removalPolicy: RemovalPolicy.RETAIN, + retention: props.logRetentionDays, + }); + + if (!lambdaResourceProvider.role) { + throw new Error("lambdaFunction role is missing"); + } + + const metricsUuidPolicy = new 
Policy(scope, "MetricsUuidPermissionsPolicy", { + roles: [lambdaResourceProvider.role], + }); + + lambdaDefaultLogGroup.grantWrite(metricsUuidPolicy); + metricsUuidPolicy.addStatements( + new PolicyStatement({ + actions: ["ssm:GetParameters", "ssm:GetParameter", "ssm:GetParameterHistory"], + effect: Effect.ALLOW, + resources: [ + `arn:${Aws.PARTITION}:ssm:${Aws.REGION}:${Aws.ACCOUNT_ID}:parameter/Solutions/${props.solutionName}/UUID/*`, + ], + }), + ); + + // CUSTOM RESOURCE + this.metricsUuidCustomResource = new CustomResource(scope, "MetricsUuidProvider", { + serviceToken: lambdaResourceProvider.functionArn, + resourceType: "Custom::MetricsUuid", + }); + + //permissions policy must be applied before custom resource can be invoked + this.metricsUuidCustomResource.node.addDependency(metricsUuidPolicy); + this.metricsUuid = this.metricsUuidCustomResource.getAttString("Uuid"); + + const defaultPolicy = lambdaResourceProvider.role.node.tryFindChild("DefaultPolicy"); + if (!defaultPolicy) { + throw Error("Unable to find default policy on lambda role"); + } + + addCfnNagSuppressions(defaultPolicy, { + id: "W12", + reason: "Wildcard required for xray", + }); + + NagSuppressions.addResourceSuppressions(defaultPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::*"], + reason: "required for xray", + }, + ]); + + NagSuppressions.addResourceSuppressions(metricsUuidPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: [ + "Resource::arn::ssm:::parameter/Solutions/instance-scheduler-on-aws/UUID/*", + ], + reason: "backwards compatibility (<=1.5.3) -- ability to read metrics UUID from ssm parameter", + }, + ]); + + addCfnNagSuppressions( + lambdaResourceProvider, + { + id: "W89", + reason: "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + id: "W58", + reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + id: "W92", + reason: "Lambda function is a custom 
resource. Concurrent calls are very limited.", + }, + ); + + addCfnNagSuppressions(lambdaDefaultLogGroup, { + id: "W84", + reason: + "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }); + } +} diff --git a/source/instance-scheduler/lib/lambda-functions/remote-registration.ts b/source/instance-scheduler/lib/lambda-functions/remote-registration.ts new file mode 100644 index 00000000..1eede8fd --- /dev/null +++ b/source/instance-scheduler/lib/lambda-functions/remote-registration.ts @@ -0,0 +1,141 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + Aspects, + Aws, + CfnCondition, + CustomResource, + Duration, + CfnWaitConditionHandle, + RemovalPolicy, +} from "aws-cdk-lib"; +import { Effect, Policy, PolicyStatement, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam"; +import { NagSuppressions } from "cdk-nag"; +import { FunctionFactory } from "./function-factory"; +import { SpokeRegistrationLambda } from "./spoke-registration"; +import { Construct } from "constructs"; +import { ConditionAspect } from "../cfn"; +import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs"; +import { addCfnNagSuppressions } from "../cfn-nag"; + +export interface RemoteRegistrationCustomResourceProps { + readonly hubAccountId: string; + readonly namespace: string; + readonly shouldRegisterSpokeAccountCondition: CfnCondition; + readonly factory: FunctionFactory; + readonly USER_AGENT_EXTRA: string; +} + +export class RemoteRegistrationCustomResource { + constructor(scope: Construct, id: string, props: RemoteRegistrationCustomResourceProps) { + const shouldRegisterSpokeAccountAspect = new ConditionAspect(props.shouldRegisterSpokeAccountCondition); + + const role = new Role(scope, "RegisterSpokeAccountCustomResourceLambdaRole", { + assumedBy: new ServicePrincipal("lambda.amazonaws.com"), + }); + 
Aspects.of(role).add(shouldRegisterSpokeAccountAspect); + + const hubRegistrationLambdaArn = `arn:${Aws.PARTITION}:lambda:${Aws.REGION}:${ + props.hubAccountId + }:function:${SpokeRegistrationLambda.getFunctionName(props.namespace)}`; + + const lambdaFunction = props.factory.createFunction(scope, "RegisterSpokeAccountCustomResourceLambda", { + description: "Custom Resource Provider used for spoke account self registration via aws organization", + index: "instance_scheduler/handler/remote_registration_custom_resource.py", + handler: "handle_remote_registration_request", + memorySize: 128, + role: role, + timeout: Duration.minutes(1), + environment: { + USER_AGENT_EXTRA: props.USER_AGENT_EXTRA, + HUB_REGISTRATION_LAMBDA_ARN: hubRegistrationLambdaArn, + }, + }); + Aspects.of(lambdaFunction).add(shouldRegisterSpokeAccountAspect); + + const policy = new Policy(scope, "RegisterSpokeAccountCustomResourceLambdaPolicy", { + roles: [role], + statements: [ + new PolicyStatement({ + actions: ["lambda:InvokeFunction"], + resources: [hubRegistrationLambdaArn], + effect: Effect.ALLOW, + }), + ], + }); + Aspects.of(policy).add(shouldRegisterSpokeAccountAspect); + + // Retention set to ONE_YEAR to not introduce new CfnParameters. + // Logs are for custom resource lambda and will rarely be generated. 
+ const lambdaDefaultLogGroup = new LogGroup(scope, "SpokeRegistrationLogGroup", { + logGroupName: `/aws/lambda/${lambdaFunction.functionName}`, + removalPolicy: RemovalPolicy.RETAIN, + retention: RetentionDays.ONE_YEAR, + }); + Aspects.of(lambdaDefaultLogGroup).add(shouldRegisterSpokeAccountAspect); + lambdaDefaultLogGroup.grantWrite(policy); + + const registerSpokeAccountCustomResource = new CustomResource(scope, id, { + serviceToken: lambdaFunction.functionArn, + resourceType: "Custom::RegisterSpokeAccount", + }); + Aspects.of(registerSpokeAccountCustomResource).add(shouldRegisterSpokeAccountAspect); + + // CfnWaitConditionHandle adds some time between the policy and custom resource creation as the addDependency method is potentially unreliable here. + const waitConditionHandle = new CfnWaitConditionHandle( + scope, + "RegisterSpokeAccountCustomResourceLambdaPolicyWaiter", + ); + Aspects.of(waitConditionHandle).add(shouldRegisterSpokeAccountAspect); + registerSpokeAccountCustomResource.node.addDependency(waitConditionHandle); + waitConditionHandle.node.addDependency(policy); + + const defaultPolicy = role.node.tryFindChild("DefaultPolicy"); + if (!defaultPolicy) { + throw Error("Unable to find default policy on lambda role"); + } + + addCfnNagSuppressions(defaultPolicy, { + id: "W12", + reason: "Wildcard required for xray", + }); + + NagSuppressions.addResourceSuppressions(defaultPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::*"], + reason: "required for xray", + }, + ]); + + addCfnNagSuppressions( + lambdaFunction, + { + id: "W89", + reason: "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + id: "W58", + reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + id: "W92", + reason: "Lambda function is a custom resource. 
Concurrent calls are very limited.", + }, + ); + + NagSuppressions.addResourceSuppressions(lambdaFunction, [ + { + id: "AwsSolutions-L1", + reason: "Python 3.11 is the newest available runtime. This finding is a false positive.", + }, + ]); + + addCfnNagSuppressions(lambdaDefaultLogGroup, { + id: "W84", + reason: + "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }); + } +} diff --git a/source/instance-scheduler/lib/lambda-functions/schedule-update-handler.ts b/source/instance-scheduler/lib/lambda-functions/schedule-update-handler.ts new file mode 100644 index 00000000..79b3eb85 --- /dev/null +++ b/source/instance-scheduler/lib/lambda-functions/schedule-update-handler.ts @@ -0,0 +1,145 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { CfnCondition, Duration, Fn, RemovalPolicy } from "aws-cdk-lib"; +import { Table } from "aws-cdk-lib/aws-dynamodb"; +import { Policy, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam"; +import { Key } from "aws-cdk-lib/aws-kms"; +import { FilterCriteria, Function as LambdaFunction, StartingPosition } from "aws-cdk-lib/aws-lambda"; +import { DynamoEventSource } from "aws-cdk-lib/aws-lambda-event-sources"; +import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs"; +import { Topic } from "aws-cdk-lib/aws-sns"; +import { NagSuppressions } from "cdk-nag"; +import { Construct } from "constructs"; +import { AnonymizedMetricsEnvironment } from "../anonymized-metrics-environment"; +import { cfnConditionToTrueFalse } from "../cfn"; +import { addCfnNagSuppressions } from "../cfn-nag"; +import { FunctionFactory } from "./function-factory"; + +export interface ScheduleUpdateHandlerProps { + readonly USER_AGENT_EXTRA: string; + readonly asgHandler: LambdaFunction; + readonly configTable: Table; + readonly enableDebugLogging: CfnCondition; + readonly enableSchedulingHubAccount: 
CfnCondition; + readonly encryptionKey: Key; + readonly factory: FunctionFactory; + readonly logRetentionDays: RetentionDays; + readonly metricsEnv: AnonymizedMetricsEnvironment; + readonly regions: string[]; + readonly snsErrorReportingTopic: Topic; +} + +export class ScheduleUpdateHandler { + readonly lambdaFunction: LambdaFunction; + + constructor(scope: Construct, props: ScheduleUpdateHandlerProps) { + const role = new Role(scope, "ScheduleUpdateHandlerRole", { + assumedBy: new ServicePrincipal("lambda.amazonaws.com"), + }); + + this.lambdaFunction = props.factory.createFunction(scope, "ScheduleUpdateHandler", { + description: `Instance Scheduler handler for updates to schedules version ${props.metricsEnv.SOLUTION_VERSION}`, + index: "instance_scheduler/handler/schedule_update.py", + handler: "lambda_handler", + memorySize: 128, + role, + timeout: Duration.minutes(1), + environment: { + USER_AGENT_EXTRA: props.USER_AGENT_EXTRA, + CONFIG_TABLE: props.configTable.tableName, + ISSUES_TOPIC_ARN: props.snsErrorReportingTopic.topicArn, + ENABLE_SCHEDULE_HUB_ACCOUNT: cfnConditionToTrueFalse(props.enableSchedulingHubAccount), + SCHEDULE_REGIONS: Fn.join(",", props.regions), + ASG_SCHEDULER_NAME: props.asgHandler.functionName, + POWERTOOLS_LOG_LEVEL: Fn.conditionIf(props.enableDebugLogging.logicalId, "DEBUG", "INFO").toString(), + POWERTOOLS_SERVICE_NAME: "sch_upd", + ...props.metricsEnv, + }, + }); + + this.lambdaFunction.addEventSource( + new DynamoEventSource(props.configTable, { + startingPosition: StartingPosition.LATEST, + maxBatchingWindow: Duration.minutes(1), + filters: [ + FilterCriteria.filter({ dynamodb: { Keys: { type: { S: ["schedule", "period"] } } } }), + FilterCriteria.filter({ eventName: ["INSERT", "MODIFY"] }), + ], + }), + ); + + const lambdaDefaultLogGroup = new LogGroup(scope, "ScheduleUpdateHandlerLogGroup", { + logGroupName: `/aws/lambda/${this.lambdaFunction.functionName}`, + removalPolicy: RemovalPolicy.RETAIN, + retention: 
props.logRetentionDays, + }); + + if (!this.lambdaFunction.role) { + throw new Error("lambdaFunction role is missing"); + } + + const policy = new Policy(scope, "ScheduleUpdateHandlerPolicy", { + roles: [this.lambdaFunction.role], + }); + + lambdaDefaultLogGroup.grantWrite(policy); + props.configTable.grantReadData(policy); + props.snsErrorReportingTopic.grantPublish(policy); + props.encryptionKey.grantEncryptDecrypt(policy); + props.asgHandler.grantInvoke(this.lambdaFunction.role); + + const defaultPolicy = this.lambdaFunction.role.node.tryFindChild("DefaultPolicy"); + + if (!defaultPolicy) { + throw Error("Unable to find default policy on lambda role"); + } + + addCfnNagSuppressions(defaultPolicy, { + id: "W12", + reason: "Wildcard required for xray", + }); + + NagSuppressions.addResourceSuppressions(defaultPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource::*"], + reason: "required for xray", + }, + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource:::*"], + reason: "permissions to invoke all versions of the ASG scheduling request handler", + }, + ]); + + NagSuppressions.addResourceSuppressions(policy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Action::kms:GenerateDataKey*", "Action::kms:ReEncrypt*"], + reason: "Permission to use solution CMK with dynamo/sns", + }, + ]); + + addCfnNagSuppressions( + this.lambdaFunction, + { + id: "W89", + reason: "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + id: "W58", + reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + id: "W92", + reason: "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", + }, + ); + + addCfnNagSuppressions(lambdaDefaultLogGroup, { + id: "W84", + reason: + "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }); + } +} diff --git 
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
import { Aws, CfnCondition, Duration, Fn, RemovalPolicy } from "aws-cdk-lib";
import { Table } from "aws-cdk-lib/aws-dynamodb";
import { Effect, Policy, PolicyStatement, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam";
import { Function as LambdaFunction } from "aws-cdk-lib/aws-lambda";
import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs";
import { Topic } from "aws-cdk-lib/aws-sns";
import { NagSuppressions } from "cdk-nag";
import { Construct } from "constructs";
import { AnonymizedMetricsEnvironment } from "../anonymized-metrics-environment";
import { cfnConditionToTrueFalse } from "../cfn";
import { addCfnNagSuppressions } from "../cfn-nag";
import { FunctionFactory } from "./function-factory";
import { Key } from "aws-cdk-lib/aws-kms";

/**
 * Configuration for the scheduling orchestrator Lambda. Most of the
 * CfnCondition props are serialized into "True"/"False" environment variables
 * via cfnConditionToTrueFalse so the Python handler can read them as flags.
 */
export interface SchedulingOrchestratorProps {
  readonly description: string;
  readonly logRetentionDays: RetentionDays;
  // The per-account/region request handler this orchestrator fans out to.
  readonly schedulingRequestHandlerLambda: LambdaFunction;
  readonly enableDebugLogging: CfnCondition;
  readonly configTable: Table;
  readonly snsErrorReportingTopic: Topic;
  // Solution CMK used by the SNS error topic; the orchestrator needs kms access to publish.
  readonly snsKmsKey: Key;
  readonly scheduleLogGroup: LogGroup;
  readonly USER_AGENT_EXTRA: string;
  readonly enableSchedulingHubAccount: CfnCondition;
  readonly enableEc2: CfnCondition;
  readonly enableRds: CfnCondition;
  readonly enableRdsClusters: CfnCondition;
  readonly enableNeptune: CfnCondition;
  readonly enableDocdb: CfnCondition;
  readonly enableAsgs: CfnCondition;
  readonly regions: string[];
  readonly defaultTimezone: string;
  readonly enableRdsSnapshots: CfnCondition;
  readonly enableAwsOrganizations: CfnCondition;
  readonly enableEc2SsmMaintenanceWindows: CfnCondition;
  readonly opsDashboardEnabled: CfnCondition;
  readonly startTags: string;
  readonly stopTags: string;
  readonly metricsEnv: AnonymizedMetricsEnvironment;
  // Factory abstraction that builds the Python Lambda (swappable in tests).
  readonly factory: FunctionFactory;
}

/**
 * Creates the orchestrator Lambda that reads solution configuration and
 * invokes the scheduling request handler, plus its role, log group, IAM
 * policy, and the cfn-nag / cdk-nag suppressions those resources require.
 */
export class SchedulingOrchestrator {
  readonly lambdaFunction: LambdaFunction;

  constructor(scope: Construct, props: SchedulingOrchestratorProps) {
    // Dedicated execution role; permissions are attached below.
    const role = new Role(scope, "SchedulingOrchestratorRole", {
      assumedBy: new ServicePrincipal("lambda.amazonaws.com"),
    });

    this.lambdaFunction = props.factory.createFunction(scope, "SchedulingOrchestrator", {
      description: props.description,
      index: "instance_scheduler/handler/scheduling_orchestrator.py",
      handler: "handle_orchestration_request",
      memorySize: 128,
      role: role,
      timeout: Duration.minutes(5),
      environment: {
        USER_AGENT_EXTRA: props.USER_AGENT_EXTRA,
        LOG_GROUP: props.scheduleLogGroup.logGroupName,
        ISSUES_TOPIC_ARN: props.snsErrorReportingTopic.topicArn,
        ENABLE_DEBUG_LOGS: cfnConditionToTrueFalse(props.enableDebugLogging),
        CONFIG_TABLE: props.configTable.tableName,
        SCHEDULING_REQUEST_HANDLER_NAME: props.schedulingRequestHandlerLambda.functionName,
        ENABLE_SCHEDULE_HUB_ACCOUNT: cfnConditionToTrueFalse(props.enableSchedulingHubAccount),
        ENABLE_EC2_SERVICE: cfnConditionToTrueFalse(props.enableEc2),
        ENABLE_RDS_SERVICE: cfnConditionToTrueFalse(props.enableRds),
        ENABLE_RDS_CLUSTERS: cfnConditionToTrueFalse(props.enableRdsClusters),
        ENABLE_NEPTUNE_SERVICE: cfnConditionToTrueFalse(props.enableNeptune),
        ENABLE_DOCDB_SERVICE: cfnConditionToTrueFalse(props.enableDocdb),
        ENABLE_ASG_SERVICE: cfnConditionToTrueFalse(props.enableAsgs),
        SCHEDULE_REGIONS: Fn.join(",", props.regions),
        DEFAULT_TIMEZONE: props.defaultTimezone,
        ENABLE_RDS_SNAPSHOTS: cfnConditionToTrueFalse(props.enableRdsSnapshots),
        ENABLE_AWS_ORGANIZATIONS: cfnConditionToTrueFalse(props.enableAwsOrganizations),
        ENABLE_EC2_SSM_MAINTENANCE_WINDOWS: cfnConditionToTrueFalse(props.enableEc2SsmMaintenanceWindows),
        OPS_DASHBOARD_ENABLED: cfnConditionToTrueFalse(props.opsDashboardEnabled),
        START_TAGS: props.startTags,
        STOP_TAGS: props.stopTags,
        ...props.metricsEnv,
      },
    });

    // Pre-created log group so retention and removal policy are controlled here.
    const lambdaDefaultLogGroup = new LogGroup(scope, "SchedulingOrchestratorLogGroup", {
      logGroupName: `/aws/lambda/${this.lambdaFunction.functionName}`,
      removalPolicy: RemovalPolicy.RETAIN,
      retention: props.logRetentionDays,
    });

    if (!this.lambdaFunction.role) {
      throw new Error("lambdaFunction role is missing");
    }

    const orchestratorPolicy = new Policy(scope, "SchedulingOrchestratorPermissionsPolicy", {
      roles: [this.lambdaFunction.role],
    });

    // invoke must be applied to the base lambda role, not a policy
    props.schedulingRequestHandlerLambda.grantInvoke(this.lambdaFunction.role);

    lambdaDefaultLogGroup.grantWrite(orchestratorPolicy);
    props.configTable.grantReadData(orchestratorPolicy);
    props.snsErrorReportingTopic.grantPublish(orchestratorPolicy);
    props.scheduleLogGroup.grantWrite(orchestratorPolicy);

    // DescribeParameters does not support resource-level scoping, hence "*".
    orchestratorPolicy.addStatements(
      new PolicyStatement({ actions: ["ssm:DescribeParameters"], effect: Effect.ALLOW, resources: ["*"] }),
    );

    // Needed to publish to the KMS-encrypted SNS error topic.
    orchestratorPolicy.addStatements(
      new PolicyStatement({
        actions: ["kms:Decrypt", "kms:GenerateDataKey*"],
        effect: Effect.ALLOW,
        resources: [props.snsKmsKey.keyArn],
      }),
    );

    // Read access to SSM parameters in any region of this account ({param: ...}
    // references in config are resolved to account ids from parameters).
    orchestratorPolicy.addStatements(
      new PolicyStatement({
        actions: ["ssm:GetParameter", "ssm:GetParameters"],
        effect: Effect.ALLOW,
        resources: [`arn:${Aws.PARTITION}:ssm:*:${Aws.ACCOUNT_ID}:parameter/*`],
      }),
    );

    // The DefaultPolicy is created implicitly by the grants above; fail fast if absent.
    const defaultPolicy = this.lambdaFunction.role.node.tryFindChild("DefaultPolicy");
    if (!defaultPolicy) {
      throw Error("Unable to find default policy on lambda role");
    }

    addCfnNagSuppressions(defaultPolicy, {
      id: "W12",
      reason: "Wildcard required for xray",
    });

    NagSuppressions.addResourceSuppressions(defaultPolicy, [
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Resource::*"],
        reason: "required for xray",
      },
      {
        // NOTE(review): "Resource:::*" matches the cdk-nag-normalized ":*"
        // version suffix from grantInvoke — confirm against synthesized template.
        id: "AwsSolutions-IAM5",
        appliesTo: ["Resource:::*"],
        reason: "permission to invoke request handler lambda",
      },
    ]);

    addCfnNagSuppressions(
      orchestratorPolicy,
      {
        id: "W12",
        reason: "Wildcard required for ssm:DescribeParameters",
      },
      {
        id: "W76",
        reason: "Acknowledged IAM policy document SPCM > 25",
      },
    );

    NagSuppressions.addResourceSuppressions(orchestratorPolicy, [
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Action::kms:GenerateDataKey*", "Action::kms:ReEncrypt*"],
        reason: "Permission to use solution CMK with dynamo/sns",
      },
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Resource::arn:<AWS::Partition>:ssm:*:<AWS::AccountId>:parameter/*", "Resource::*"],
        reason:
          "Orchestrator requires access to SSM parameters for translating " +
          "{param: my-param} values to configured account ids",
      },
    ]);

    addCfnNagSuppressions(
      this.lambdaFunction,
      {
        id: "W89",
        reason: "This Lambda function does not need to access any resource provisioned within a VPC.",
      },
      {
        id: "W58",
        reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.",
      },
      {
        id: "W92",
        reason: "Lambda function is invoked by a scheduled rule, it does not run concurrently",
      },
    );

    addCfnNagSuppressions(lambdaDefaultLogGroup, {
      id: "W84",
      reason:
        "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group",
    });
  }
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

import { Aws, CfnCondition, Duration, RemovalPolicy, Stack } from "aws-cdk-lib";
import { Table } from "aws-cdk-lib/aws-dynamodb";
import { Effect, Policy, PolicyStatement, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam";
import { Function as LambdaFunction } from "aws-cdk-lib/aws-lambda";
import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs";
import { Topic } from "aws-cdk-lib/aws-sns";
import { NagSuppressions } from "cdk-nag";
import { AnonymizedMetricsEnvironment } from "../anonymized-metrics-environment";
import { cfnConditionToTrueFalse } from "../cfn";
import { addCfnNagSuppressions } from "../cfn-nag";
import { FunctionFactory } from "./function-factory";
import { Metrics } from "../dashboard/metrics";

/**
 * Configuration for the scheduling request handler Lambda — the worker that
 * performs the actual start/stop scheduling in a target account/region after
 * assuming the cross-account scheduler role.
 */
export interface SchedulingRequestHandlerProps {
  readonly description: string;
  // Deployment namespace; becomes part of the well-known role name (see roleName()).
  readonly namespace: string;
  readonly logRetentionDays: RetentionDays;
  readonly memorySizeMB: number;
  readonly configTable: Table;
  readonly stateTable: Table;
  readonly maintWindowTable: Table;
  readonly scheduleLogGroup: LogGroup;
  readonly snsErrorReportingTopic: Topic;
  readonly enableDebugLogging: CfnCondition;
  readonly metricsEnv: AnonymizedMetricsEnvironment;
  readonly startTags: string;
  readonly stopTags: string;
  readonly tagKey: string;
  // Name (without namespace/account) of the role assumed in hub/spoke accounts.
  readonly schedulerRoleName: string;
  readonly USER_AGENT_EXTRA: string;
  readonly STACK_NAME: string;
  readonly DEFAULT_TIMEZONE: string;
  readonly enableOpsMonitoring: CfnCondition;
  readonly enableEc2SsmMaintenanceWindows: CfnCondition;
  readonly enableRds: CfnCondition;
  readonly enableRdsClusters: CfnCondition;
  readonly enableNeptune: CfnCondition;
  readonly enableDocdb: CfnCondition;
  readonly enableRdsSnapshots: CfnCondition;
  readonly schedulingIntervalMinutes: number;
  readonly solutionName: string;
  readonly factory: FunctionFactory;
}

/**
 * Creates the scheduling request handler Lambda together with its explicitly
 * named role (so spoke stacks can reference it as a trusted principal), log
 * group, IAM policy, and nag suppressions.
 */
export class SchedulingRequestHandlerLambda {
  readonly lambdaFunction: LambdaFunction;

  // Well-known role name derived from the namespace; spoke stacks build the
  // ARN of this role to trust it in their scheduler roles.
  static roleName(namespace: string) {
    return `${namespace}-SchedulingRequestHandler-Role`;
  }
  constructor(scope: Stack, props: SchedulingRequestHandlerProps) {
    const role = new Role(scope, "schedulingRequestHandlerRole", {
      roleName: SchedulingRequestHandlerLambda.roleName(props.namespace),
      assumedBy: new ServicePrincipal("lambda.amazonaws.com"),
    });

    this.lambdaFunction = props.factory.createFunction(scope, "schedulingRequestHandlerLambda", {
      description: props.description,
      index: "instance_scheduler/handler/scheduling_request.py",
      handler: "handle_scheduling_request",
      memorySize: props.memorySizeMB,
      role: role,
      timeout: Duration.minutes(5),
      environment: {
        CONFIG_TABLE: props.configTable.tableName,
        STATE_TABLE: props.stateTable.tableName,
        MAINT_WINDOW_TABLE: props.maintWindowTable.tableName,
        USER_AGENT_EXTRA: props.USER_AGENT_EXTRA,
        STACK_NAME: props.STACK_NAME,
        LOG_GROUP: props.scheduleLogGroup.logGroupName,
        ISSUES_TOPIC_ARN: props.snsErrorReportingTopic.topicArn,
        ENABLE_DEBUG_LOGS: cfnConditionToTrueFalse(props.enableDebugLogging),
        SCHEDULER_ROLE_NAME: props.schedulerRoleName,
        DEFAULT_TIMEZONE: props.DEFAULT_TIMEZONE,
        START_TAGS: props.startTags,
        STOP_TAGS: props.stopTags,
        SCHEDULE_TAG_KEY: props.tagKey,
        ENABLE_EC2_SSM_MAINTENANCE_WINDOWS: cfnConditionToTrueFalse(props.enableEc2SsmMaintenanceWindows),
        ENABLE_RDS_SERVICE: cfnConditionToTrueFalse(props.enableRds),
        ENABLE_RDS_CLUSTERS: cfnConditionToTrueFalse(props.enableRdsClusters),
        ENABLE_NEPTUNE_SERVICE: cfnConditionToTrueFalse(props.enableNeptune),
        ENABLE_DOCDB_SERVICE: cfnConditionToTrueFalse(props.enableDocdb),
        ENABLE_RDS_SNAPSHOTS: cfnConditionToTrueFalse(props.enableRdsSnapshots),
        ENABLE_OPS_MONITORING: cfnConditionToTrueFalse(props.enableOpsMonitoring),
        ...props.metricsEnv,
      },
    });

    // Pre-created log group so retention and removal policy are controlled here.
    const lambdaDefaultLogGroup = new LogGroup(scope, "schedulingRequestHandlerLogGroup", {
      logGroupName: `/aws/lambda/${this.lambdaFunction.functionName}`,
      removalPolicy: RemovalPolicy.RETAIN,
      retention: props.logRetentionDays,
    });

    if (!this.lambdaFunction.role) {
      throw new Error("lambdaFunction role is missing");
    }

    const schedulingRequestHandlerPolicy = new Policy(scope, "schedulingRequestHandlerPolicy", {
      roles: [this.lambdaFunction.role],
    });

    lambdaDefaultLogGroup.grantWrite(schedulingRequestHandlerPolicy);
    props.configTable.grantReadData(schedulingRequestHandlerPolicy);
    props.stateTable.grantReadWriteData(schedulingRequestHandlerPolicy);
    props.maintWindowTable.grantReadWriteData(schedulingRequestHandlerPolicy);
    props.snsErrorReportingTopic.grantPublish(schedulingRequestHandlerPolicy);
    props.scheduleLogGroup.grantWrite(schedulingRequestHandlerPolicy);

    schedulingRequestHandlerPolicy.addStatements(
      //assume scheduler role in hub/spoke accounts
      new PolicyStatement({
        actions: ["sts:AssumeRole"],
        effect: Effect.ALLOW,
        resources: [`arn:${Aws.PARTITION}:iam::*:role/${props.schedulerRoleName}`],
      }),

      // put metric data for ops dashboard metrics
      // (PutMetricData has no resource-level scoping; the namespace condition
      // restricts it to this solution's metric namespace)
      new PolicyStatement({
        actions: ["cloudwatch:PutMetricData"],
        effect: Effect.ALLOW,
        resources: ["*"],
        conditions: {
          StringEquals: {
            "cloudwatch:namespace": Metrics.metricNamespace,
          },
        },
      }),
    );

    const defaultPolicy = this.lambdaFunction.role.node.tryFindChild("DefaultPolicy");
    if (!defaultPolicy) {
      throw Error("Unable to find default policy on lambda role");
    }

    addCfnNagSuppressions(defaultPolicy, {
      id: "W12",
      reason: "Wildcard required for xray",
    });

    NagSuppressions.addResourceSuppressions(defaultPolicy, [
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Resource::*"],
        reason: "required for xray",
      },
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Resource:::*"],
        reason: "ability to call spoke-registration handler",
      },
    ]);

    addCfnNagSuppressions(
      schedulingRequestHandlerPolicy,
      {
        id: "W12",
        reason: "cloudwatch:PutMetricData action requires wildcard",
      },
      {
        id: "W76",
        reason: "Acknowledged IAM policy document SPCM > 25",
      },
    );

    NagSuppressions.addResourceSuppressions(schedulingRequestHandlerPolicy, [
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Action::kms:GenerateDataKey*", "Action::kms:ReEncrypt*"],
        reason: "Permission to use solution CMK with dynamo/sns",
      },
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Resource::arn:<AWS::Partition>:iam::*:role/<Namespace>-Scheduler-Role"],
        reason: "This handler's primary purpose is to assume role into spoke accounts for scheduling purposes",
      },
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Resource::*"],
        reason: "Ability to publish custom metrics to cloudwatch",
      },
    ]);

    addCfnNagSuppressions(
      this.lambdaFunction,
      {
        id: "W89",
        reason: "This Lambda function does not need to access any resource provisioned within a VPC.",
      },
      {
        id: "W58",
        reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.",
      },
      {
        id: "W92",
        reason: "Need to investigate appropriate ReservedConcurrentExecutions for this lambda",
      },
    );

    addCfnNagSuppressions(lambdaDefaultLogGroup, {
      id: "W84",
      reason:
        "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group",
    });

    // Explicit role name required so spoke stacks can trust it by ARN.
    addCfnNagSuppressions(role, {
      id: "W28",
      reason: "Explicit role name required for assumedBy arn principle in spoke stack",
    });
  }
}
// SPDX-License-Identifier: Apache-2.0
import { ArnFormat, Aspects, CfnCondition, Duration, Fn, Stack } from "aws-cdk-lib";
import { Table } from "aws-cdk-lib/aws-dynamodb";
import { Effect, Policy, PolicyStatement, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam";
import { CfnPermission, Function as LambdaFunction } from "aws-cdk-lib/aws-lambda";
import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs";
import { Topic } from "aws-cdk-lib/aws-sns";
import { NagSuppressions } from "cdk-nag";
import { ConditionAspect, cfnConditionToTrueFalse } from "../cfn";
import { addCfnNagSuppressions } from "../cfn-nag";
import { FunctionFactory } from "./function-factory";
import { SpokeDeregistrationRunbook } from "../runbooks/spoke-deregistration";

/** Configuration for the spoke account registration handler Lambda. */
export interface SpokeRegistrationLambdaProps {
  readonly solutionVersion: string;
  readonly logRetentionDays: RetentionDays;
  readonly configTable: Table;
  readonly snsErrorReportingTopic: Topic;
  readonly scheduleLogGroup: LogGroup;
  readonly USER_AGENT_EXTRA: string;
  readonly enableDebugLogging: CfnCondition;
  // Org IDs; the first entry is used as principalOrgId on the invoke permission.
  readonly principals: string[];
  readonly namespace: string;
  // When "Yes", spoke accounts in the same org may invoke this function to self-register.
  readonly enableAwsOrganizations: CfnCondition;
  readonly factory: FunctionFactory;
}
/**
 * Creates the hub-side Lambda that spoke accounts invoke to register or
 * deregister themselves, with a well-known function name so spokes can
 * address it without any lookup.
 */
export class SpokeRegistrationLambda {
  // Deterministic function name derived from the namespace; spoke stacks and
  // the deregistration runbook rely on this exact name.
  static getFunctionName(namespace: string) {
    return `InstanceScheduler-${namespace}-SpokeRegistration`;
  }
  readonly lambdaFunction: LambdaFunction;

  constructor(scope: Stack, props: SpokeRegistrationLambdaProps) {
    const role = new Role(scope, "SpokeRegistrationRole", {
      assumedBy: new ServicePrincipal("lambda.amazonaws.com"),
    });

    const functionName = SpokeRegistrationLambda.getFunctionName(props.namespace);

    this.lambdaFunction = props.factory.createFunction(scope, "SpokeRegistrationHandler", {
      functionName: functionName,
      description: "spoke account registration handler, version " + props.solutionVersion,
      index: "instance_scheduler/handler/spoke_registration.py",
      handler: "handle_spoke_registration_event",
      memorySize: 128,
      role: role,
      timeout: Duration.minutes(1),
      environment: {
        CONFIG_TABLE: props.configTable.tableName,
        USER_AGENT_EXTRA: props.USER_AGENT_EXTRA,
        LOG_GROUP: props.scheduleLogGroup.logGroupName,
        ISSUES_TOPIC_ARN: props.snsErrorReportingTopic.topicArn,
        ENABLE_DEBUG_LOGS: cfnConditionToTrueFalse(props.enableDebugLogging),
      },
    });

    if (!this.lambdaFunction.role) {
      throw new Error("lambdaFunction role is missing");
    }

    // GovCloud and GCR regions do not support logging config property which was used to prevent
    // log group name collisions since the lambda name must be well known.
    // To work around this a lambda-managed log group and appropriate policy must be used to prevent name collisions.
    // Thus, log retention cannot be set until these regions reach feature parity.
    const spokeRegistrationPolicy = new Policy(scope, "SpokeRegistrationPolicy", {
      roles: [this.lambdaFunction.role],
      statements: [
        // Allow the Lambda service to create its own log group with the
        // predictable /aws/lambda/<functionName> name.
        new PolicyStatement({
          effect: Effect.ALLOW,
          actions: ["logs:CreateLogGroup"],
          resources: [
            scope.formatArn({
              service: "logs",
              resource: "log-group",
              resourceName: `/aws/lambda/${functionName}`,
              arnFormat: ArnFormat.COLON_RESOURCE_NAME,
            }),
          ],
        }),
        // And to write streams/events within that log group only.
        new PolicyStatement({
          effect: Effect.ALLOW,
          actions: ["logs:CreateLogStream", "logs:PutLogEvents"],
          resources: [
            scope.formatArn({
              service: "logs",
              resource: "log-group",
              resourceName: `/aws/lambda/${functionName}:log-stream:*`,
              arnFormat: ArnFormat.COLON_RESOURCE_NAME,
            }),
          ],
        }),
      ],
    });

    props.configTable.grantReadWriteData(spokeRegistrationPolicy);
    props.snsErrorReportingTopic.grantPublish(spokeRegistrationPolicy);
    props.scheduleLogGroup.grantWrite(spokeRegistrationPolicy);

    // Must use the L1 construct to conditionally create the resource based permission.
    const permission = new CfnPermission(scope, "SpokeRegistrationLambdaPermission", {
      functionName: this.lambdaFunction.functionName,
      principal: "*",
      principalOrgId: Fn.select(0, props.principals),
      action: "lambda:InvokeFunction",
    });
    // Only materialize the permission when AWS Organizations support is enabled.
    Aspects.of(permission).add(new ConditionAspect(props.enableAwsOrganizations));

    // Operator runbook for manually deregistering a spoke account.
    new SpokeDeregistrationRunbook(scope, {
      lambdaFunction: this.lambdaFunction,
      namespace: props.namespace,
    });

    const defaultPolicy = this.lambdaFunction.role.node.tryFindChild("DefaultPolicy");
    if (!defaultPolicy) {
      throw Error("Unable to find default policy on lambda role");
    }

    addCfnNagSuppressions(defaultPolicy, {
      id: "W12",
      reason: "Wildcard required for xray",
    });

    NagSuppressions.addResourceSuppressions(defaultPolicy, [
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Resource::*"],
        reason: "required for xray",
      },
    ]);

    NagSuppressions.addResourceSuppressions(spokeRegistrationPolicy, [
      {
        id: "AwsSolutions-IAM5",
        appliesTo: ["Action::kms:GenerateDataKey*", "Action::kms:ReEncrypt*"],
        reason: "Permission to use solution CMK with dynamo/sns",
      },
      {
        id: "AwsSolutions-IAM5",
        appliesTo: [
          "Resource::arn:<AWS::Partition>:logs:<AWS::Region>:<AWS::AccountId>:log-group:/aws/lambda/InstanceScheduler-<Namespace>-SpokeRegistration:log-stream:*",
        ],
        reason: "Wildcard required for creating and writing to log stream",
      },
    ]);

    addCfnNagSuppressions(
      this.lambdaFunction,
      {
        id: "W89",
        reason: "This Lambda function does not need to access any resource provisioned within a VPC.",
      },
      {
        id: "W58",
        reason: "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.",
      },
      {
        id: "W92",
        reason:
          "Lambda function is invoke by new account registration/deregistration events and is not likely to have much concurrency",
      },
      {
        id: "F13",
        reason:
          "This lambda scopes invoke permissions to members of the same AWS organization. This is the narrowest possible" +
          " scope that still allows new spoke accounts to register themselves with the hub after being deployed",
      },
    );

    // This L1 resource does not work with the addCfnNagSuppressions helper function
    permission.addMetadata("cfn_nag", {
      rules_to_suppress: [
        {
          id: "F13",
          reason:
            "Lambda permission policy requires principal wildcard for spoke accounts to self register by invoking this function." +
            "This is acceptable as we are narrowing the authorized accounts to only those contained within the org via principalOrgId",
        },
      ],
    });
  }
}
from "./lambda-functions/remote-registration"; +import { FunctionFactory, PythonFunctionFactory } from "./lambda-functions/function-factory"; -export interface InstanceSchedulerRemoteStackProps extends cdk.StackProps { - readonly description: string; +export interface SpokeStackProps extends StackProps { readonly solutionId: string; readonly solutionName: string; readonly solutionVersion: string; readonly appregApplicationName: string; readonly appregSolutionName: string; + readonly factory?: FunctionFactory; } -export class InstanceSchedulerRemoteStack extends cdk.Stack { - constructor(scope: Construct, id: string, props: InstanceSchedulerRemoteStackProps) { +export class SpokeStack extends Stack { + constructor(scope: Construct, id: string, props: SpokeStackProps) { super(scope, id, props); - //CFN Parameters - const instanceSchedulerAccount = new cdk.CfnParameter(this, "InstanceSchedulerAccount", { + const instanceSchedulerAccount = new ParameterWithLabel(this, "InstanceSchedulerAccount", { + label: "Hub Account ID", description: - "AccountID of the Instance Scheduler Hub stack that should be allowed to schedule resources in this account.", - type: "String", - allowedPattern: "(^[0-9]{12}$)", + "Account ID of the Instance Scheduler Hub stack that should be allowed to schedule resources in this account.", + allowedPattern: String.raw`^\d{12}$`, constraintDescription: "Account number is a 12 digit number", }); + const hubAccountId = instanceSchedulerAccount.valueAsString; - const namespace = new cdk.CfnParameter(this, "Namespace", { - type: "String", + const usingAWSOrganizations = new YesNoParameter(this, "UsingAWSOrganizations", { + label: "Use AWS Organizations", description: - "Unique identifier used to differentiate between multiple solution deployments. " + + "Use AWS Organizations to automate spoke account registration. 
" + "Must be set to the same value as the Hub stack", + default: YesNoType.No, }); - const usingAWSOrganizations = new cdk.CfnParameter(this, "UsingAWSOrganizations", { - type: "String", + const namespace = new ParameterWithLabel(this, "Namespace", { + label: "Namespace", description: - "Use AWS Organizations to automate spoke account registration. " + - "Must be set to the same value as the Hub stack", - allowedValues: ["Yes", "No"], - default: "No", + "Unique identifier used to differentiate between multiple solution deployments. " + + "Must be set to the same value as the Hub stack. Must be non-empty for Organizations deployments.", + default: "default", }); - // CFN Conditions - const isMemberOfOrganization = new cdk.CfnCondition(this, "IsMemberOfOrganization", { - expression: cdk.Fn.conditionEquals(usingAWSOrganizations, "Yes"), + addParameterGroup(this, { + label: "Account structure", + parameters: [instanceSchedulerAccount, usingAWSOrganizations, namespace], }); - const mappings = new cdk.CfnMapping(this, "mappings"); - mappings.setValue("SchedulerRole", "Name", "Scheduler-Role"); - mappings.setValue("SchedulerEventBusName", "Name", "scheduler-event-bus"); + const kmsKeyArns = new ParameterWithLabel(this, "KmsKeyArns", { + label: "Kms Key Arns for EC2", + description: + "comma-separated list of kms arns to grant Instance Scheduler kms:CreateGrant permissions to provide the EC2 " + + " service with Decrypt permissions for encrypted EBS volumes." + + " This allows the scheduler to start EC2 instances with attached encrypted EBS volumes." + + " provide just (*) to give limited access to all kms keys, leave blank to disable." 
+ + " For details on the exact policy created, refer to security section of the implementation guide" + + " (https://aws.amazon.com/solutions/implementations/instance-scheduler-on-aws/)", + type: "CommaDelimitedList", + default: "", + }); + + addParameterGroup(this, { + label: "Service-specific", + parameters: [kmsKeyArns], + }); + + const USER_AGENT_EXTRA = `AwsSolution/${props.solutionId}/${props.solutionVersion}`; new AppRegistryForInstanceScheduler(this, "AppRegistryForInstanceScheduler", { solutionId: props.solutionId, solutionName: props.solutionName, solutionVersion: props.solutionVersion, - appregSolutionName: props.appregSolutionName, appregAppName: props.appregApplicationName, + appregSolutionName: props.appregSolutionName, }); - const accountPrincipal = new ArnPrincipal( - cdk.Fn.sub("arn:${AWS::Partition}:iam::${accountId}:root", { - accountId: instanceSchedulerAccount.valueAsString, - }), - ); - const servicePrincipal = new iam.ServicePrincipal("lambda.amazonaws.com"); - - const principalPolicyStatement = new PolicyStatement(); - principalPolicyStatement.addActions("sts:AssumeRole"); - principalPolicyStatement.effect = Effect.ALLOW; - - const principals = new CompositePrincipal(accountPrincipal, servicePrincipal); - principals.addToPolicy(principalPolicyStatement); - - const ec2SchedulerCrossAccountRole = new iam.Role(this, "EC2SchedulerCrossAccountRole", { - roleName: cdk.Fn.sub("${Namespace}-${Name}", { - Name: mappings.findInMap("SchedulerRole", "Name"), - }), - path: "/", - assumedBy: principals, - inlinePolicies: { - EC2InstanceSchedulerRemote: new iam.PolicyDocument({ - statements: [ - new PolicyStatement({ - actions: ["rds:DeleteDBSnapshot", "rds:DescribeDBSnapshots", "rds:StopDBInstance"], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:snapshot:*")], - }), - new PolicyStatement({ - actions: [ - "rds:AddTagsToResource", - "rds:RemoveTagsFromResource", - "rds:DescribeDBSnapshots", - 
"rds:StartDBInstance", - "rds:StopDBInstance", - ], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:db:*")], - }), - new PolicyStatement({ - actions: [ - "rds:AddTagsToResource", - "rds:RemoveTagsFromResource", - "rds:StartDBCluster", - "rds:StopDBCluster", - ], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:rds:*:${AWS::AccountId}:cluster:*")], - }), - new PolicyStatement({ - actions: ["ec2:StartInstances", "ec2:StopInstances", "ec2:CreateTags", "ec2:DeleteTags"], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:ec2:*:${AWS::AccountId}:instance/*")], - }), - new PolicyStatement({ - actions: [ - "rds:DescribeDBClusters", - "rds:DescribeDBInstances", - "ec2:DescribeInstances", - "ssm:DescribeMaintenanceWindows", - "ssm:DescribeMaintenanceWindowExecutions", - "tag:GetResources", - ], - effect: Effect.ALLOW, - resources: ["*"], - }), - ], - }), - }, - }); - - const ec2ModifyInstancePolicy = new iam.Policy(this, "Ec2ModifyInstanceAttrPolicy", { - roles: [ec2SchedulerCrossAccountRole], - statements: [ - new PolicyStatement({ - actions: ["ec2:ModifyInstanceAttribute"], - effect: Effect.ALLOW, - resources: [cdk.Fn.sub("arn:${AWS::Partition}:ec2:*:${AWS::AccountId}:instance/*")], - }), - ], - }); - - NagSuppressions.addResourceSuppressions(ec2ModifyInstancePolicy, [ - { - id: "AwsSolutions-IAM5", - reason: - "All policies have been scoped to be as restrictive as possible. 
This solution needs to access ec2/rds resources across all regions.", - }, - ]); - - // Event Rule to capture SSM Parameter Store creation by this stack - // SSM parameter to invoke event rule - const ssmParameterNamespace = new ssm.StringParameter(this, "SSMParameterNamespace", { - description: "This parameter is for Instance Scheduler solution to support accounts in AWS Organizations.", - stringValue: namespace.valueAsString, - parameterName: "/instance-scheduler/do-not-delete-manually", - }); - - const ssmParameterNamespace_ref = ssmParameterNamespace.node.defaultChild as ssm.CfnParameter; - - // Event Delivery Role and Policy necessary to migrate a sender-receiver relationship to Use AWS Organizations - const schedulerEventDeliveryRole = new iam.Role(this, "SchedulerEventDeliveryRole", { - description: - "Event Role to add the permissions necessary to migrate a sender-receiver relationship to Use AWS Organizations", - assumedBy: new iam.ServicePrincipal("events.amazonaws.com"), - }); - const schedulerEventDeliveryPolicy = new iam.Policy(this, "SchedulerEventDeliveryPolicy", { - roles: [schedulerEventDeliveryRole], - statements: [ - new iam.PolicyStatement({ - actions: ["events:PutEvents"], - effect: iam.Effect.ALLOW, - resources: [ - cdk.Fn.sub( - "arn:${AWS::Partition}:events:${AWS::Region}:${InstanceSchedulerAccount}:event-bus/${Namespace}-${EventBusName}", - { - EventBusName: mappings.findInMap("SchedulerEventBusName", "Name"), - }, - ), - ], - }), - ], + const schedulingRole = new SchedulerRole(this, "EC2SchedulerCrossAccountRole", { + assumedBy: new ArnPrincipal( + roleArnFor(hubAccountId, SchedulingRequestHandlerLambda.roleName(namespace.valueAsString)), + ), + namespace: namespace.valueAsString, + kmsKeys: kmsKeyArns.valueAsList, }); + overrideLogicalId(schedulingRole, "EC2SchedulerCrossAccountRole"); - const parameterStoreEventRule = new events.CfnRule(this, "scheduler-ssm-parameter-store-event", { - description: - "Event rule to invoke Instance 
Scheduler lambda function to store spoke account id in configuration.", - state: "ENABLED", - targets: [ - { - arn: cdk.Fn.sub( - "arn:${AWS::Partition}:events:${AWS::Region}:${InstanceSchedulerAccount}:event-bus/${Namespace}-${EventBusName}", - { - EventBusName: mappings.findInMap("SchedulerEventBusName", "Name"), - }, - ), - id: "Spoke-SSM-Parameter-Event", - roleArn: schedulerEventDeliveryRole.roleArn, - }, - ], - eventPattern: { - account: [this.account], - source: ["aws.ssm"], - "detail-type": ["Parameter Store Change"], - detail: { - name: ["/instance-scheduler/do-not-delete-manually"], - operation: ["Create", "Delete"], - type: ["String"], - }, - }, + new AsgSchedulingRole(this, "AsgSchedulingRole", { + assumedBy: new ArnPrincipal(roleArnFor(hubAccountId, AsgHandler.roleName(namespace.valueAsString))), + namespace: namespace.valueAsString, }); - const schedulerEventDeliveryRole_ref = schedulerEventDeliveryRole.node.findChild("Resource") as iam.CfnRole; - const schedulerEventDeliveryPolicy_ref = schedulerEventDeliveryPolicy.node.findChild("Resource") as iam.CfnPolicy; - - // wait for the events rule to be created before creating/deleting the SSM parameter - parameterStoreEventRule.addDependency(schedulerEventDeliveryRole_ref); - ssmParameterNamespace_ref.addDependency(parameterStoreEventRule); - schedulerEventDeliveryPolicy_ref.cfnOptions.condition = isMemberOfOrganization; - schedulerEventDeliveryRole_ref.cfnOptions.condition = isMemberOfOrganization; - parameterStoreEventRule.cfnOptions.condition = isMemberOfOrganization; - ssmParameterNamespace_ref.cfnOptions.condition = isMemberOfOrganization; + const factory = props.factory ?? 
new PythonFunctionFactory(); - //CFN Output - new cdk.CfnOutput(this, "CrossAccountRole", { - value: ec2SchedulerCrossAccountRole.roleArn, - description: - "Arn for cross account role for Instance scheduler, add this arn to the list of crossaccount roles (CrossAccountRoles) parameter of the Instance Scheduler template.", + new RemoteRegistrationCustomResource(this, "RemoteRegistrationCustomResource", { + hubAccountId: hubAccountId, + namespace: namespace.valueAsString, + shouldRegisterSpokeAccountCondition: usingAWSOrganizations.getCondition(), + factory: factory, + USER_AGENT_EXTRA: USER_AGENT_EXTRA, }); - - const ec2SchedulerCrossAccountRole_cfn_ref = ec2SchedulerCrossAccountRole.node.defaultChild as iam.CfnRole; - ec2SchedulerCrossAccountRole_cfn_ref.overrideLogicalId("EC2SchedulerCrossAccountRole"); - ec2SchedulerCrossAccountRole_cfn_ref.cfnOptions.metadata = { - cfn_nag: { - rules_to_suppress: [ - { - id: "W11", - reason: - "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", - }, - { - id: "W28", - reason: "The role name is defined to allow cross account access from the hub account.", - }, - { - id: "W76", - reason: - "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", - }, - ], - }, - }; - NagSuppressions.addResourceSuppressions(ec2SchedulerCrossAccountRole_cfn_ref, [ - { - id: "AwsSolutions-IAM5", - reason: - "All policies have been scoped to be as restrictive as possible. 
This solution needs to access ec2/rds resources across all regions.", - }, - ]); - - const stack = cdk.Stack.of(this); - - stack.templateOptions.metadata = { - "AWS::CloudFormation::Interface": { - ParameterGroups: [ - { - Label: { default: "Namespace Configuration" }, - Parameters: ["Namespace"], - }, - { - Label: { default: "Account Structure" }, - Parameters: ["InstanceSchedulerAccount", "UsingAWSOrganizations"], - }, - ], - ParameterLabels: { - InstanceSchedulerAccount: { - default: "Hub Account ID", - }, - UsingAWSOrganizations: { - default: "Use AWS Organizations", - }, - }, - }, - }; - stack.templateOptions.templateFormatVersion = "2010-09-09"; } } diff --git a/source/instance-scheduler/lib/runbooks/spoke-deregistration.ts b/source/instance-scheduler/lib/runbooks/spoke-deregistration.ts new file mode 100644 index 00000000..c6fd9374 --- /dev/null +++ b/source/instance-scheduler/lib/runbooks/spoke-deregistration.ts @@ -0,0 +1,70 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { + AutomationDocument, + DocumentFormat, + Input, + HardCodedString, + InvokeLambdaFunctionStep, + StringVariable, + HardCodedStringMap, +} from "@cdklabs/cdk-ssm-documents"; +import { Stack } from "aws-cdk-lib"; +import { Role, ServicePrincipal } from "aws-cdk-lib/aws-iam"; +import { Function as LambdaFunction } from "aws-cdk-lib/aws-lambda"; +import { InvocationType } from "aws-cdk-lib/triggers"; +import { NagSuppressions } from "cdk-nag"; + +export interface SpokeDeregistrationRunbookProperties { + lambdaFunction: LambdaFunction; + namespace: string; +} + +export class SpokeDeregistrationRunbook { + constructor(scope: Stack, props: SpokeDeregistrationRunbookProperties) { + const role = new Role(scope, "SpokeDeregistrationRunbookRole", { + assumedBy: new ServicePrincipal("ssm.amazonaws.com"), + description: "Role assumed by SSM Automation to call the spoke registration lambda", + }); + props.lambdaFunction.grantInvoke(role); + + const automationDocument = new AutomationDocument(scope, "SpokeDeregistrationRunbook", { + description: "Deregister a spoke account from Instance Scheduler on AWS on demand", + documentFormat: DocumentFormat.YAML, + assumeRole: HardCodedString.of(role.roleArn), + docInputs: [ + Input.ofTypeString("AccountId", { + description: "Spoke Account ID used for registration", + allowedPattern: "^\\d{12}$", + }), + ], + }); + + automationDocument.addStep( + new InvokeLambdaFunctionStep(scope, "InvokeSpokeRegistrationLambdaStep", { + name: "InvokeSpokeRegistrationLambda", + description: + "Invokes the Instance Scheduler on AWS spoke registration lambda to deregister a given AWS Account ID", + functionName: HardCodedString.of(props.lambdaFunction.functionArn), + invocationType: HardCodedString.of(InvocationType.REQUEST_RESPONSE), + payload: HardCodedStringMap.of({ + account: StringVariable.of("AccountId"), + operation: "Deregister", + }), + }), + ); + + const defaultPolicy = 
role.node.tryFindChild("DefaultPolicy"); + if (!defaultPolicy) { + throw Error("Unable to find default policy on role"); + } + + NagSuppressions.addResourceSuppressions(defaultPolicy, [ + { + id: "AwsSolutions-IAM5", + appliesTo: ["Resource:::*"], + reason: "permissions to invoke all versions of the spoke registration lambda", + }, + ]); + } +} diff --git a/source/instance-scheduler/lib/scheduling-interval-mappings.ts b/source/instance-scheduler/lib/scheduling-interval-mappings.ts new file mode 100644 index 00000000..ed944784 --- /dev/null +++ b/source/instance-scheduler/lib/scheduling-interval-mappings.ts @@ -0,0 +1,41 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { CfnMapping, CfnMappingProps } from "aws-cdk-lib"; +import { Construct } from "constructs"; + +export const schedulerIntervalValues = ["1", "2", "5", "10", "15", "30", "60"]; +export class SchedulingIntervalToCron extends CfnMapping { + private readonly key = "IntervalMinutesToCron"; + constructor(scope: Construct, id: string, props: CfnMappingProps) { + super(scope, id, props); + this.setValue(this.key, "1", "cron(0/1 * * * ? *)"); + this.setValue(this.key, "2", "cron(0/2 * * * ? *)"); + this.setValue(this.key, "5", "cron(0/5 * * * ? *)"); + this.setValue(this.key, "10", "cron(0/10 * * * ? *)"); + this.setValue(this.key, "15", "cron(0/15 * * * ? *)"); + this.setValue(this.key, "30", "cron(0/30 * * * ? *)"); + this.setValue(this.key, "60", "cron(0 0/1 * * ? 
*)"); + } + + getMapping(schedulingInterval: string) { + return this.findInMap(this.key, schedulingInterval); + } +} + +export class SchedulingIntervalToSeconds extends CfnMapping { + private readonly key = "MinutesToSeconds"; + constructor(scope: Construct, id: string, props: CfnMappingProps) { + super(scope, id, props); + this.setValue(this.key, "1", "60"); + this.setValue(this.key, "2", "120"); + this.setValue(this.key, "5", "300"); + this.setValue(this.key, "10", "600"); + this.setValue(this.key, "15", "900"); + this.setValue(this.key, "30", "1800"); + this.setValue(this.key, "60", "3600"); + } + + getMapping(schedulingInterval: string) { + return this.findInMap(this.key, schedulingInterval); + } +} diff --git a/source/instance-scheduler/tests/__snapshots__/instance-scheduler-remote-stack.test.ts.snap b/source/instance-scheduler/tests/__snapshots__/instance-scheduler-remote-stack.test.ts.snap index 3e31943c..5ce9ac03 100644 --- a/source/instance-scheduler/tests/__snapshots__/instance-scheduler-remote-stack.test.ts.snap +++ b/source/instance-scheduler/tests/__snapshots__/instance-scheduler-remote-stack.test.ts.snap @@ -2,9 +2,37 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` { - "AWSTemplateFormatVersion": "2010-09-09", "Conditions": { - "IsMemberOfOrganization": { + "AppRegistryForInstanceSchedulerShouldDeployBA583A67": { + "Fn::Not": [ + { + "Fn::Equals": [ + { + "Ref": "AWS::Partition", + }, + "aws-cn", + ], + }, + ], + }, + "EC2SchedulerCrossAccountRolekmsAccessCondition6C83D407": { + "Fn::Not": [ + { + "Fn::Equals": [ + { + "Fn::Select": [ + 0, + { + "Ref": "KmsKeyArns", + }, + ], + }, + "", + ], + }, + ], + }, + "UsingAWSOrganizationsCondition": { "Fn::Equals": [ { "Ref": "UsingAWSOrganizations", @@ -13,23 +41,14 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` ], }, }, - "Description": "", "Mappings": { "AppRegistryForInstanceSchedulerSolution25A90F05": { "Data": { - "AppRegistryApplicationName": "instance-scheduler-on-aws", 
- "ApplicationType": "AWS-Solutions", - "ID": "SO0030", - "SolutionName": "instance-scheduler-on-aws", - "Version": "v1.5.0", - }, - }, - "mappings": { - "SchedulerEventBusName": { - "Name": "scheduler-event-bus", - }, - "SchedulerRole": { - "Name": "Scheduler-Role", + "AppRegistryApplicationName": "my-appreg-solution-name", + "ApplicationType": "my-appreg-app-name", + "ID": "my-solution-id", + "SolutionName": "my-solution-name", + "Version": "v9.9.9", }, }, }, @@ -38,19 +57,20 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` "ParameterGroups": [ { "Label": { - "default": "Namespace Configuration", + "default": "Account structure", }, "Parameters": [ + "InstanceSchedulerAccount", + "UsingAWSOrganizations", "Namespace", ], }, { "Label": { - "default": "Account Structure", + "default": "Service-specific", }, "Parameters": [ - "InstanceSchedulerAccount", - "UsingAWSOrganizations", + "KmsKeyArns", ], }, ], @@ -58,32 +78,38 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` "InstanceSchedulerAccount": { "default": "Hub Account ID", }, + "KmsKeyArns": { + "default": "Kms Key Arns for EC2", + }, + "Namespace": { + "default": "Namespace", + }, "UsingAWSOrganizations": { "default": "Use AWS Organizations", }, }, }, }, - "Outputs": { - "CrossAccountRole": { - "Description": "Arn for cross account role for Instance scheduler, add this arn to the list of crossaccount roles (CrossAccountRoles) parameter of the Instance Scheduler template.", - "Value": { - "Fn::GetAtt": [ - "EC2SchedulerCrossAccountRole", - "Arn", - ], - }, - }, - }, "Parameters": { + "BootstrapVersion": { + "Default": "/cdk-bootstrap/hnb659fds/version", + "Description": "Version of the CDK Bootstrap resources in this environment, automatically retrieved from SSM Parameter Store. 
[cdk:skip]", + "Type": "AWS::SSM::Parameter::Value", + }, "InstanceSchedulerAccount": { - "AllowedPattern": "(^[0-9]{12}$)", + "AllowedPattern": "^\\d{12}$", "ConstraintDescription": "Account number is a 12 digit number", - "Description": "AccountID of the Instance Scheduler Hub stack that should be allowed to schedule resources in this account.", + "Description": "Account ID of the Instance Scheduler Hub stack that should be allowed to schedule resources in this account.", "Type": "String", }, + "KmsKeyArns": { + "Default": "", + "Description": "comma-separated list of kms arns to grant Instance Scheduler kms:CreateGrant permissions to provide the EC2 service with Decrypt permissions for encrypted EBS volumes. This allows the scheduler to start EC2 instances with attached encrypted EBS volumes. provide just (*) to give limited access to all kms keys, leave blank to disable. For details on the exact policy created, refer to security section of the implementation guide (https://aws.amazon.com/solutions/implementations/instance-scheduler-on-aws/)", + "Type": "CommaDelimitedList", + }, "Namespace": { - "Description": "Unique identifier used to differentiate between multiple solution deployments. Must be set to the same value as the Hub stack", + "Default": "default", + "Description": "Unique identifier used to differentiate between multiple solution deployments. Must be set to the same value as the Hub stack. 
Must be non-empty for Organizations deployments.", "Type": "String", }, "UsingAWSOrganizations": { @@ -98,6 +124,7 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` }, "Resources": { "AppRegistry968496A3": { + "Condition": "AppRegistryForInstanceSchedulerShouldDeployBA583A67", "Properties": { "Description": { "Fn::Join": [ @@ -171,6 +198,7 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` "Type": "AWS::ServiceCatalogAppRegistry::Application", }, "AppRegistryAssociation": { + "Condition": "AppRegistryForInstanceSchedulerShouldDeployBA583A67", "Properties": { "Application": { "Fn::GetAtt": [ @@ -186,6 +214,7 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` "Type": "AWS::ServiceCatalogAppRegistry::ResourceAssociation", }, "AppRegistryAttributeGroupAssociationf823ba38a843A987197E": { + "Condition": "AppRegistryForInstanceSchedulerShouldDeployBA583A67", "Properties": { "Application": { "Fn::GetAtt": [ @@ -203,6 +232,7 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` "Type": "AWS::ServiceCatalogAppRegistry::AttributeGroupAssociation", }, "AppRegistryDefaultApplicationAttributes15279635": { + "Condition": "AppRegistryForInstanceSchedulerShouldDeployBA583A67", "Properties": { "Attributes": { "applicationType": { @@ -289,29 +319,89 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` }, "Type": "AWS::ServiceCatalogAppRegistry::AttributeGroup", }, - "EC2SchedulerCrossAccountRole": { + "AsgSchedulingRoleASGSchedulingPermissions6DEABC8F": { "Metadata": { "cdk_nag": { "rules_to_suppress": [ { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Required permissions to describe AutoScaling Groups", + }, + { + "applies_to": [ + "Resource::arn::autoscaling:*::autoScalingGroup:*:autoScalingGroupName/*", + ], "id": "AwsSolutions-IAM5", - "reason": "All policies have been scoped to be as restrictive as possible. 
This solution needs to access ec2/rds resources across all regions.", + "reason": "Required permissions to modify scheduled scaling actions on AutoScaling Groups", }, ], }, "cfn_nag": { "rules_to_suppress": [ { - "id": "W11", - "reason": "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", + "id": "W12", + "reason": "DescribeAutoScalingGroups and autoscaling:DescribeScheduledActions actions require wildcard permissions", }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ { - "id": "W28", - "reason": "The role name is defined to allow cross account access from the hub account.", + "Action": [ + "autoscaling:BatchPutScheduledUpdateGroupAction", + "autoscaling:BatchDeleteScheduledAction", + "autoscaling:CreateOrUpdateTags", + ], + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":autoscaling:*:", + { + "Ref": "AWS::AccountId", + }, + ":autoScalingGroup:*:autoScalingGroupName/*", + ], + ], + }, }, { - "id": "W76", - "reason": "All policies have been scoped to be as restrictive as possible. 
This solution needs to access ec2/rds resources across all regions.", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeScheduledActions", + ], + "Effect": "Allow", + "Resource": "*", + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "AsgSchedulingRoleASGSchedulingPermissions6DEABC8F", + "Roles": [ + { + "Ref": "AsgSchedulingRoleF04B8CC9", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "AsgSchedulingRoleF04B8CC9": { + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W28", + "reason": "The role name is defined to allow cross account access from the hub account.", }, ], }, @@ -324,122 +414,180 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` "Effect": "Allow", "Principal": { "AWS": { - "Fn::Sub": [ - "arn:\${AWS::Partition}:iam::\${accountId}:root", - { - "accountId": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":iam::", + { "Ref": "InstanceSchedulerAccount", }, - }, + ":role/", + { + "Ref": "Namespace", + }, + "-AsgRequestHandler-Role", + ], ], }, }, }, + ], + "Version": "2012-10-17", + }, + "RoleName": { + "Fn::Join": [ + "", + [ + { + "Ref": "Namespace", + }, + "-ASG-Scheduling-Role", + ], + ], + }, + }, + "Type": "AWS::IAM::Role", + }, + "EC2SchedulerCrossAccountRole": { + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W28", + "reason": "The role name is defined to allow cross account access from the hub account.", + }, + ], + }, + }, + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { - "Service": "lambda.amazonaws.com", + "AWS": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":iam::", + { + "Ref": "InstanceSchedulerAccount", + }, + ":role/", + { + "Ref": "Namespace", + }, + "-SchedulingRequestHandler-Role", + ], + ], + }, }, }, ], "Version": "2012-10-17", }, - "Path": "/", - "Policies": [ - { - "PolicyDocument": { - "Statement": [ 
- { - "Action": [ - "rds:DeleteDBSnapshot", - "rds:DescribeDBSnapshots", - "rds:StopDBInstance", - ], - "Effect": "Allow", - "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:snapshot:*", - }, + "RoleName": { + "Fn::Join": [ + "", + [ + { + "Ref": "Namespace", + }, + "-Scheduler-Role", + ], + ], + }, + }, + "Type": "AWS::IAM::Role", + }, + "EC2SchedulerCrossAccountRoleKmsPermissions93DB5FB5": { + "Condition": "EC2SchedulerCrossAccountRolekmsAccessCondition6C83D407", + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Specific kms keys are unknown until runtime, for security, access is instead restricted to only granting decryption permissions to the ec2 service for encrypted EBS volumes", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "kms:CreateGrant", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": true, }, - { - "Action": [ - "rds:AddTagsToResource", - "rds:RemoveTagsFromResource", - "rds:DescribeDBSnapshots", - "rds:StartDBInstance", - "rds:StopDBInstance", + "ForAllValues:StringEquals": { + "kms:EncryptionContextKeys": [ + "aws:ebs:id", ], - "Effect": "Allow", - "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:db:*", - }, - }, - { - "Action": [ - "rds:AddTagsToResource", - "rds:RemoveTagsFromResource", - "rds:StartDBCluster", - "rds:StopDBCluster", + "kms:GrantOperations": [ + "Decrypt", ], - "Effect": "Allow", - "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:cluster:*", - }, }, - { - "Action": [ - "ec2:StartInstances", - "ec2:StopInstances", - "ec2:CreateTags", - "ec2:DeleteTags", - ], - "Effect": "Allow", - "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:ec2:*:\${AWS::AccountId}:instance/*", - }, + "Null": { + "kms:EncryptionContextKeys": false, + "kms:GrantOperations": false, }, - { - "Action": [ - "rds:DescribeDBClusters", - 
"rds:DescribeDBInstances", - "ec2:DescribeInstances", - "ssm:DescribeMaintenanceWindows", - "ssm:DescribeMaintenanceWindowExecutions", - "tag:GetResources", - ], - "Effect": "Allow", - "Resource": "*", + "StringLike": { + "kms:ViaService": "ec2.*.amazonaws.com", }, - ], - "Version": "2012-10-17", - }, - "PolicyName": "EC2InstanceSchedulerRemote", - }, - ], - "RoleName": { - "Fn::Sub": [ - "\${Namespace}-\${Name}", - { - "Name": { - "Fn::FindInMap": [ - "mappings", - "SchedulerRole", - "Name", - ], + }, + "Effect": "Allow", + "Resource": { + "Ref": "KmsKeyArns", }, }, ], + "Version": "2012-10-17", }, + "PolicyName": "EC2SchedulerCrossAccountRoleKmsPermissions93DB5FB5", + "Roles": [ + { + "Ref": "EC2SchedulerCrossAccountRole", + }, + ], }, - "Type": "AWS::IAM::Role", + "Type": "AWS::IAM::Policy", }, - "Ec2ModifyInstanceAttrPolicy4B693ACF": { + "EC2SchedulerCrossAccountRoleSchedulingPermissions3E73CF8A": { "Metadata": { "cdk_nag": { "rules_to_suppress": [ { + "applies_to": [ + "Resource::arn::rds:*::db:*", + "Resource::arn::rds:*::cluster:*", + "Resource::arn::ec2:*::instance/*", + "Resource::arn::rds:*::snapshot:*", + "Resource::*", + ], "id": "AwsSolutions-IAM5", - "reason": "All policies have been scoped to be as restrictive as possible. 
This solution needs to access ec2/rds resources across all regions.", + "reason": "required scheduling permissions", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "required scheduling permissions", }, ], }, @@ -448,16 +596,76 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` "PolicyDocument": { "Statement": [ { - "Action": "ec2:ModifyInstanceAttribute", + "Action": "ec2:DescribeInstances", + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:ModifyInstanceAttribute", + ], "Effect": "Allow", "Resource": { "Fn::Sub": "arn:\${AWS::Partition}:ec2:*:\${AWS::AccountId}:instance/*", }, }, + { + "Action": "ssm:DescribeMaintenanceWindows", + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "rds:DescribeDBClusters", + "rds:DescribeDBInstances", + "tag:GetResources", + ], + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "rds:DeleteDBSnapshot", + "rds:DescribeDBSnapshots", + "rds:StopDBInstance", + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:snapshot:*", + }, + }, + { + "Action": [ + "rds:AddTagsToResource", + "rds:RemoveTagsFromResource", + "rds:StartDBInstance", + "rds:StopDBInstance", + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:db:*", + }, + }, + { + "Action": [ + "rds:AddTagsToResource", + "rds:RemoveTagsFromResource", + "rds:StartDBCluster", + "rds:StopDBCluster", + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:cluster:*", + }, + }, ], "Version": "2012-10-17", }, - "PolicyName": "Ec2ModifyInstanceAttrPolicy4B693ACF", + "PolicyName": "EC2SchedulerCrossAccountRoleSchedulingPermissions3E73CF8A", "Roles": [ { "Ref": "EC2SchedulerCrossAccountRole", @@ -466,58 +674,153 @@ exports[`InstanceSchedulerRemoteStack snapshot test 
1`] = ` }, "Type": "AWS::IAM::Policy", }, - "SSMParameterNamespace2002A907": { - "Condition": "IsMemberOfOrganization", + "RegisterSpokeAccountCustomResourceLambda8BF25EDC": { + "Condition": "UsingAWSOrganizationsCondition", "DependsOn": [ - "schedulerssmparameterstoreevent", + "RegisterSpokeAccountCustomResourceLambdaRoleDefaultPolicy3599775F", + "RegisterSpokeAccountCustomResourceLambdaRoleDD724340", ], + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "id": "AwsSolutions-L1", + "reason": "Python 3.11 is the newest available runtime. This finding is a false positive.", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W89", + "reason": "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + "id": "W58", + "reason": "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + "id": "W92", + "reason": "Lambda function is a custom resource. Concurrent calls are very limited.", + }, + ], + }, + }, "Properties": { - "Description": "This parameter is for Instance Scheduler solution to support accounts in AWS Organizations.", - "Name": "/instance-scheduler/do-not-delete-manually", - "Type": "String", - "Value": { - "Ref": "Namespace", + "Code": "Omitted to remove snapshot dependency on code hash", + "Description": "Custom Resource Provider used for spoke account self registration via aws organization", + "Environment": { + "Variables": { + "HUB_REGISTRATION_LAMBDA_ARN": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":lambda:", + { + "Ref": "AWS::Region", + }, + ":", + { + "Ref": "InstanceSchedulerAccount", + }, + ":function:InstanceScheduler-", + { + "Ref": "Namespace", + }, + "-SpokeRegistration", + ], + ], + }, + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", + }, + }, + "Handler": "handle_remote_registration_request", + "MemorySize": 128, + "Role": { + "Fn::GetAtt": [ + 
"RegisterSpokeAccountCustomResourceLambdaRoleDD724340", + "Arn", + ], + }, + "Runtime": "python3.11", + "Timeout": 60, + "TracingConfig": { + "Mode": "Active", }, }, - "Type": "AWS::SSM::Parameter", + "Type": "AWS::Lambda::Function", }, - "SchedulerEventDeliveryPolicyD8B17948": { - "Condition": "IsMemberOfOrganization", + "RegisterSpokeAccountCustomResourceLambdaPolicyAE6C7E85": { + "Condition": "UsingAWSOrganizationsCondition", "Properties": { "PolicyDocument": { "Statement": [ { - "Action": "events:PutEvents", + "Action": "lambda:InvokeFunction", "Effect": "Allow", "Resource": { - "Fn::Sub": [ - "arn:\${AWS::Partition}:events:\${AWS::Region}:\${InstanceSchedulerAccount}:event-bus/\${Namespace}-\${EventBusName}", - { - "EventBusName": { - "Fn::FindInMap": [ - "mappings", - "SchedulerEventBusName", - "Name", - ], + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", }, - }, + ":lambda:", + { + "Ref": "AWS::Region", + }, + ":", + { + "Ref": "InstanceSchedulerAccount", + }, + ":function:InstanceScheduler-", + { + "Ref": "Namespace", + }, + "-SpokeRegistration", + ], + ], + }, + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "SpokeRegistrationLogGroup7EBCC472", + "Arn", ], }, }, ], "Version": "2012-10-17", }, - "PolicyName": "SchedulerEventDeliveryPolicyD8B17948", + "PolicyName": "RegisterSpokeAccountCustomResourceLambdaPolicyAE6C7E85", "Roles": [ { - "Ref": "SchedulerEventDeliveryRole5AE883C1", + "Ref": "RegisterSpokeAccountCustomResourceLambdaRoleDD724340", }, ], }, "Type": "AWS::IAM::Policy", }, - "SchedulerEventDeliveryRole5AE883C1": { - "Condition": "IsMemberOfOrganization", + "RegisterSpokeAccountCustomResourceLambdaPolicyWaiter": { + "Condition": "UsingAWSOrganizationsCondition", + "DependsOn": [ + "RegisterSpokeAccountCustomResourceLambdaPolicyAE6C7E85", + ], + "Type": "AWS::CloudFormation::WaitConditionHandle", + }, + 
"RegisterSpokeAccountCustomResourceLambdaRoleDD724340": { + "Condition": "UsingAWSOrganizationsCondition", "Properties": { "AssumeRolePolicyDocument": { "Statement": [ @@ -525,74 +828,134 @@ exports[`InstanceSchedulerRemoteStack snapshot test 1`] = ` "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { - "Service": "events.amazonaws.com", + "Service": "lambda.amazonaws.com", }, }, ], "Version": "2012-10-17", }, - "Description": "Event Role to add the permissions necessary to migrate a sender-receiver relationship to Use AWS Organizations", }, "Type": "AWS::IAM::Role", }, - "schedulerssmparameterstoreevent": { - "Condition": "IsMemberOfOrganization", - "DependsOn": [ - "SchedulerEventDeliveryRole5AE883C1", - ], - "Properties": { - "Description": "Event rule to invoke Instance Scheduler lambda function to store spoke account id in configuration.", - "EventPattern": { - "account": [ - "111111111111", - ], - "detail": { - "name": [ - "/instance-scheduler/do-not-delete-manually", - ], - "operation": [ - "Create", - "Delete", - ], - "type": [ - "String", - ], - }, - "detail-type": [ - "Parameter Store Change", - ], - "source": [ - "aws.ssm", + "RegisterSpokeAccountCustomResourceLambdaRoleDefaultPolicy3599775F": { + "Condition": "UsingAWSOrganizationsCondition", + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, ], }, - "State": "ENABLED", - "Targets": [ - { - "Arn": { - "Fn::Sub": [ - "arn:\${AWS::Partition}:events:\${AWS::Region}:\${InstanceSchedulerAccount}:event-bus/\${Namespace}-\${EventBusName}", - { - "EventBusName": { - "Fn::FindInMap": [ - "mappings", - "SchedulerEventBusName", - "Name", - ], - }, - }, - ], + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", }, - "Id": "Spoke-SSM-Parameter-Event", - "RoleArn": { - "Fn::GetAtt": [ - "SchedulerEventDeliveryRole5AE883C1", - "Arn", + ], + }, + }, 
+ "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", ], + "Effect": "Allow", + "Resource": "*", }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "RegisterSpokeAccountCustomResourceLambdaRoleDefaultPolicy3599775F", + "Roles": [ + { + "Ref": "RegisterSpokeAccountCustomResourceLambdaRoleDD724340", }, ], }, - "Type": "AWS::Events::Rule", + "Type": "AWS::IAM::Policy", + }, + "RemoteRegistrationCustomResource": { + "Condition": "UsingAWSOrganizationsCondition", + "DeletionPolicy": "Delete", + "DependsOn": [ + "RegisterSpokeAccountCustomResourceLambdaPolicyWaiter", + ], + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "RegisterSpokeAccountCustomResourceLambda8BF25EDC", + "Arn", + ], + }, + }, + "Type": "Custom::RegisterSpokeAccount", + "UpdateReplacePolicy": "Delete", + }, + "SpokeRegistrationLogGroup7EBCC472": { + "Condition": "UsingAWSOrganizationsCondition", + "DeletionPolicy": "Retain", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W84", + "reason": "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }, + ], + }, + }, + "Properties": { + "LogGroupName": { + "Fn::Join": [ + "", + [ + "/aws/lambda/", + { + "Ref": "RegisterSpokeAccountCustomResourceLambda8BF25EDC", + }, + ], + ], + }, + "RetentionInDays": 365, + }, + "Type": "AWS::Logs::LogGroup", + "UpdateReplacePolicy": "Retain", + }, + }, + "Rules": { + "CheckBootstrapVersion": { + "Assertions": [ + { + "Assert": { + "Fn::Not": [ + { + "Fn::Contains": [ + [ + "1", + "2", + "3", + "4", + "5", + ], + { + "Ref": "BootstrapVersion", + }, + ], + }, + ], + }, + "AssertDescription": "CDK bootstrap stack version 6 required. 
Please run 'cdk bootstrap' with a recent version of the CDK CLI.", + }, + ], }, }, } diff --git a/source/instance-scheduler/tests/__snapshots__/instance-scheduler-stack.test.ts.snap b/source/instance-scheduler/tests/__snapshots__/instance-scheduler-stack.test.ts.snap index de7ecfbd..edd176aa 100644 --- a/source/instance-scheduler/tests/__snapshots__/instance-scheduler-stack.test.ts.snap +++ b/source/instance-scheduler/tests/__snapshots__/instance-scheduler-stack.test.ts.snap @@ -2,97 +2,173 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` { - "AWSTemplateFormatVersion": "2010-09-09", "Conditions": { - "IsMemberOfOrganization": { + "AnonymizedMetricsEnabled": { "Fn::Equals": [ { - "Ref": "UsingAWSOrganizations", + "Fn::FindInMap": [ + "Send", + "AnonymousUsage", + "Data", + ], }, "Yes", ], }, - "ScheduleEC2": { - "Fn::Or": [ + "AppRegistryForInstanceSchedulerShouldDeployBA583A67": { + "Fn::Not": [ { "Fn::Equals": [ { - "Ref": "ScheduledServices", + "Ref": "AWS::Partition", }, - "EC2", + "aws-cn", ], }, + ], + }, + "CreateRdsSnapshotCondition": { + "Fn::Equals": [ { - "Fn::Equals": [ - { - "Ref": "ScheduledServices", - }, - "Both", - ], + "Ref": "CreateRdsSnapshot", }, + "Yes", ], }, - "ScheduleRDS": { - "Fn::Or": [ + "EnableRdsClusterSchedulingCondition": { + "Fn::Equals": [ { - "Fn::Equals": [ - { - "Ref": "ScheduledServices", - }, - "RDS", - ], + "Ref": "EnableRdsClusterScheduling", + }, + "Enabled", + ], + }, + "EnableSSMMaintenanceWindowsCondition": { + "Fn::Equals": [ + { + "Ref": "EnableSSMMaintenanceWindows", + }, + "Yes", + ], + }, + "OpsMonitoringCondition": { + "Fn::Equals": [ + { + "Ref": "OpsMonitoring", + }, + "Enabled", + ], + }, + "ScheduleASGsCondition": { + "Fn::Equals": [ + { + "Ref": "ScheduleASGs", + }, + "Enabled", + ], + }, + "ScheduleDocDbCondition": { + "Fn::Equals": [ + { + "Ref": "ScheduleDocDb", + }, + "Enabled", + ], + }, + "ScheduleEC2Condition": { + "Fn::Equals": [ + { + "Ref": "ScheduleEC2", + }, + "Enabled", + ], + }, + 
"ScheduleLambdaAccountCondition": { + "Fn::Equals": [ + { + "Ref": "ScheduleLambdaAccount", }, + "Yes", + ], + }, + "ScheduleNeptuneCondition": { + "Fn::Equals": [ + { + "Ref": "ScheduleNeptune", + }, + "Enabled", + ], + }, + "ScheduleRdsCondition": { + "Fn::Equals": [ + { + "Ref": "ScheduleRds", + }, + "Enabled", + ], + }, + "SchedulerRolekmsAccessCondition93ED0C6C": { + "Fn::Not": [ { "Fn::Equals": [ { - "Ref": "ScheduledServices", + "Fn::Select": [ + 0, + { + "Ref": "KmsKeyArns", + }, + ], }, - "Both", + "", ], }, ], }, + "SchedulingActiveCondition": { + "Fn::Equals": [ + { + "Ref": "SchedulingActive", + }, + "Yes", + ], + }, + "TraceCondition": { + "Fn::Equals": [ + { + "Ref": "Trace", + }, + "Yes", + ], + }, + "UsingAWSOrganizationsCondition": { + "Fn::Equals": [ + { + "Ref": "UsingAWSOrganizations", + }, + "Yes", + ], + }, + "ddbDeletionProtectionCondition": { + "Fn::Equals": [ + { + "Ref": "ddbDeletionProtection", + }, + "Enabled", + ], + }, }, - "Description": "", "Mappings": { "AppRegistryForInstanceSchedulerSolution25A90F05": { "Data": { - "AppRegistryApplicationName": "instance-scheduler-on-aws", - "ApplicationType": "AWS-Solutions", - "ID": "SO0030", - "SolutionName": "instance-scheduler-on-aws", - "Version": "v1.5.0", - }, - }, - "Send": { - "AnonymousUsage": { - "Data": "Yes", - }, - "ParameterKey": { - "UniqueId": "/Solutions/instance-scheduler-on-aws/UUID/", + "AppRegistryApplicationName": "my-appreg-solution-name", + "ApplicationType": "my-appreg-app-name", + "ID": "my-solution-id", + "SolutionName": "my-solution-name", + "Version": "v9.9.9", }, }, - "mappings": { - "EnabledDisabled": { - "No": "DISABLED", - "Yes": "ENABLED", - }, - "SchedulerEventBusName": { - "Name": "scheduler-event-bus", - }, - "SchedulerRole": { - "Name": "Scheduler-Role", - }, - "Services": { - "Both": "ec2,rds", - "EC2": "ec2", - "RDS": "rds", - }, - "Settings": { - "MetricsSolutionId": "S00030", - "MetricsUrl": "https://metrics.awssolutionsbuilder.com/generic", - }, - 
"Timeouts": { + "CronExpressionsForSchedulingIntervals": { + "IntervalMinutesToCron": { "1": "cron(0/1 * * * ? *)", "10": "cron(0/10 * * * ? *)", "15": "cron(0/15 * * * ? *)", @@ -101,9 +177,21 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "5": "cron(0/5 * * * ? *)", "60": "cron(0 0/1 * * ? *)", }, - "TrueFalse": { - "No": "False", - "Yes": "True", + }, + "MetricsSchedulingIntervalToSeconds": { + "MinutesToSeconds": { + "1": "60", + "10": "600", + "15": "900", + "2": "120", + "30": "1800", + "5": "300", + "60": "3600", + }, + }, + "Send": { + "AnonymousUsage": { + "Data": "Yes", }, }, }, @@ -112,127 +200,183 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "ParameterGroups": [ { "Label": { - "default": "Scheduler (version v1.5.0)", + "default": "Scheduler (v9.9.9)", }, "Parameters": [ "TagName", - "ScheduledServices", - "ScheduleRdsClusters", - "CreateRdsSnapshot", - "SchedulingActive", - "DefaultTimezone", - "ScheduleLambdaAccount", "SchedulerFrequency", - "MemorySize", + "DefaultTimezone", + "SchedulingActive", ], }, { "Label": { - "default": "Namespace Configuration", + "default": "Services", }, "Parameters": [ - "Namespace", + "ScheduleEC2", + "ScheduleRds", + "EnableRdsClusterScheduling", + "ScheduleNeptune", + "ScheduleDocDb", + "ScheduleASGs", + ], + }, + { + "Label": { + "default": "Tagging", + }, + "Parameters": [ + "StartedTags", + "StoppedTags", + ], + }, + { + "Label": { + "default": "Service-specific", + }, + "Parameters": [ + "EnableSSMMaintenanceWindows", + "KmsKeyArns", + "CreateRdsSnapshot", + "AsgScheduledTagKey", + "AsgRulePrefix", ], }, { "Label": { - "default": "Account Structure", + "default": "Account structure", }, "Parameters": [ "UsingAWSOrganizations", + "Namespace", "Principals", "Regions", + "ScheduleLambdaAccount", ], }, { "Label": { - "default": "Options", + "default": "Monitoring", }, "Parameters": [ - "UseCloudWatchMetrics", + "LogRetentionDays", "Trace", - "EnableSSMMaintenanceWindows", + "OpsMonitoring", ], }, { 
"Label": { - "default": "Other parameters", + "default": "Other", }, "Parameters": [ - "LogRetentionDays", - "StartedTags", - "StoppedTags", + "MemorySize", + "ddbDeletionProtection", ], }, ], "ParameterLabels": { + "AsgRulePrefix": { + "default": "ASG action name prefix", + }, + "AsgScheduledTagKey": { + "default": "ASG scheduled tag key", + }, "CreateRdsSnapshot": { - "default": "Create RDS instance snapshot", + "default": "Create RDS instance snapshots on stop", }, "DefaultTimezone": { "default": "Default time zone", }, + "EnableRdsClusterScheduling": { + "default": "Enable RDS cluster scheduling", + }, "EnableSSMMaintenanceWindows": { - "default": "Enable SSM Maintenance windows", + "default": "Enable EC2 SSM Maintenance Windows", + }, + "KmsKeyArns": { + "default": "Kms Key Arns for EC2", }, "LogRetentionDays": { - "default": "Log retention days", + "default": "Log retention period (days)", }, "MemorySize": { - "default": "Memory size", + "default": "Memory size (MB)", }, "Namespace": { "default": "Namespace", }, + "OpsMonitoring": { + "default": "Operational Monitoring", + }, "Principals": { - "default": "Organization Id/Remote Account Ids", + "default": "Organization ID/remote account IDs", }, "Regions": { "default": "Region(s)", }, + "ScheduleASGs": { + "default": "Enable AutoScaling Group scheduling", + }, + "ScheduleDocDb": { + "default": "Enable DocumentDB cluster scheduling", + }, + "ScheduleEC2": { + "default": "Enable EC2 scheduling", + }, "ScheduleLambdaAccount": { - "default": "This account", + "default": "Enable hub account scheduling", }, - "ScheduleRdsClusters": { - "default": "Schedule Aurora Clusters", + "ScheduleNeptune": { + "default": "Enable Neptune cluster scheduling", }, - "ScheduledServices": { - "default": "Service(s) to schedule", + "ScheduleRds": { + "default": "Enable RDS instance scheduling", }, "SchedulerFrequency": { - "default": "Frequency", + "default": "Scheduling interval (minutes)", }, "SchedulingActive": { - "default": 
"Scheduling enabled", + "default": "Enable scheduling", }, "StartedTags": { - "default": "Started tags", + "default": "Start tags", }, "StoppedTags": { - "default": "Stopped tags", + "default": "Stop tags", }, "TagName": { - "default": "Instance Scheduler tag name", + "default": "Schedule tag key", }, "Trace": { - "default": "Enable CloudWatch Debug Logs", - }, - "UseCloudWatchMetrics": { - "default": "Enable CloudWatch Metrics", + "default": "Enable CloudWatch debug Logs", }, "UsingAWSOrganizations": { "default": "Use AWS Organizations", }, + "ddbDeletionProtection": { + "default": "Protect DynamoDB Tables", + }, }, }, + "cdk_nag": { + "rules_to_suppress": [ + { + "id": "AwsSolutions-L1", + "reason": "Python 3.11 is the newest available runtime. This finding is a false positive.", + }, + ], + }, }, "Outputs": { "AccountId": { - "Description": "Account to give access to when creating cross-account access role for cross account scenario ", - "Value": "111111111111", + "Description": "Hub Account ID - for use in corresponding spoke stack parameter", + "Value": { + "Ref": "AWS::AccountId", + }, }, "ConfigurationTable": { - "Description": "Name of the DynamoDB configuration table", + "Description": "DynamoDB Configuration table ARN", "Value": { "Fn::GetAtt": [ "ConfigTable", @@ -241,22 +385,22 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` }, }, "IssueSnsTopicArn": { - "Description": "Topic to subscribe to for notifications of errors and warnings", + "Description": "Notification SNS Topic ARN", "Value": { "Ref": "InstanceSchedulerSnsTopic", }, }, "SchedulerRoleArn": { - "Description": "Role for the instance scheduler lambda function", + "Description": "Scheduler role ARN", "Value": { "Fn::GetAtt": [ - "SchedulerRole", + "SchedulerRole59E73443", "Arn", ], }, }, "ServiceInstanceScheduleServiceToken": { - "Description": "Arn to use as ServiceToken property for custom resource type Custom::ServiceInstanceSchedule", + "Description": "Custom resource provider ARN - 
use as ServiceToken property value for CloudFormation Schedules", "Value": { "Fn::GetAtt": [ "Main", @@ -266,13 +410,28 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` }, }, "Parameters": { + "AsgRulePrefix": { + "Default": "is-", + "Description": "The prefix Instance Scheduler will use when naming Scheduled Scaling actions for AutoScaling Groups. Actions with this prefix will be added and removed by Instance Scheduler as needed.", + "Type": "String", + }, + "AsgScheduledTagKey": { + "Default": "scheduled", + "Description": "Key for the tag Instance Scheduler will add to scheduled AutoScaling Groups", + "Type": "String", + }, + "BootstrapVersion": { + "Default": "/cdk-bootstrap/hnb659fds/version", + "Description": "Version of the CDK Bootstrap resources in this environment, automatically retrieved from SSM Parameter Store. [cdk:skip]", + "Type": "AWS::SSM::Parameter::Value", + }, "CreateRdsSnapshot": { "AllowedValues": [ "Yes", "No", ], "Default": "No", - "Description": "Create snapshot before stopping RDS instances (does not apply to Aurora Clusters).", + "Description": "Create snapshots before stopping RDS instances (not clusters).", "Type": "String", }, "DefaultTimezone": { @@ -710,7 +869,16 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "UTC", ], "Default": "UTC", - "Description": "Choose the default Time Zone. 
Default is 'UTC'.", + "Description": "Default IANA time zone identifier used by schedules that do not specify a time zone.", + "Type": "String", + }, + "EnableRdsClusterScheduling": { + "AllowedValues": [ + "Enabled", + "Disabled", + ], + "Default": "Enabled", + "Description": "Enable scheduling RDS clusters (multi-AZ and Aurora).", "Type": "String", }, "EnableSSMMaintenanceWindows": { @@ -719,9 +887,14 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "No", ], "Default": "No", - "Description": "Enable the solution to load SSM Maintenance Windows, so that they can be used for EC2 instance Scheduling.", + "Description": "Allow schedules to specify a maintenance window name. Instance Scheduler will ensure the instance is running during that maintenance window.", "Type": "String", }, + "KmsKeyArns": { + "Default": "", + "Description": "comma-separated list of kms arns to grant Instance Scheduler kms:CreateGrant permissions to provide the EC2 service with Decrypt permissions for encrypted EBS volumes. This allows the scheduler to start EC2 instances with attached encrypted EBS volumes. provide just (*) to give limited access to all kms keys, leave blank to disable. 
For details on the exact policy created, refer to security section of the implementation guide (https://aws.amazon.com/solutions/implementations/instance-scheduler-on-aws/)", + "Type": "CommaDelimitedList", + }, "LogRetentionDays": { "AllowedValues": [ "1", @@ -743,7 +916,7 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "3653", ], "Default": 30, - "Description": "Retention days for scheduler logs.", + "Description": "Retention period in days for logs.", "Type": "Number", }, "MemorySize": { @@ -761,50 +934,85 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "1536", ], "Default": 128, - "Description": "Size of the Lambda function running the scheduler, increase size when processing large numbers of instances.", + "Description": "Memory size of the Lambda function that schedules EC2 and RDS resources. Increase if you are experiencing high memory usage or timeouts.", "Type": "Number", }, "Namespace": { - "Default": "", - "Description": "Provide unique identifier to differentiate between multiple solution deployments (No Spaces). Example: Dev", + "Default": "default", + "Description": "Unique identifier per deployment. Cannot contain spaces.", + "Type": "String", + }, + "OpsMonitoring": { + "AllowedValues": [ + "Enabled", + "Disabled", + ], + "Default": "Enabled", + "Description": "Deploy operational metrics and an Ops Monitoring Dashboard to Cloudwatch", "Type": "String", }, "Principals": { "Default": "", - "Description": "(Required) If using AWS Organizations, provide the Organization ID. Eg. o-xxxxyyy. Else, provide a comma separated list of spoke account ids to schedule. Eg.: 1111111111, 2222222222 or {param: ssm-param-name}", + "Description": "(Required) If using AWS Organizations, provide the Organization ID. Eg. o-xxxxyyy. Else, provide a comma-separated list of spoke account ids to schedule. 
Eg.: 1111111111, 2222222222 or {param: ssm-param-name}", "Type": "CommaDelimitedList", }, "Regions": { "Default": "", - "Description": "List of regions in which instances should be scheduled, leave blank for current region only.", + "Description": "Comma-separated List of regions in which resources should be scheduled. Leave blank for current region only.", "Type": "CommaDelimitedList", }, + "ScheduleASGs": { + "AllowedValues": [ + "Enabled", + "Disabled", + ], + "Default": "Enabled", + "Description": "Enable scheduling AutoScaling Groups", + "Type": "String", + }, + "ScheduleDocDb": { + "AllowedValues": [ + "Enabled", + "Disabled", + ], + "Default": "Enabled", + "Description": "Enable scheduling DocumentDB clusters.", + "Type": "String", + }, + "ScheduleEC2": { + "AllowedValues": [ + "Enabled", + "Disabled", + ], + "Default": "Enabled", + "Description": "Enable scheduling EC2 instances.", + "Type": "String", + }, "ScheduleLambdaAccount": { "AllowedValues": [ "Yes", "No", ], "Default": "Yes", - "Description": "Schedule instances in this account.", + "Description": "Enable scheduling in this account.", "Type": "String", }, - "ScheduleRdsClusters": { + "ScheduleNeptune": { "AllowedValues": [ - "Yes", - "No", + "Enabled", + "Disabled", ], - "Default": "No", - "Description": "Enable scheduling of Aurora clusters for RDS Service.", + "Default": "Enabled", + "Description": "Enable scheduling Neptune clusters.", "Type": "String", }, - "ScheduledServices": { + "ScheduleRds": { "AllowedValues": [ - "EC2", - "RDS", - "Both", + "Enabled", + "Disabled", ], - "Default": "EC2", - "Description": "Scheduled Services.", + "Default": "Enabled", + "Description": "Enable scheduling individual RDS instances (not clusters).", "Type": "String", }, "SchedulerFrequency": { @@ -818,8 +1026,8 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "60", ], "Default": "5", - "Description": "Scheduler running frequency in minutes.", - "Type": "String", + "Description": "Interval in minutes 
between scheduler executions. For EC2 and RDS", + "Type": "Number", }, "SchedulingActive": { "AllowedValues": [ @@ -827,22 +1035,22 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "No", ], "Default": "Yes", - "Description": "Activate or deactivate scheduling.", + "Description": "Set to "No" to disable scheduling for all services.", "Type": "String", }, "StartedTags": { - "Default": "InstanceScheduler-LastAction=Started By {scheduler} {year}/{month}/{day} {hour}:{minute}{timezone}, ", - "Description": "Comma separated list of tag keys and values of the format key=value, key=value,... that are set on started instances. Leave blank to disable.", + "Default": "InstanceScheduler-LastAction=Started By {scheduler} {year}-{month}-{day} {hour}:{minute} {timezone}", + "Description": "Comma-separated list of tag keys and values of the format key=value, key=value,... that are set on started instances. Leave blank to disable.", "Type": "String", }, "StoppedTags": { - "Default": "InstanceScheduler-LastAction=Stopped By {scheduler} {year}/{month}/{day} {hour}:{minute}{timezone}, ", - "Description": "Comma separated list of tag keys and values of the format key=value, key=value,... that are set on stopped instances. Leave blank to disable.", + "Default": "InstanceScheduler-LastAction=Stopped By {scheduler} {year}-{month}-{day} {hour}:{minute} {timezone}", + "Description": "Comma-separated list of tag keys and values of the format key=value, key=value,... that are set on stopped instances. Leave blank to disable.", "Type": "String", }, "TagName": { "Default": "Schedule", - "Description": "Name of tag to use for associating instance schedule schemas with service instances.", + "Description": "The tag key Instance Scheduler will read to determine the schedule for a resource. 
The value of the tag with this key on a resource specifies the name of the schedule.", "MaxLength": 127, "MinLength": 1, "Type": "String", @@ -853,274 +1061,189 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "No", ], "Default": "No", - "Description": "Enable debug-level logging in CloudWatch logs.", + "Description": "Enable debug-level logging in CloudWatch Logs.", "Type": "String", }, - "UseCloudWatchMetrics": { + "UsingAWSOrganizations": { "AllowedValues": [ "Yes", "No", ], "Default": "No", - "Description": "Collect instance scheduling data using CloudWatch metrics.", + "Description": "Deploy resources to enable automatic spoke stack registration using AWS Organizations.", "Type": "String", }, - "UsingAWSOrganizations": { + "ddbDeletionProtection": { "AllowedValues": [ - "Yes", - "No", + "Enabled", + "Disabled", ], - "Default": "No", - "Description": "Use AWS Organizations to automate spoke account registration.", + "Default": "Enabled", + "Description": "Enable deletion protection for DynamoDB tables used by the solution. This will cause the tables to be retained when deleting this stack. 
To delete the tables when deleting this stack, first disable this parameter.", "Type": "String", }, }, "Resources": { - "AppRegistry968496A3": { - "Properties": { - "Description": { - "Fn::Join": [ - "", - [ - "Service Catalog application to track and manage all your resources for the solution ", - { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "SolutionName", - ], - }, - ], + "ASGHandler0F6D6751": { + "DependsOn": [ + "ASGRoleDefaultPolicy0DBFE29F", + "ASGRole21C54AF6", + ], + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W89", + "reason": "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + "id": "W58", + "reason": "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + "id": "W92", + "reason": "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", + }, ], }, - "Name": { - "Fn::Join": [ - "-", - [ - { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "AppRegistryApplicationName", + }, + "Properties": { + "Code": "Omitted to remove snapshot dependency on code hash", + "Description": "Instance Scheduler for AutoScaling Groups version v9.9.9", + "Environment": { + "Variables": { + "ASG_SCHEDULING_ROLE_NAME": { + "Fn::Join": [ + "", + [ + { + "Ref": "Namespace", + }, + "-ASG-Scheduling-Role", ], - }, - { - "Ref": "AWS::Region", - }, - { - "Ref": "AWS::AccountId", - }, - { - "Ref": "AWS::StackName", - }, - ], - ], - }, - "Tags": { - "Solutions:ApplicationType": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "ApplicationType", - ], - }, - "Solutions:SolutionID": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "ID", - ], - }, - "Solutions:SolutionName": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "SolutionName", - ], - }, - 
"Solutions:SolutionVersion": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "Version", - ], + ], + }, + "CONFIG_TABLE": { + "Ref": "ConfigTable", + }, + "DEFAULT_TIMEZONE": { + "Ref": "DefaultTimezone", + }, + "ISSUES_TOPIC_ARN": { + "Ref": "InstanceSchedulerSnsTopic", + }, + "METRICS_URL": "https://metrics.awssolutionsbuilder.com/generic", + "METRICS_UUID": { + "Fn::GetAtt": [ + "MetricsUuidProvider", + "Uuid", + ], + }, + "POWERTOOLS_LOG_LEVEL": { + "Fn::If": [ + "TraceCondition", + "DEBUG", + "INFO", + ], + }, + "POWERTOOLS_SERVICE_NAME": "asg", + "RULE_PREFIX": { + "Ref": "AsgRulePrefix", + }, + "SCHEDULED_TAG_KEY": { + "Ref": "AsgScheduledTagKey", + }, + "SCHEDULE_TAG_KEY": { + "Ref": "TagName", + }, + "SCHEDULING_INTERVAL_MINUTES": { + "Ref": "SchedulerFrequency", + }, + "SEND_METRICS": { + "Fn::If": [ + "AnonymizedMetricsEnabled", + "True", + "False", + ], + }, + "SOLUTION_ID": "my-solution-id", + "SOLUTION_VERSION": "v9.9.9", + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", }, }, - }, - "Type": "AWS::ServiceCatalogAppRegistry::Application", - }, - "AppRegistryAssociation": { - "Properties": { - "Application": { + "Handler": "lambda_handler", + "MemorySize": 128, + "Role": { "Fn::GetAtt": [ - "AppRegistry968496A3", - "Id", + "ASGRole21C54AF6", + "Arn", ], }, - "Resource": { - "Ref": "AWS::StackId", + "Runtime": "python3.11", + "Timeout": 300, + "TracingConfig": { + "Mode": "Active", }, - "ResourceType": "CFN_STACK", }, - "Type": "AWS::ServiceCatalogAppRegistry::ResourceAssociation", + "Type": "AWS::Lambda::Function", }, - "AppRegistryAttributeGroupAssociationf823ba38a843A987197E": { - "Properties": { - "Application": { - "Fn::GetAtt": [ - "AppRegistry968496A3", - "Id", - ], - }, - "AttributeGroup": { - "Fn::GetAtt": [ - "AppRegistryDefaultApplicationAttributes15279635", - "Id", + "ASGHandlerLogGroupA8EE11F3": { + "DeletionPolicy": "Retain", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": 
"W84", + "reason": "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }, ], }, }, - "Type": "AWS::ServiceCatalogAppRegistry::AttributeGroupAssociation", - }, - "AppRegistryDefaultApplicationAttributes15279635": { "Properties": { - "Attributes": { - "applicationType": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "ApplicationType", - ], - }, - "solutionID": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "ID", - ], - }, - "solutionName": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "SolutionName", - ], - }, - "version": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "Version", - ], - }, - }, - "Description": "Attribute group for solution information", - "Name": { + "LogGroupName": { "Fn::Join": [ "", [ - "attgroup-", + "/aws/lambda/", { - "Fn::Join": [ - "-", - [ - { - "Ref": "AWS::Region", - }, - { - "Ref": "AWS::StackName", - }, - ], - ], + "Ref": "ASGHandler0F6D6751", }, ], ], }, - "Tags": { - "Solutions:ApplicationType": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "ApplicationType", - ], - }, - "Solutions:SolutionID": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "ID", - ], - }, - "Solutions:SolutionName": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "SolutionName", - ], - }, - "Solutions:SolutionVersion": { - "Fn::FindInMap": [ - "AppRegistryForInstanceSchedulerSolution25A90F05", - "Data", - "Version", - ], - }, - }, - }, - "Type": "AWS::ServiceCatalogAppRegistry::AttributeGroup", - }, - "ConfigTable": { - "DeletionPolicy": "Delete", - "Properties": { - "AttributeDefinitions": [ - { - "AttributeName": "type", - "AttributeType": "S", - }, - { - "AttributeName": "name", - "AttributeType": "S", 
- }, - ], - "BillingMode": "PAY_PER_REQUEST", - "KeySchema": [ - { - "AttributeName": "type", - "KeyType": "HASH", - }, - { - "AttributeName": "name", - "KeyType": "RANGE", - }, - ], - "PointInTimeRecoverySpecification": { - "PointInTimeRecoveryEnabled": true, - }, - "SSESpecification": { - "KMSMasterKeyId": { - "Ref": "InstanceSchedulerEncryptionKey", - }, - "SSEEnabled": true, - "SSEType": "KMS", + "RetentionInDays": { + "Ref": "LogRetentionDays", }, }, - "Type": "AWS::DynamoDB::Table", - "UpdateReplacePolicy": "Delete", + "Type": "AWS::Logs::LogGroup", + "UpdateReplacePolicy": "Retain", }, - "EC2DynamoDBPolicy": { + "ASGPolicy3B6FAA4E": { "Metadata": { "cdk_nag": { "rules_to_suppress": [ { + "applies_to": [ + "Action::kms:GenerateDataKey*", + "Action::kms:ReEncrypt*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Permission to use solution CMK with dynamo/sns", + }, + { + "applies_to": [ + "Resource::arn::iam::*:role/-ASG-Scheduling-Role", + ], "id": "AwsSolutions-IAM5", - "reason": "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", + "reason": "This handler's primary purpose is to assume role into spoke accounts for scheduling purposes", }, ], }, "cfn_nag": { "rules_to_suppress": [ { - "id": "W12", - "reason": "All policies have been scoped to be as restrictive as possible. 
This solution needs to access ec2/rds resources across all regions.", + "id": "W76", + "reason": "Acknowledged IAM policy document SPCM > 25", }, ], }, @@ -1130,230 +1253,3869 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "Statement": [ { "Action": [ - "ssm:GetParameter", - "ssm:GetParameters", + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "ASGHandlerLogGroupA8EE11F3", + "Arn", + ], + }, + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", ], "Effect": "Allow", "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:ssm:*:\${AWS::AccountId}:parameter/*", + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], }, }, { "Action": [ - "rds:DescribeDBClusters", - "rds:DescribeDBInstances", - "ec2:DescribeInstances", - "cloudwatch:PutMetricData", - "ssm:DescribeMaintenanceWindows", - "tag:GetResources", + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:ConditionCheckItem", + "dynamodb:DescribeTable", ], "Effect": "Allow", - "Resource": "*", + "Resource": [ + { + "Fn::GetAtt": [ + "ConfigTable", + "Arn", + ], + }, + { + "Ref": "AWS::NoValue", + }, + ], + }, + { + "Action": "sns:Publish", + "Effect": "Allow", + "Resource": { + "Ref": "InstanceSchedulerSnsTopic", + }, + }, + { + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":iam::*:role/", + { + "Ref": "Namespace", + }, + "-ASG-Scheduling-Role", + ], + ], + }, + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "ASGPolicy3B6FAA4E", + "Roles": [ + { + "Ref": "ASGRole21C54AF6", + }, + ], + }, + "Type": 
"AWS::IAM::Policy", + }, + "ASGRole21C54AF6": { + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W28", + "reason": "Explicit role name required for assumedBy arn principle in spoke stack", + }, + ], + }, + }, + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com", + }, + }, + ], + "Version": "2012-10-17", + }, + "RoleName": { + "Fn::Join": [ + "", + [ + { + "Ref": "Namespace", + }, + "-AsgRequestHandler-Role", + ], + ], + }, + }, + "Type": "AWS::IAM::Role", + }, + "ASGRoleDefaultPolicy0DBFE29F": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + ], + "Effect": "Allow", + "Resource": "*", + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "ASGRoleDefaultPolicy0DBFE29F", + "Roles": [ + { + "Ref": "ASGRole21C54AF6", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "ASGSchedulerASGOrchLogGroup58E06A86": { + "Condition": "ScheduleASGsCondition", + "DeletionPolicy": "Retain", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W84", + "reason": "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }, + ], + }, + }, + "Properties": { + "LogGroupName": { + "Fn::Join": [ + "", + [ + "/aws/lambda/", + { + "Ref": "ASGSchedulerASGOrchestrator6629B72C", + }, + ], + ], + }, + "RetentionInDays": { + "Ref": "LogRetentionDays", + }, + }, + "Type": "AWS::Logs::LogGroup", + "UpdateReplacePolicy": "Retain", + }, + "ASGSchedulerASGOrchPolicyB5F2EC10": { + "Condition": 
"ScheduleASGsCondition", + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Action::kms:GenerateDataKey*", + "Action::kms:ReEncrypt*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Permission to use solution CMK with dynamo/sns", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "ASGSchedulerASGOrchLogGroup58E06A86", + "Arn", + ], + }, + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + { + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:ConditionCheckItem", + "dynamodb:DescribeTable", + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "ConfigTable", + "Arn", + ], + }, + { + "Ref": "AWS::NoValue", + }, + ], + }, + { + "Action": "sns:Publish", + "Effect": "Allow", + "Resource": { + "Ref": "InstanceSchedulerSnsTopic", + }, + }, + { + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "ASGSchedulerASGOrchPolicyB5F2EC10", + "Roles": [ + { + "Ref": "ASGSchedulerASGOrchRole5B79F32B", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "ASGSchedulerASGOrchRole5B79F32B": { + "Condition": "ScheduleASGsCondition", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com", + }, + }, + ], + "Version": "2012-10-17", + }, + }, + "Type": "AWS::IAM::Role", + }, + "ASGSchedulerASGOrchRoleDefaultPolicy55DE89B0": { 
+ "Condition": "ScheduleASGsCondition", + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, + { + "applies_to": [ + "Resource:::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "permissions to invoke all versions of the ASG scheduling request handler", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + ], + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "ASGHandler0F6D6751", + "Arn", + ], + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "ASGHandler0F6D6751", + "Arn", + ], + }, + ":*", + ], + ], + }, + ], + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "ASGSchedulerASGOrchRoleDefaultPolicy55DE89B0", + "Roles": [ + { + "Ref": "ASGSchedulerASGOrchRole5B79F32B", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "ASGSchedulerASGOrchRule510DB4D0": { + "Condition": "ScheduleASGsCondition", + "Properties": { + "Description": "Instance Scheduler - Rule to trigger scheduling for AutoScaling Groups version v9.9.9", + "ScheduleExpression": "rate(1 hour)", + "State": "ENABLED", + "Targets": [ + { + "Arn": { + "Fn::GetAtt": [ + "ASGSchedulerASGOrchestrator6629B72C", + "Arn", + ], + }, + "Id": "Target0", + "Input": "{}", + "RetryPolicy": { + "MaximumRetryAttempts": 5, + }, + }, + ], + }, + "Type": "AWS::Events::Rule", + }, + "ASGSchedulerASGOrchRuleAllowEventRulestackASGSchedulerASGOrchestrator9436C96D61301E75": { + "Condition": "ScheduleASGsCondition", + "Properties": { + "Action": "lambda:InvokeFunction", + "FunctionName": { + "Fn::GetAtt": [ + "ASGSchedulerASGOrchestrator6629B72C", + "Arn", + ], + }, + "Principal": 
"events.amazonaws.com", + "SourceArn": { + "Fn::GetAtt": [ + "ASGSchedulerASGOrchRule510DB4D0", + "Arn", + ], + }, + }, + "Type": "AWS::Lambda::Permission", + }, + "ASGSchedulerASGOrchestrator6629B72C": { + "Condition": "ScheduleASGsCondition", + "DependsOn": [ + "ASGSchedulerASGOrchRoleDefaultPolicy55DE89B0", + "ASGSchedulerASGOrchRole5B79F32B", + ], + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W89", + "reason": "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + "id": "W58", + "reason": "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + "id": "W92", + "reason": "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", + }, + ], + }, + }, + "Properties": { + "Code": "Omitted to remove snapshot dependency on code hash", + "Description": "Instance Scheduler orchestrator for AutoScaling Groups version v9.9.9", + "Environment": { + "Variables": { + "ASG_SCHEDULER_NAME": { + "Ref": "ASGHandler0F6D6751", + }, + "CONFIG_TABLE": { + "Ref": "ConfigTable", + }, + "ENABLE_SCHEDULE_HUB_ACCOUNT": { + "Fn::If": [ + "ScheduleLambdaAccountCondition", + "True", + "False", + ], + }, + "ISSUES_TOPIC_ARN": { + "Ref": "InstanceSchedulerSnsTopic", + }, + "METRICS_URL": "https://metrics.awssolutionsbuilder.com/generic", + "METRICS_UUID": { + "Fn::GetAtt": [ + "MetricsUuidProvider", + "Uuid", + ], + }, + "POWERTOOLS_LOG_LEVEL": { + "Fn::If": [ + "TraceCondition", + "DEBUG", + "INFO", + ], + }, + "POWERTOOLS_SERVICE_NAME": "asg_orch", + "SCHEDULE_REGIONS": { + "Fn::Join": [ + ",", + { + "Ref": "Regions", + }, + ], + }, + "SCHEDULING_INTERVAL_MINUTES": { + "Ref": "SchedulerFrequency", + }, + "SEND_METRICS": { + "Fn::If": [ + "AnonymizedMetricsEnabled", + "True", + "False", + ], + }, + "SOLUTION_ID": "my-solution-id", + "SOLUTION_VERSION": "v9.9.9", + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", + }, + }, + "Handler": 
"lambda_handler", + "MemorySize": 128, + "Role": { + "Fn::GetAtt": [ + "ASGSchedulerASGOrchRole5B79F32B", + "Arn", + ], + }, + "Runtime": "python3.11", + "Timeout": 60, + "TracingConfig": { + "Mode": "Active", + }, + }, + "Type": "AWS::Lambda::Function", + }, + "ASGSchedulerAsgSchedulingRoleASGSchedulingPermissions9522C2B1": { + "Condition": "ScheduleASGsCondition", + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Required permissions to describe AutoScaling Groups", + }, + { + "applies_to": [ + "Resource::arn::autoscaling:*::autoScalingGroup:*:autoScalingGroupName/*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Required permissions to modify scheduled scaling actions on AutoScaling Groups", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "DescribeAutoScalingGroups and autoscaling:DescribeScheduledActions actions require wildcard permissions", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "autoscaling:BatchPutScheduledUpdateGroupAction", + "autoscaling:BatchDeleteScheduledAction", + "autoscaling:CreateOrUpdateTags", + ], + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":autoscaling:*:", + { + "Ref": "AWS::AccountId", + }, + ":autoScalingGroup:*:autoScalingGroupName/*", + ], + ], + }, + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeScheduledActions", + ], + "Effect": "Allow", + "Resource": "*", + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "ASGSchedulerAsgSchedulingRoleASGSchedulingPermissions9522C2B1", + "Roles": [ + { + "Ref": "ASGSchedulerAsgSchedulingRoleC184E725", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "ASGSchedulerAsgSchedulingRoleC184E725": { + "Condition": "ScheduleASGsCondition", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": 
"W28", + "reason": "The role name is defined to allow cross account access from the hub account.", + }, + ], + }, + }, + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::GetAtt": [ + "ASGRole21C54AF6", + "Arn", + ], + }, + }, + }, + ], + "Version": "2012-10-17", + }, + "RoleName": { + "Fn::Join": [ + "", + [ + { + "Ref": "Namespace", + }, + "-ASG-Scheduling-Role", + ], + ], + }, + }, + "Type": "AWS::IAM::Role", + }, + "ASGSchedulerScheduleUpdateHandlerCC8A8D00": { + "Condition": "ScheduleASGsCondition", + "DependsOn": [ + "ASGSchedulerScheduleUpdateHandlerRoleDefaultPolicy073EAF80", + "ASGSchedulerScheduleUpdateHandlerRole25C919AA", + ], + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W89", + "reason": "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + "id": "W58", + "reason": "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + "id": "W92", + "reason": "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", + }, + ], + }, + }, + "Properties": { + "Code": "Omitted to remove snapshot dependency on code hash", + "Description": "Instance Scheduler handler for updates to schedules version v9.9.9", + "Environment": { + "Variables": { + "ASG_SCHEDULER_NAME": { + "Ref": "ASGHandler0F6D6751", + }, + "CONFIG_TABLE": { + "Ref": "ConfigTable", + }, + "ENABLE_SCHEDULE_HUB_ACCOUNT": { + "Fn::If": [ + "ScheduleLambdaAccountCondition", + "True", + "False", + ], + }, + "ISSUES_TOPIC_ARN": { + "Ref": "InstanceSchedulerSnsTopic", + }, + "METRICS_URL": "https://metrics.awssolutionsbuilder.com/generic", + "METRICS_UUID": { + "Fn::GetAtt": [ + "MetricsUuidProvider", + "Uuid", + ], + }, + "POWERTOOLS_LOG_LEVEL": { + "Fn::If": [ + "TraceCondition", + "DEBUG", + "INFO", + ], + }, + "POWERTOOLS_SERVICE_NAME": "sch_upd", + "SCHEDULE_REGIONS": 
{ + "Fn::Join": [ + ",", + { + "Ref": "Regions", + }, + ], + }, + "SCHEDULING_INTERVAL_MINUTES": { + "Ref": "SchedulerFrequency", + }, + "SEND_METRICS": { + "Fn::If": [ + "AnonymizedMetricsEnabled", + "True", + "False", + ], + }, + "SOLUTION_ID": "my-solution-id", + "SOLUTION_VERSION": "v9.9.9", + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", + }, + }, + "Handler": "lambda_handler", + "MemorySize": 128, + "Role": { + "Fn::GetAtt": [ + "ASGSchedulerScheduleUpdateHandlerRole25C919AA", + "Arn", + ], + }, + "Runtime": "python3.11", + "Timeout": 60, + "TracingConfig": { + "Mode": "Active", + }, + }, + "Type": "AWS::Lambda::Function", + }, + "ASGSchedulerScheduleUpdateHandlerDynamoDBEventSourcestackConfigTableA8EC614E569DEAA0": { + "Condition": "ScheduleASGsCondition", + "Properties": { + "BatchSize": 100, + "EventSourceArn": { + "Fn::GetAtt": [ + "ConfigTable", + "StreamArn", + ], + }, + "FilterCriteria": { + "Filters": [ + { + "Pattern": "{"dynamodb":{"Keys":{"type":{"S":["schedule","period"]}}}}", + }, + { + "Pattern": "{"eventName":["INSERT","MODIFY"]}", + }, + ], + }, + "FunctionName": { + "Ref": "ASGSchedulerScheduleUpdateHandlerCC8A8D00", + }, + "MaximumBatchingWindowInSeconds": 60, + "StartingPosition": "LATEST", + }, + "Type": "AWS::Lambda::EventSourceMapping", + }, + "ASGSchedulerScheduleUpdateHandlerLogGroupA3E7612D": { + "Condition": "ScheduleASGsCondition", + "DeletionPolicy": "Retain", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W84", + "reason": "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }, + ], + }, + }, + "Properties": { + "LogGroupName": { + "Fn::Join": [ + "", + [ + "/aws/lambda/", + { + "Ref": "ASGSchedulerScheduleUpdateHandlerCC8A8D00", + }, + ], + ], + }, + "RetentionInDays": { + "Ref": "LogRetentionDays", + }, + }, + "Type": "AWS::Logs::LogGroup", + "UpdateReplacePolicy": "Retain", + }, + 
"ASGSchedulerScheduleUpdateHandlerPolicy81C87FA2": { + "Condition": "ScheduleASGsCondition", + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Action::kms:GenerateDataKey*", + "Action::kms:ReEncrypt*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Permission to use solution CMK with dynamo/sns", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "ASGSchedulerScheduleUpdateHandlerLogGroupA3E7612D", + "Arn", + ], + }, + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + { + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:ConditionCheckItem", + "dynamodb:DescribeTable", + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "ConfigTable", + "Arn", + ], + }, + { + "Ref": "AWS::NoValue", + }, + ], + }, + { + "Action": "sns:Publish", + "Effect": "Allow", + "Resource": { + "Ref": "InstanceSchedulerSnsTopic", + }, + }, + { + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "ASGSchedulerScheduleUpdateHandlerPolicy81C87FA2", + "Roles": [ + { + "Ref": "ASGSchedulerScheduleUpdateHandlerRole25C919AA", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "ASGSchedulerScheduleUpdateHandlerRole25C919AA": { + "Condition": "ScheduleASGsCondition", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com", + }, + }, + 
], + "Version": "2012-10-17", + }, + }, + "Type": "AWS::IAM::Role", + }, + "ASGSchedulerScheduleUpdateHandlerRoleDefaultPolicy073EAF80": { + "Condition": "ScheduleASGsCondition", + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, + { + "applies_to": [ + "Resource:::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "permissions to invoke all versions of the ASG scheduling request handler", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + ], + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": "dynamodb:ListStreams", + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + { + "Action": [ + "dynamodb:DescribeStream", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "ConfigTable", + "StreamArn", + ], + }, + }, + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "ASGHandler0F6D6751", + "Arn", + ], + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "ASGHandler0F6D6751", + "Arn", + ], + }, + ":*", + ], + ], + }, + ], + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "ASGSchedulerScheduleUpdateHandlerRoleDefaultPolicy073EAF80", + "Roles": [ + { + "Ref": "ASGSchedulerScheduleUpdateHandlerRole25C919AA", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "AppRegistry968496A3": { + "Condition": "AppRegistryForInstanceSchedulerShouldDeployBA583A67", + "Properties": { + "Description": { + "Fn::Join": [ + "", + [ + "Service Catalog application 
to track and manage all your resources for the solution ", + { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "SolutionName", + ], + }, + ], + ], + }, + "Name": { + "Fn::Join": [ + "-", + [ + { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "AppRegistryApplicationName", + ], + }, + { + "Ref": "AWS::Region", + }, + { + "Ref": "AWS::AccountId", + }, + { + "Ref": "AWS::StackName", + }, + ], + ], + }, + "Tags": { + "Solutions:ApplicationType": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "ApplicationType", + ], + }, + "Solutions:SolutionID": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "ID", + ], + }, + "Solutions:SolutionName": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "SolutionName", + ], + }, + "Solutions:SolutionVersion": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "Version", + ], + }, + }, + }, + "Type": "AWS::ServiceCatalogAppRegistry::Application", + }, + "AppRegistryAssociation": { + "Condition": "AppRegistryForInstanceSchedulerShouldDeployBA583A67", + "Properties": { + "Application": { + "Fn::GetAtt": [ + "AppRegistry968496A3", + "Id", + ], + }, + "Resource": { + "Ref": "AWS::StackId", + }, + "ResourceType": "CFN_STACK", + }, + "Type": "AWS::ServiceCatalogAppRegistry::ResourceAssociation", + }, + "AppRegistryAttributeGroupAssociationf823ba38a843A987197E": { + "Condition": "AppRegistryForInstanceSchedulerShouldDeployBA583A67", + "Properties": { + "Application": { + "Fn::GetAtt": [ + "AppRegistry968496A3", + "Id", + ], + }, + "AttributeGroup": { + "Fn::GetAtt": [ + "AppRegistryDefaultApplicationAttributes15279635", + "Id", + ], + }, + }, + "Type": "AWS::ServiceCatalogAppRegistry::AttributeGroupAssociation", + }, + "AppRegistryDefaultApplicationAttributes15279635": { + "Condition": 
"AppRegistryForInstanceSchedulerShouldDeployBA583A67", + "Properties": { + "Attributes": { + "applicationType": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "ApplicationType", + ], + }, + "solutionID": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "ID", + ], + }, + "solutionName": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "SolutionName", + ], + }, + "version": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "Version", + ], + }, + }, + "Description": "Attribute group for solution information", + "Name": { + "Fn::Join": [ + "", + [ + "attgroup-", + { + "Fn::Join": [ + "-", + [ + { + "Ref": "AWS::Region", + }, + { + "Ref": "AWS::StackName", + }, + ], + ], + }, + ], + ], + }, + "Tags": { + "Solutions:ApplicationType": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "ApplicationType", + ], + }, + "Solutions:SolutionID": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "ID", + ], + }, + "Solutions:SolutionName": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "SolutionName", + ], + }, + "Solutions:SolutionVersion": { + "Fn::FindInMap": [ + "AppRegistryForInstanceSchedulerSolution25A90F05", + "Data", + "Version", + ], + }, + }, + }, + "Type": "AWS::ServiceCatalogAppRegistry::AttributeGroup", + }, + "ConfigTable": { + "DeletionPolicy": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "Retain", + "Delete", + ], + }, + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "type", + "AttributeType": "S", + }, + { + "AttributeName": "name", + "AttributeType": "S", + }, + ], + "BillingMode": "PAY_PER_REQUEST", + "DeletionProtectionEnabled": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "True", + "False", + ], + }, + "KeySchema": [ + { + "AttributeName": "type", + 
"KeyType": "HASH", + }, + { + "AttributeName": "name", + "KeyType": "RANGE", + }, + ], + "PointInTimeRecoverySpecification": { + "PointInTimeRecoveryEnabled": true, + }, + "SSESpecification": { + "KMSMasterKeyId": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + "SSEEnabled": true, + "SSEType": "KMS", + }, + "StreamSpecification": { + "StreamViewType": "KEYS_ONLY", + }, + }, + "Type": "AWS::DynamoDB::Table", + "UpdateReplacePolicy": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "Retain", + "Delete", + ], + }, + }, + "InstanceSchedulerEncryptionKey": { + "DeletionPolicy": "Delete", + "Properties": { + "Description": "Key for SNS", + "EnableKeyRotation": true, + "Enabled": true, + "KeyPolicy": { + "Statement": [ + { + "Action": "kms:*", + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":iam::", + { + "Ref": "AWS::AccountId", + }, + ":root", + ], + ], + }, + }, + "Resource": "*", + }, + ], + "Version": "2012-10-17", + }, + }, + "Type": "AWS::KMS::Key", + "UpdateReplacePolicy": "Delete", + }, + "InstanceSchedulerEncryptionKeyAlias": { + "Properties": { + "AliasName": { + "Fn::Join": [ + "", + [ + "alias/", + { + "Ref": "AWS::StackName", + }, + "-instance-scheduler-encryption-key", + ], + ], + }, + "TargetKeyId": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + "Type": "AWS::KMS::Alias", + }, + "InstanceSchedulerSnsTopic": { + "Properties": { + "KmsMasterKeyId": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + "Type": "AWS::SNS::Topic", + }, + "Main": { + "DependsOn": [ + "MainLambdaRoleDefaultPolicy5347C905", + "MainLambdaRole500DC55A", + ], + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W89", + "reason": "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + "id": "W58", + "reason": "This Lambda function has permission provided to 
write to CloudWatch logs using the iam roles.", + }, + { + "id": "W92", + "reason": "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", + }, + ], + }, + }, + "Properties": { + "Code": "Omitted to remove snapshot dependency on code hash", + "Description": "EC2 and RDS instance scheduler, version v9.9.9", + "Environment": { + "Variables": { + "APP_NAMESPACE": { + "Ref": "Namespace", + }, + "CONFIG_TABLE": { + "Ref": "ConfigTable", + }, + "DEFAULT_TIMEZONE": { + "Ref": "DefaultTimezone", + }, + "ENABLE_AWS_ORGANIZATIONS": { + "Fn::If": [ + "UsingAWSOrganizationsCondition", + "True", + "False", + ], + }, + "ENABLE_DOCDB_SERVICE": { + "Fn::If": [ + "ScheduleDocDbCondition", + "True", + "False", + ], + }, + "ENABLE_EC2_SERVICE": { + "Fn::If": [ + "ScheduleEC2Condition", + "True", + "False", + ], + }, + "ENABLE_EC2_SSM_MAINTENANCE_WINDOWS": { + "Fn::If": [ + "EnableSSMMaintenanceWindowsCondition", + "True", + "False", + ], + }, + "ENABLE_NEPTUNE_SERVICE": { + "Fn::If": [ + "ScheduleNeptuneCondition", + "True", + "False", + ], + }, + "ENABLE_RDS_CLUSTERS": { + "Fn::If": [ + "EnableRdsClusterSchedulingCondition", + "True", + "False", + ], + }, + "ENABLE_RDS_SERVICE": { + "Fn::If": [ + "ScheduleRdsCondition", + "True", + "False", + ], + }, + "ENABLE_RDS_SNAPSHOTS": { + "Fn::If": [ + "CreateRdsSnapshotCondition", + "True", + "False", + ], + }, + "ENABLE_SCHEDULE_HUB_ACCOUNT": { + "Fn::If": [ + "ScheduleLambdaAccountCondition", + "True", + "False", + ], + }, + "ISSUES_TOPIC_ARN": { + "Ref": "InstanceSchedulerSnsTopic", + }, + "LOG_GROUP": { + "Ref": "SchedulerLogGroup", + }, + "MAINTENANCE_WINDOW_TABLE": { + "Ref": "MaintenanceWindowTable", + }, + "METRICS_URL": "https://metrics.awssolutionsbuilder.com/generic", + "METRICS_UUID": { + "Fn::GetAtt": [ + "MetricsUuidProvider", + "Uuid", + ], + }, + "SCHEDULER_FREQUENCY": { + "Ref": "SchedulerFrequency", + }, + "SCHEDULER_ROLE_NAME": "Scheduler-Role", + "SCHEDULE_REGIONS": { + "Fn::Join": [ + ",", + { + 
"Ref": "Regions", + }, + ], + }, + "SCHEDULE_TAG_KEY": { + "Ref": "TagName", + }, + "SEND_METRICS": { + "Fn::If": [ + "AnonymizedMetricsEnabled", + "True", + "False", + ], + }, + "SOLUTION_ID": "my-solution-id", + "SOLUTION_VERSION": "v9.9.9", + "STACK_ID": { + "Ref": "AWS::StackId", + }, + "STACK_NAME": { + "Ref": "AWS::StackName", + }, + "START_EC2_BATCH_SIZE": "5", + "START_TAGS": { + "Ref": "StartedTags", + }, + "STATE_TABLE": { + "Ref": "StateTable", + }, + "STOP_TAGS": { + "Ref": "StoppedTags", + }, + "TRACE": { + "Fn::If": [ + "TraceCondition", + "True", + "False", + ], + }, + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", + "UUID_KEY": "/Solutions/my-solution-name/UUID/", + }, + }, + "FunctionName": { + "Fn::Join": [ + "", + [ + { + "Ref": "AWS::StackName", + }, + "-InstanceSchedulerMain", + ], + ], + }, + "Handler": "lambda_handler", + "MemorySize": 128, + "Role": { + "Fn::GetAtt": [ + "MainLambdaRole500DC55A", + "Arn", + ], + }, + "Runtime": "python3.11", + "Timeout": 300, + "TracingConfig": { + "Mode": "Active", + }, + }, + "Type": "AWS::Lambda::Function", + }, + "MainLambdaRole500DC55A": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com", + }, + }, + ], + "Version": "2012-10-17", + }, + }, + "Type": "AWS::IAM::Role", + }, + "MainLambdaRoleDefaultPolicy5347C905": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Action::kms:GenerateDataKey*", + "Action::kms:ReEncrypt*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Permission to use solution CMK with dynamo/sns", + }, + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, + { + "applies_to": [ + "Resource::arn::logs:::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Permission to use the solution's custom log group", + }, + { + "applies_to": [ + 
"Resource::arn::logs:::log-group:/aws/lambda/-InstanceSchedulerMain:*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Permission to modify own log group retention policy", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", + }, + { + "id": "W76", + "reason": "Acknowledged IAM policy document SPCM > 25", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + ], + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + { + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:ConditionCheckItem", + "dynamodb:BatchWriteItem", + "dynamodb:PutItem", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem", + "dynamodb:DescribeTable", + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "ConfigTable", + "Arn", + ], + }, + { + "Ref": "AWS::NoValue", + }, + ], + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "SchedulerLogGroup", + "Arn", + ], + }, + }, + { + "Action": "sns:Publish", + "Effect": "Allow", + "Resource": { + "Ref": "InstanceSchedulerSnsTopic", + }, + }, + { + "Action": "logs:CreateLogGroup", + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":logs:", + { + "Ref": "AWS::Region", + }, + ":", + { + "Ref": "AWS::AccountId", + }, + ":*", + ], + ], + }, + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:PutRetentionPolicy", + ], + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + 
"arn:", + { + "Ref": "AWS::Partition", + }, + ":logs:", + { + "Ref": "AWS::Region", + }, + ":", + { + "Ref": "AWS::AccountId", + }, + ":log-group:/aws/lambda/", + { + "Ref": "AWS::StackName", + }, + "-InstanceSchedulerMain:*", + ], + ], + }, + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "MainLambdaRoleDefaultPolicy5347C905", + "Roles": [ + { + "Ref": "MainLambdaRole500DC55A", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "MaintenanceWindowTable": { + "DeletionPolicy": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "Retain", + "Delete", + ], + }, + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "account-region", + "AttributeType": "S", + }, + { + "AttributeName": "name-id", + "AttributeType": "S", + }, + ], + "BillingMode": "PAY_PER_REQUEST", + "DeletionProtectionEnabled": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "True", + "False", + ], + }, + "KeySchema": [ + { + "AttributeName": "account-region", + "KeyType": "HASH", + }, + { + "AttributeName": "name-id", + "KeyType": "RANGE", + }, + ], + "PointInTimeRecoverySpecification": { + "PointInTimeRecoveryEnabled": true, + }, + "SSESpecification": { + "KMSMasterKeyId": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + "SSEEnabled": true, + "SSEType": "KMS", + }, + }, + "Type": "AWS::DynamoDB::Table", + "UpdateReplacePolicy": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "Retain", + "Delete", + ], + }, + }, + "MetricsGeneratorRole83735CC4": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com", + }, + }, + ], + "Version": "2012-10-17", + }, + }, + "Type": "AWS::IAM::Role", + }, + "MetricsGeneratorRoleDefaultPolicy19F5F29A": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, + ], + }, + 
"cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + ], + "Effect": "Allow", + "Resource": "*", + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "MetricsGeneratorRoleDefaultPolicy19F5F29A", + "Roles": [ + { + "Ref": "MetricsGeneratorRole83735CC4", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "MetricsUuidGenerator172A04DB": { + "DependsOn": [ + "MetricsGeneratorRoleDefaultPolicy19F5F29A", + "MetricsGeneratorRole83735CC4", + ], + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W89", + "reason": "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + "id": "W58", + "reason": "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + "id": "W92", + "reason": "Lambda function is a custom resource. 
Concurrent calls are very limited.", + }, + ], + }, + }, + "Properties": { + "Code": "Omitted to remove snapshot dependency on code hash", + "Description": "Custom Resource Provider used to generate unique UUIDs for solution deployments", + "Environment": { + "Variables": { + "STACK_ID": { + "Ref": "AWS::StackId", + }, + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", + "UUID_KEY": "/Solutions/my-solution-name/UUID/", + }, + }, + "Handler": "handle_metrics_uuid_request", + "MemorySize": 128, + "Role": { + "Fn::GetAtt": [ + "MetricsGeneratorRole83735CC4", + "Arn", + ], + }, + "Runtime": "python3.11", + "Timeout": 60, + "TracingConfig": { + "Mode": "Active", + }, + }, + "Type": "AWS::Lambda::Function", + }, + "MetricsUuidHandlerLogGroupF41CCF90": { + "DeletionPolicy": "Retain", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W84", + "reason": "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }, + ], + }, + }, + "Properties": { + "LogGroupName": { + "Fn::Join": [ + "", + [ + "/aws/lambda/", + { + "Ref": "MetricsUuidGenerator172A04DB", + }, + ], + ], + }, + "RetentionInDays": { + "Ref": "LogRetentionDays", + }, + }, + "Type": "AWS::Logs::LogGroup", + "UpdateReplacePolicy": "Retain", + }, + "MetricsUuidPermissionsPolicyD0672406": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::arn::ssm:::parameter/Solutions/instance-scheduler-on-aws/UUID/*", + ], + "id": "AwsSolutions-IAM5", + "reason": "backwards compatibility (<=1.5.3) -- ability to read metrics UUID from ssm parameter", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "MetricsUuidHandlerLogGroupF41CCF90", + "Arn", + ], + }, + }, + { + "Action": [ + "ssm:GetParameters", + "ssm:GetParameter", + 
"ssm:GetParameterHistory", + ], + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":ssm:", + { + "Ref": "AWS::Region", + }, + ":", + { + "Ref": "AWS::AccountId", + }, + ":parameter/Solutions/my-solution-name/UUID/*", + ], + ], + }, + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "MetricsUuidPermissionsPolicyD0672406", + "Roles": [ + { + "Ref": "MetricsGeneratorRole83735CC4", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "MetricsUuidProvider": { + "DeletionPolicy": "Delete", + "DependsOn": [ + "MetricsUuidPermissionsPolicyD0672406", + ], + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "MetricsUuidGenerator172A04DB", + "Arn", + ], + }, + }, + "Type": "Custom::MetricsUuid", + "UpdateReplacePolicy": "Delete", + }, + "OperationalInsightsDashboardE3C49DFC": { + "Condition": "OpsMonitoringCondition", + "Properties": { + "DashboardBody": { + "Fn::Join": [ + "", + [ + "{"start":"-P7D","periodOverride":"inherit","widgets":[{"type":"text","width":24,"height":1,"x":0,"y":0,"properties":{"markdown":"# EC2"}},{"type":"metric","width":6,"height":6,"x":0,"y":1,"properties":{"view":"singleValue","title":"Total EC2 Instances Controlled","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"label":"EC2 Instances","expression":"SUM(SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} \\"Service\\"=\\"ec2\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=ManagedInstances', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + "))"}]],"period":", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + "}},{"type":"metric","width":6,"height":6,"x":6,"y":1,"properties":{"view":"pie","title":"EC2 Instances 
Controlled","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"label":"[\${LAST}]","expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} \\"Service\\"=\\"ec2\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=ManagedInstances', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{},"legend":{"position":"right"},"period":", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + "}},{"type":"metric","width":6,"height":6,"x":12,"y":1,"properties":{"view":"singleValue","title":"Total EC2 Hours Saved","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"label":"Hours Saved","expression":"SUM(SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} Service=\\"ec2\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"StoppedInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")) * ", + { + "Ref": "SchedulerFrequency", + }, + " / 60"}]],"setPeriodToTimeRange":true}},{"type":"metric","width":6,"height":6,"x":18,"y":1,"properties":{"view":"pie","title":"EC2 Hours Saved","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"label":"[\${SUM}]","expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} Service=\\"ec2\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"StoppedInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": 
"SchedulerFrequency", + }, + ], + }, + ") * ", + { + "Ref": "SchedulerFrequency", + }, + " / 60"}]],"yAxis":{},"legend":{"position":"right"},"setPeriodToTimeRange":true,"stat":"Sum"}},{"type":"metric","width":12,"height":6,"x":0,"y":7,"properties":{"view":"timeSeries","title":"Controlled EC2 Instances by Type","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} \\"Service\\"=\\"ec2\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=ManagedInstances', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{"left":{"label":"EC2 Instances","showUnits":false,"min":0}},"legend":{"position":"bottom"}}},{"type":"metric","width":12,"height":6,"x":12,"y":7,"properties":{"view":"timeSeries","title":"Running EC2 Instances by Type","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} Service=\\"ec2\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"RunningInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{"left":{"label":"Running EC2 Instances","showUnits":false,"min":0}},"legend":{"position":"bottom"}}},{"type":"metric","width":12,"height":6,"x":0,"y":13,"properties":{"view":"timeSeries","title":"Controlled EC2 Instances by Schedule","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,Schedule,SchedulingInterval} Service=\\"ec2\\" \\"SchedulingInterval\\"=\\"", + { + 
"Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"ManagedInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{"left":{"label":"EC2 Instances","showUnits":false,"min":0}},"legend":{"position":"bottom"}}},{"type":"metric","width":12,"height":6,"x":12,"y":13,"properties":{"view":"timeSeries","title":"Running EC2 Instances by Schedule","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,Schedule,SchedulingInterval} Service=\\"ec2\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"RunningInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{"left":{"label":"Running EC2 Instances","showUnits":false,"min":0}},"legend":{"position":"bottom"}}},{"type":"text","width":24,"height":1,"x":0,"y":19,"properties":{"markdown":"# RDS"}},{"type":"metric","width":6,"height":6,"x":0,"y":20,"properties":{"view":"singleValue","title":"Total RDS Instances Controlled","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"label":"RDS Instances","expression":"SUM(SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} \\"Service\\"=\\"rds\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=ManagedInstances', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + "))"}]],"period":", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + 
"}},{"type":"metric","width":6,"height":6,"x":6,"y":20,"properties":{"view":"pie","title":"RDS Instances Controlled","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"label":"[\${LAST}]","expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} \\"Service\\"=\\"rds\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"ManagedInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{},"legend":{"position":"right"},"period":", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + "}},{"type":"metric","width":6,"height":6,"x":12,"y":20,"properties":{"view":"singleValue","title":"Total RDS Hours Saved","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"label":"Hours Saved","expression":"SUM(SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} Service=\\"rds\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"StoppedInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")) * ", + { + "Ref": "SchedulerFrequency", + }, + " / 60"}]],"setPeriodToTimeRange":true}},{"type":"metric","width":6,"height":6,"x":18,"y":20,"properties":{"view":"pie","title":"RDS Hours Saved","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"label":"[\${SUM}]","expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} Service=\\"rds\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"StoppedInstances\\"', 'Sum', ", + 
{ + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ") * ", + { + "Ref": "SchedulerFrequency", + }, + " / 60","period":2592000}]],"yAxis":{},"legend":{"position":"right"},"setPeriodToTimeRange":true,"stat":"Sum"}},{"type":"metric","width":12,"height":6,"x":0,"y":26,"properties":{"view":"timeSeries","title":"Controlled RDS Instances by Type","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} \\"Service\\"=\\"rds\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"ManagedInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{"left":{"label":"Controlled RDS Instances","showUnits":false,"min":0}},"legend":{"position":"bottom"}}},{"type":"metric","width":12,"height":6,"x":12,"y":26,"properties":{"view":"timeSeries","title":"Running RDS Instances By Type","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,InstanceType,SchedulingInterval} Service=\\"rds\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"RunningInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{"left":{"label":"Running RDS Instances","showUnits":false,"min":0}},"legend":{"position":"bottom"}}},{"type":"metric","width":12,"height":6,"x":0,"y":32,"properties":{"view":"timeSeries","title":"Controlled RDS Instances By Schedule","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"expression":"SEARCH('{\\"", + { + "Ref": 
"AWS::StackName", + }, + ":InstanceScheduler\\",Service,Schedule,SchedulingInterval} Service=\\"rds\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"ManagedInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{"left":{"label":"Controlled RDS Instances","showUnits":false,"min":0}},"legend":{"position":"bottom"}}},{"type":"metric","width":12,"height":6,"x":12,"y":32,"properties":{"view":"timeSeries","title":"Running RDS Instances by Schedule","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[[{"expression":"SEARCH('{\\"", + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler\\",Service,Schedule,SchedulingInterval} Service=\\"rds\\" \\"SchedulingInterval\\"=\\"", + { + "Ref": "SchedulerFrequency", + }, + "\\" MetricName=\\"RunningInstances\\"', 'Sum', ", + { + "Fn::FindInMap": [ + "MetricsSchedulingIntervalToSeconds", + "MinutesToSeconds", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + ")"}]],"yAxis":{"left":{"label":"Running RDS Instances","showUnits":false,"min":0}},"legend":{"position":"bottom"}}},{"type":"text","width":24,"height":1,"x":0,"y":38,"properties":{"markdown":"# Lambda"}},{"type":"metric","width":12,"height":6,"x":0,"y":39,"properties":{"view":"timeSeries","title":"Lambda Duration (P99)","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[["AWS/Lambda","Duration","FunctionName","", + { + "Ref": "SchedulingOrchestratorBB4C4C5A", + }, + "",{"label":"Orchestrator"}],["AWS/Lambda","Duration","FunctionName","", + { + "Ref": "schedulingRequestHandlerLambdaC395DC9E", + }, + "",{"label":"SchedulingRequestHandler"}],["AWS/Lambda","Duration","FunctionName","", + { + "Ref": "ASGHandler0F6D6751", + }, + "",{"label":"AsgHandler"}]],"annotations":{"horizontal":[{"value":300000,"fill":"below","color":"#d62728","label":"Timeout Threshold (5 
minutes)","yAxis":"left"},{"value":240000,"fill":"below","color":"#ff7f0e","yAxis":"left"},{"value":180000,"fill":"below","color":"#2ca02c","yAxis":"left"}]},"yAxis":{"left":{"label":"duration (ms)","showUnits":false}},"legend":{"position":"bottom"},"liveData":true,"period":1800,"stat":"p99"}},{"type":"metric","width":12,"height":6,"x":12,"y":39,"properties":{"view":"timeSeries","title":"Lambda Errors","region":"", + { + "Ref": "AWS::Region", + }, + "","metrics":[["AWS/Lambda","Errors","FunctionName","", + { + "Ref": "SchedulingOrchestratorBB4C4C5A", + }, + "",{"label":"Orchestrator"}],["AWS/Lambda","Errors","FunctionName","", + { + "Ref": "schedulingRequestHandlerLambdaC395DC9E", + }, + "",{"label":"SchedulingRequestHandler"}],["AWS/Lambda","Errors","FunctionName","", + { + "Ref": "ASGHandler0F6D6751", + }, + "",{"label":"AsgHandler"}]],"yAxis":{"left":{"label":"Errors","showUnits":false}},"legend":{"position":"bottom"},"liveData":true,"period":1800,"stat":"Sum"}}]}", + ], + ], + }, + "DashboardName": { + "Fn::Join": [ + "", + [ + { + "Ref": "AWS::StackName", + }, + "-Operational-Insights-Dashboard", + ], + ], + }, + }, + "Type": "AWS::CloudWatch::Dashboard", + }, + "SchedulerConfigHelper": { + "DeletionPolicy": "Delete", + "DependsOn": [ + "SchedulerLogGroup", + ], + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "Main", + "Arn", + ], + }, + "log_retention_days": { + "Ref": "LogRetentionDays", + }, + "remote_account_ids": { + "Ref": "Principals", + }, + "timeout": 120, + }, + "Type": "Custom::ServiceSetup", + "UpdateReplacePolicy": "Delete", + }, + "SchedulerEventRuleAllowEventRulestackSchedulingOrchestrator54551718E1E4729D": { + "Properties": { + "Action": "lambda:InvokeFunction", + "FunctionName": { + "Fn::GetAtt": [ + "SchedulingOrchestratorBB4C4C5A", + "Arn", + ], + }, + "Principal": "events.amazonaws.com", + "SourceArn": { + "Fn::GetAtt": [ + "SchedulerEventRuleDD9A6B32", + "Arn", + ], + }, + }, + "Type": "AWS::Lambda::Permission", + }, + 
"SchedulerEventRuleDD9A6B32": { + "Properties": { + "Description": "Instance Scheduler - Rule to trigger instance for scheduler function version v9.9.9", + "ScheduleExpression": { + "Fn::FindInMap": [ + "CronExpressionsForSchedulingIntervals", + "IntervalMinutesToCron", + { + "Ref": "SchedulerFrequency", + }, + ], + }, + "State": { + "Fn::If": [ + "SchedulingActiveCondition", + "ENABLED", + "DISABLED", + ], + }, + "Targets": [ + { + "Arn": { + "Fn::GetAtt": [ + "SchedulingOrchestratorBB4C4C5A", + "Arn", + ], + }, + "Id": "Target0", + "Input": "{"scheduled_action":"run_orchestrator"}", + "RetryPolicy": { + "MaximumRetryAttempts": 5, + }, + }, + ], + }, + "Type": "AWS::Events::Rule", + }, + "SchedulerLogGroup": { + "DeletionPolicy": "Delete", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W84", + "reason": "CloudWatch log groups only have transactional data from the Lambda function, this template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group.", + }, + ], + }, + }, + "Properties": { + "LogGroupName": { + "Fn::Join": [ + "", + [ + { + "Ref": "AWS::StackName", + }, + "-logs", + ], + ], + }, + "RetentionInDays": { + "Ref": "LogRetentionDays", + }, + }, + "Type": "AWS::Logs::LogGroup", + "UpdateReplacePolicy": "Delete", + }, + "SchedulerRole59E73443": { + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W28", + "reason": "The role name is defined to allow cross account access from the hub account.", + }, + ], + }, + }, + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::GetAtt": [ + "schedulingRequestHandlerRoleD87803EB", + "Arn", + ], + }, + }, + }, + ], + "Version": "2012-10-17", + }, + "RoleName": { + "Fn::Join": [ + "", + [ + { + "Ref": "Namespace", + }, + "-Scheduler-Role", + ], + ], + }, + }, + "Type": "AWS::IAM::Role", + }, + 
"SchedulerRoleKmsPermissionsBF1B9F02": { + "Condition": "SchedulerRolekmsAccessCondition93ED0C6C", + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Specific kms keys are unknown until runtime, for security, access is instead restricted to only granting decryption permissions to the ec2 service for encrypted EBS volumes", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "kms:CreateGrant", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": true, + }, + "ForAllValues:StringEquals": { + "kms:EncryptionContextKeys": [ + "aws:ebs:id", + ], + "kms:GrantOperations": [ + "Decrypt", + ], + }, + "Null": { + "kms:EncryptionContextKeys": false, + "kms:GrantOperations": false, + }, + "StringLike": { + "kms:ViaService": "ec2.*.amazonaws.com", + }, + }, + "Effect": "Allow", + "Resource": { + "Ref": "KmsKeyArns", + }, + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "SchedulerRoleKmsPermissionsBF1B9F02", + "Roles": [ + { + "Ref": "SchedulerRole59E73443", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "SchedulerRoleSchedulingPermissions2C1B256A": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::arn::rds:*::db:*", + "Resource::arn::rds:*::cluster:*", + "Resource::arn::ec2:*::instance/*", + "Resource::arn::rds:*::snapshot:*", + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required scheduling permissions", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "required scheduling permissions", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "ec2:DescribeInstances", + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:ModifyInstanceAttribute", + ], + "Effect": "Allow", + "Resource": { + 
"Fn::Sub": "arn:\${AWS::Partition}:ec2:*:\${AWS::AccountId}:instance/*", + }, + }, + { + "Action": "ssm:DescribeMaintenanceWindows", + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "rds:DescribeDBClusters", + "rds:DescribeDBInstances", + "tag:GetResources", + ], + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "rds:DeleteDBSnapshot", + "rds:DescribeDBSnapshots", + "rds:StopDBInstance", + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:snapshot:*", + }, + }, + { + "Action": [ + "rds:AddTagsToResource", + "rds:RemoveTagsFromResource", + "rds:StartDBInstance", + "rds:StopDBInstance", + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:db:*", + }, + }, + { + "Action": [ + "rds:AddTagsToResource", + "rds:RemoveTagsFromResource", + "rds:StartDBCluster", + "rds:StopDBCluster", + ], + "Effect": "Allow", + "Resource": { + "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:cluster:*", + }, + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "SchedulerRoleSchedulingPermissions2C1B256A", + "Roles": [ + { + "Ref": "SchedulerRole59E73443", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "SchedulingOrchestratorBB4C4C5A": { + "DependsOn": [ + "SchedulingOrchestratorRoleDefaultPolicy29DE8B0D", + "SchedulingOrchestratorRoleAD0FF7B1", + ], + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W89", + "reason": "This Lambda function does not need to access any resource provisioned within a VPC.", + }, + { + "id": "W58", + "reason": "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + "id": "W92", + "reason": "Lambda function is invoked by a scheduled rule, it does not run concurrently", + }, + ], + }, + }, + "Properties": { + "Code": "Omitted to remove snapshot dependency on code hash", + "Description": "scheduling orchestrator for Instance Scheduler on AWS, 
version v9.9.9", + "Environment": { + "Variables": { + "CONFIG_TABLE": { + "Ref": "ConfigTable", + }, + "DEFAULT_TIMEZONE": { + "Ref": "DefaultTimezone", + }, + "ENABLE_ASG_SERVICE": { + "Fn::If": [ + "ScheduleASGsCondition", + "True", + "False", + ], + }, + "ENABLE_AWS_ORGANIZATIONS": { + "Fn::If": [ + "UsingAWSOrganizationsCondition", + "True", + "False", + ], + }, + "ENABLE_DEBUG_LOGS": { + "Fn::If": [ + "TraceCondition", + "True", + "False", + ], + }, + "ENABLE_DOCDB_SERVICE": { + "Fn::If": [ + "ScheduleDocDbCondition", + "True", + "False", + ], + }, + "ENABLE_EC2_SERVICE": { + "Fn::If": [ + "ScheduleEC2Condition", + "True", + "False", + ], + }, + "ENABLE_EC2_SSM_MAINTENANCE_WINDOWS": { + "Fn::If": [ + "EnableSSMMaintenanceWindowsCondition", + "True", + "False", + ], + }, + "ENABLE_NEPTUNE_SERVICE": { + "Fn::If": [ + "ScheduleNeptuneCondition", + "True", + "False", + ], + }, + "ENABLE_RDS_CLUSTERS": { + "Fn::If": [ + "EnableRdsClusterSchedulingCondition", + "True", + "False", + ], + }, + "ENABLE_RDS_SERVICE": { + "Fn::If": [ + "ScheduleRdsCondition", + "True", + "False", + ], + }, + "ENABLE_RDS_SNAPSHOTS": { + "Fn::If": [ + "CreateRdsSnapshotCondition", + "True", + "False", + ], + }, + "ENABLE_SCHEDULE_HUB_ACCOUNT": { + "Fn::If": [ + "ScheduleLambdaAccountCondition", + "True", + "False", + ], + }, + "ISSUES_TOPIC_ARN": { + "Ref": "InstanceSchedulerSnsTopic", + }, + "LOG_GROUP": { + "Ref": "SchedulerLogGroup", + }, + "METRICS_URL": "https://metrics.awssolutionsbuilder.com/generic", + "METRICS_UUID": { + "Fn::GetAtt": [ + "MetricsUuidProvider", + "Uuid", + ], + }, + "OPS_DASHBOARD_ENABLED": { + "Fn::If": [ + "OpsMonitoringCondition", + "True", + "False", + ], + }, + "SCHEDULE_REGIONS": { + "Fn::Join": [ + ",", + { + "Ref": "Regions", + }, + ], + }, + "SCHEDULING_INTERVAL_MINUTES": { + "Ref": "SchedulerFrequency", + }, + "SCHEDULING_REQUEST_HANDLER_NAME": { + "Ref": "schedulingRequestHandlerLambdaC395DC9E", + }, + "SEND_METRICS": { + "Fn::If": [ + 
"AnonymizedMetricsEnabled", + "True", + "False", + ], + }, + "SOLUTION_ID": "my-solution-id", + "SOLUTION_VERSION": "v9.9.9", + "START_TAGS": { + "Ref": "StartedTags", + }, + "STOP_TAGS": { + "Ref": "StoppedTags", + }, + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", + }, + }, + "Handler": "handle_orchestration_request", + "MemorySize": 128, + "Role": { + "Fn::GetAtt": [ + "SchedulingOrchestratorRoleAD0FF7B1", + "Arn", + ], + }, + "Runtime": "python3.11", + "Timeout": 300, + "TracingConfig": { + "Mode": "Active", + }, + }, + "Type": "AWS::Lambda::Function", + }, + "SchedulingOrchestratorLogGroup24735009": { + "DeletionPolicy": "Retain", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W84", + "reason": "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", + }, + ], + }, + }, + "Properties": { + "LogGroupName": { + "Fn::Join": [ + "", + [ + "/aws/lambda/", + { + "Ref": "SchedulingOrchestratorBB4C4C5A", + }, + ], + ], + }, + "RetentionInDays": { + "Ref": "LogRetentionDays", + }, + }, + "Type": "AWS::Logs::LogGroup", + "UpdateReplacePolicy": "Retain", + }, + "SchedulingOrchestratorPermissionsPolicyC2148A26": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Action::kms:GenerateDataKey*", + "Action::kms:ReEncrypt*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Permission to use solution CMK with dynamo/sns", + }, + { + "applies_to": [ + "Resource::arn::ssm:*::parameter/*", + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Orchestrator requires access to SSM parameters for translating {param: my-param} values to configured account ids", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for ssm:DescribeParameters", + }, + { + "id": "W76", + "reason": "Acknowledged IAM policy document SPCM > 25", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + 
"Statement": [ + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "SchedulingOrchestratorLogGroup24735009", + "Arn", + ], + }, + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + { + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:ConditionCheckItem", + "dynamodb:DescribeTable", + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "ConfigTable", + "Arn", + ], + }, + { + "Ref": "AWS::NoValue", + }, + ], + }, + { + "Action": "sns:Publish", + "Effect": "Allow", + "Resource": { + "Ref": "InstanceSchedulerSnsTopic", + }, + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "SchedulerLogGroup", + "Arn", + ], + }, + }, + { + "Action": "ssm:DescribeParameters", + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKey*", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + { + "Action": [ + "ssm:GetParameter", + "ssm:GetParameters", + ], + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":ssm:*:", + { + "Ref": "AWS::AccountId", + }, + ":parameter/*", + ], + ], + }, + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "SchedulingOrchestratorPermissionsPolicyC2148A26", + "Roles": [ + { + "Ref": "SchedulingOrchestratorRoleAD0FF7B1", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "SchedulingOrchestratorRoleAD0FF7B1": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": 
"lambda.amazonaws.com", + }, + }, + ], + "Version": "2012-10-17", + }, + }, + "Type": "AWS::IAM::Role", + }, + "SchedulingOrchestratorRoleDefaultPolicy29DE8B0D": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, + { + "applies_to": [ + "Resource:::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "permission to invoke request handler lambda", + }, + ], + }, + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + ], + "Effect": "Allow", + "Resource": "*", + }, + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "schedulingRequestHandlerLambdaC395DC9E", + "Arn", + ], + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "schedulingRequestHandlerLambdaC395DC9E", + "Arn", + ], + }, + ":*", + ], + ], + }, + ], + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "SchedulingOrchestratorRoleDefaultPolicy29DE8B0D", + "Roles": [ + { + "Ref": "SchedulingOrchestratorRoleAD0FF7B1", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "SpokeDeregistrationRunbookFF4A7B49": { + "Properties": { + "Content": { + "assumeRole": { + "Fn::GetAtt": [ + "SpokeDeregistrationRunbookRole0A1D0232", + "Arn", + ], + }, + "description": "Deregister a spoke account from Instance Scheduler on AWS on demand", + "mainSteps": [ + { + "action": "aws:invokeLambdaFunction", + "description": "Invokes the Instance Scheduler on AWS spoke registration lambda to deregister a given AWS Account ID", + "inputs": { + "FunctionName": { + "Fn::GetAtt": [ + "SpokeRegistrationHandler923F17AC", + "Arn", + ], + }, + "InputPayload": { + "account": "{{ AccountId }}", + "operation": "Deregister", + }, + "InvocationType": "RequestResponse", + }, + 
"name": "InvokeSpokeRegistrationLambda", + }, + ], + "parameters": { + "AccountId": { + "allowedPattern": "^\\d{12}$", + "description": "Spoke Account ID used for registration", + "type": "String", + }, + }, + "schemaVersion": "0.3", + }, + "DocumentFormat": "YAML", + "DocumentType": "Automation", + "Tags": [ + { + "Key": "CdkGenerated", + "Value": "true", + }, + ], + }, + "Type": "AWS::SSM::Document", + }, + "SpokeDeregistrationRunbookRole0A1D0232": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ssm.amazonaws.com", + }, + }, + ], + "Version": "2012-10-17", + }, + "Description": "Role assumed by SSM Automation to call the spoke registration lambda", + }, + "Type": "AWS::IAM::Role", + }, + "SpokeDeregistrationRunbookRoleDefaultPolicy208AD52D": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource:::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "permissions to invoke all versions of the spoke registration lambda", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "SpokeRegistrationHandler923F17AC", + "Arn", + ], + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "SpokeRegistrationHandler923F17AC", + "Arn", + ], + }, + ":*", + ], + ], + }, + ], + }, + ], + "Version": "2012-10-17", + }, + "PolicyName": "SpokeDeregistrationRunbookRoleDefaultPolicy208AD52D", + "Roles": [ + { + "Ref": "SpokeDeregistrationRunbookRole0A1D0232", + }, + ], + }, + "Type": "AWS::IAM::Policy", + }, + "SpokeRegistrationHandler923F17AC": { + "DependsOn": [ + "SpokeRegistrationRoleDefaultPolicy7A7A6954", + "SpokeRegistrationRole0E2E4D3E", + ], + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W89", + "reason": "This Lambda function does not need to access any resource provisioned within a VPC.", + 
}, + { + "id": "W58", + "reason": "This Lambda function has permission provided to write to CloudWatch logs using the iam roles.", + }, + { + "id": "W92", + "reason": "Lambda function is invoke by new account registration/deregistration events and is not likely to have much concurrency", + }, + { + "id": "F13", + "reason": "This lambda scopes invoke permissions to members of the same AWS organization. This is the narrowest possible scope that still allows new spoke accounts to register themselves with the hub after being deployed", + }, + ], + }, + }, + "Properties": { + "Code": "Omitted to remove snapshot dependency on code hash", + "Description": "spoke account registration handler, version v9.9.9", + "Environment": { + "Variables": { + "CONFIG_TABLE": { + "Ref": "ConfigTable", + }, + "ENABLE_DEBUG_LOGS": { + "Fn::If": [ + "TraceCondition", + "True", + "False", + ], + }, + "ISSUES_TOPIC_ARN": { + "Ref": "InstanceSchedulerSnsTopic", + }, + "LOG_GROUP": { + "Ref": "SchedulerLogGroup", + }, + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", + }, + }, + "FunctionName": { + "Fn::Join": [ + "", + [ + "InstanceScheduler-", + { + "Ref": "Namespace", + }, + "-SpokeRegistration", + ], + ], + }, + "Handler": "handle_spoke_registration_event", + "MemorySize": 128, + "Role": { + "Fn::GetAtt": [ + "SpokeRegistrationRole0E2E4D3E", + "Arn", + ], + }, + "Runtime": "python3.11", + "Timeout": 60, + "TracingConfig": { + "Mode": "Active", + }, + }, + "Type": "AWS::Lambda::Function", + }, + "SpokeRegistrationLambdaPermission": { + "Condition": "UsingAWSOrganizationsCondition", + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "F13", + "reason": "Lambda permission policy requires principal wildcard for spoke accounts to self register by invoking this function.This is acceptable as we are narrowing the authorized accounts to only those contained within the org via principalOrgId", + }, + ], + }, + }, + "Properties": { + "Action": "lambda:InvokeFunction", + 
"FunctionName": { + "Ref": "SpokeRegistrationHandler923F17AC", + }, + "Principal": "*", + "PrincipalOrgID": { + "Fn::Select": [ + 0, + { + "Ref": "Principals", + }, + ], + }, + }, + "Type": "AWS::Lambda::Permission", + }, + "SpokeRegistrationPolicy600671FC": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Action::kms:GenerateDataKey*", + "Action::kms:ReEncrypt*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Permission to use solution CMK with dynamo/sns", + }, + { + "applies_to": [ + "Resource::arn::logs:::log-group:/aws/lambda/InstanceScheduler--SpokeRegistration:log-stream:*", + ], + "id": "AwsSolutions-IAM5", + "reason": "Wildcard required for creating and writing to log stream", + }, + ], + }, + }, + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "logs:CreateLogGroup", + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":logs:", + { + "Ref": "AWS::Region", + }, + ":", + { + "Ref": "AWS::AccountId", + }, + ":log-group:/aws/lambda/InstanceScheduler-", + { + "Ref": "Namespace", + }, + "-SpokeRegistration", + ], + ], + }, + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":logs:", + { + "Ref": "AWS::Region", + }, + ":", + { + "Ref": "AWS::AccountId", + }, + ":log-group:/aws/lambda/InstanceScheduler-", + { + "Ref": "Namespace", + }, + "-SpokeRegistration:log-stream:*", + ], + ], + }, + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + }, + { + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + 
"dynamodb:ConditionCheckItem", + "dynamodb:BatchWriteItem", + "dynamodb:PutItem", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem", + "dynamodb:DescribeTable", + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "ConfigTable", + "Arn", + ], + }, + { + "Ref": "AWS::NoValue", + }, + ], + }, + { + "Action": "sns:Publish", + "Effect": "Allow", + "Resource": { + "Ref": "InstanceSchedulerSnsTopic", + }, }, { "Action": [ - "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", - "logs:PutRetentionPolicy", ], "Effect": "Allow", - "Resource": [ - { - "Fn::Sub": "arn:\${AWS::Partition}:logs:\${AWS::Region}:\${AWS::AccountId}:log-group:/aws/lambda/*", - }, - { - "Fn::GetAtt": [ - "SchedulerLogGroup", - "Arn", - ], - }, - ], + "Resource": { + "Fn::GetAtt": [ + "SchedulerLogGroup", + "Arn", + ], + }, }, ], "Version": "2012-10-17", }, - "PolicyName": "EC2DynamoDBPolicy", + "PolicyName": "SpokeRegistrationPolicy600671FC", "Roles": [ { - "Ref": "SchedulerRole", + "Ref": "SpokeRegistrationRole0E2E4D3E", }, ], }, "Type": "AWS::IAM::Policy", }, - "Ec2PermissionsB6E87802": { - "Metadata": { - "cdk_nag": { - "rules_to_suppress": [ - { - "id": "AwsSolutions-IAM5", - "reason": "This Lambda function needs to be able to modify ec2 instances for scheduling purposes.", - }, - ], - }, - }, + "SpokeRegistrationRole0E2E4D3E": { "Properties": { - "PolicyDocument": { + "AssumeRolePolicyDocument": { "Statement": [ - { - "Action": "ec2:ModifyInstanceAttribute", - "Effect": "Allow", - "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:ec2:*:\${AWS::AccountId}:instance/*", - }, - }, { "Action": "sts:AssumeRole", "Effect": "Allow", - "Resource": { - "Fn::Sub": [ - "arn:\${AWS::Partition}:iam::*:role/\${Namespace}-\${Name}", - { - "Name": { - "Fn::FindInMap": [ - "mappings", - "SchedulerRole", - "Name", - ], - }, - }, - ], + "Principal": { + "Service": "lambda.amazonaws.com", }, }, ], "Version": "2012-10-17", }, - "PolicyName": "Ec2PermissionsB6E87802", - "Roles": [ - { - 
"Ref": "SchedulerRole", - }, - ], }, - "Type": "AWS::IAM::Policy", + "Type": "AWS::IAM::Role", }, - "EventBusRuleLambdaPermission": { - "Condition": "IsMemberOfOrganization", - "Properties": { - "Action": "lambda:InvokeFunction", - "FunctionName": { - "Ref": "Main", + "SpokeRegistrationRoleDefaultPolicy7A7A6954": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, + ], }, - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": [ - "schedulerssmparametercrossaccountevents", - "Arn", + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", + }, ], }, }, - "Type": "AWS::Lambda::Permission", - }, - "InstanceSchedulerEncryptionKey": { - "DeletionPolicy": "Delete", "Properties": { - "Description": "Key for SNS", - "EnableKeyRotation": true, - "Enabled": true, - "KeyPolicy": { + "PolicyDocument": { "Statement": [ - { - "Action": "kms:*", - "Effect": "Allow", - "Principal": { - "AWS": { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition", - }, - ":iam::111111111111:root", - ], - ], - }, - }, - "Resource": "*", - "Sid": "default", - }, { "Action": [ - "kms:GenerateDataKey*", - "kms:Decrypt", + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", ], "Effect": "Allow", - "Principal": { - "AWS": { - "Fn::GetAtt": [ - "SchedulerRole", - "Arn", - ], - }, - }, "Resource": "*", - "Sid": "Allows use of key", }, ], "Version": "2012-10-17", }, + "PolicyName": "SpokeRegistrationRoleDefaultPolicy7A7A6954", + "Roles": [ + { + "Ref": "SpokeRegistrationRole0E2E4D3E", + }, + ], }, - "Type": "AWS::KMS::Key", - "UpdateReplacePolicy": "Delete", + "Type": "AWS::IAM::Policy", }, - "InstanceSchedulerEncryptionKeyAlias": { + "StateTable": { + "DeletionPolicy": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "Retain", + "Delete", + ], + }, "Properties": { - "AliasName": { - "Fn::Join": [ - "", - [ - 
"alias/", - { - "Ref": "AWS::StackName", - }, - "-instance-scheduler-encryption-key", - ], + "AttributeDefinitions": [ + { + "AttributeName": "service", + "AttributeType": "S", + }, + { + "AttributeName": "account-region", + "AttributeType": "S", + }, + ], + "BillingMode": "PAY_PER_REQUEST", + "DeletionProtectionEnabled": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "True", + "False", ], }, - "TargetKeyId": { - "Fn::GetAtt": [ - "InstanceSchedulerEncryptionKey", - "Arn", - ], + "KeySchema": [ + { + "AttributeName": "service", + "KeyType": "HASH", + }, + { + "AttributeName": "account-region", + "KeyType": "RANGE", + }, + ], + "PointInTimeRecoverySpecification": { + "PointInTimeRecoveryEnabled": true, }, - }, - "Type": "AWS::KMS::Alias", - }, - "InstanceSchedulerSnsTopic": { - "Properties": { - "KmsMasterKeyId": { - "Fn::GetAtt": [ - "InstanceSchedulerEncryptionKey", - "Arn", - ], + "SSESpecification": { + "KMSMasterKeyId": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, + "SSEEnabled": true, + "SSEType": "KMS", }, }, - "Type": "AWS::SNS::Topic", + "Type": "AWS::DynamoDB::Table", + "UpdateReplacePolicy": { + "Fn::If": [ + "ddbDeletionProtectionCondition", + "Retain", + "Delete", + ], + }, }, - "Main": { + "schedulingRequestHandlerLambdaC395DC9E": { "DependsOn": [ - "EC2DynamoDBPolicy", - "Ec2PermissionsB6E87802", - "SchedulerPolicy", - "SchedulerRDSPolicy2E7C328A", - "SchedulerRoleDefaultPolicy66F774B8", - "SchedulerRole", + "schedulingRequestHandlerRoleDefaultPolicy938ADC4A", + "schedulingRequestHandlerRoleD87803EB", ], "Metadata": { - "cdk_nag": { - "rules_to_suppress": [ - { - "id": "AwsSolutions-L1", - "reason": "Lambda runtime held back to the newest supported by all partitions", - }, - ], - }, "cfn_nag": { "rules_to_suppress": [ { @@ -1366,95 +5128,76 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` }, { "id": "W92", - "reason": "Lambda function is only used by the event rule periodically, concurrent calls are very 
limited.", + "reason": "Need to investigate appropriate ReservedConcurrentExecutions for this lambda", }, ], }, }, "Properties": { "Code": "Omitted to remove snapshot dependency on code hash", - "Description": "EC2 and RDS instance scheduler, version v1.5.0", + "Description": "Handles scheduling requests for Instance Scheduler on AWS, version v9.9.9", "Environment": { "Variables": { - "ACCOUNT": "111111111111", - "APP_NAMESPACE": { - "Ref": "Namespace", - }, "CONFIG_TABLE": { "Ref": "ConfigTable", }, - "DDB_TABLE_NAME": { - "Ref": "StateTable", - }, "DEFAULT_TIMEZONE": { "Ref": "DefaultTimezone", }, - "ENABLE_AWS_ORGANIZATIONS": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "UsingAWSOrganizations", - }, + "ENABLE_DEBUG_LOGS": { + "Fn::If": [ + "TraceCondition", + "True", + "False", ], }, - "ENABLE_CLOUDWATCH_METRICS": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "UseCloudWatchMetrics", - }, + "ENABLE_DOCDB_SERVICE": { + "Fn::If": [ + "ScheduleDocDbCondition", + "True", + "False", ], }, - "ENABLE_EC2_SERVICE": { + "ENABLE_EC2_SSM_MAINTENANCE_WINDOWS": { "Fn::If": [ - "ScheduleEC2", + "EnableSSMMaintenanceWindowsCondition", "True", "False", ], }, - "ENABLE_EC2_SSM_MAINTENANCE_WINDOWS": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "EnableSSMMaintenanceWindows", - }, + "ENABLE_NEPTUNE_SERVICE": { + "Fn::If": [ + "ScheduleNeptuneCondition", + "True", + "False", + ], + }, + "ENABLE_OPS_MONITORING": { + "Fn::If": [ + "OpsMonitoringCondition", + "True", + "False", ], }, "ENABLE_RDS_CLUSTERS": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "ScheduleRdsClusters", - }, + "Fn::If": [ + "EnableRdsClusterSchedulingCondition", + "True", + "False", ], }, "ENABLE_RDS_SERVICE": { "Fn::If": [ - "ScheduleRDS", + "ScheduleRdsCondition", "True", "False", ], }, "ENABLE_RDS_SNAPSHOTS": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "CreateRdsSnapshot", - }, - ], - }, - 
"ENABLE_SCHEDULE_HUB_ACCOUNT": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "ScheduleLambdaAccount", - }, + "Fn::If": [ + "CreateRdsSnapshotCondition", + "True", + "False", ], }, "ISSUES_TOPIC_ARN": { @@ -1463,65 +5206,45 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "LOG_GROUP": { "Ref": "SchedulerLogGroup", }, - "MAINTENANCE_WINDOW_TABLE": { + "MAINT_WINDOW_TABLE": { "Ref": "MaintenanceWindowTable", }, - "METRICS_URL": { - "Fn::FindInMap": [ - "mappings", - "Settings", - "MetricsUrl", + "METRICS_URL": "https://metrics.awssolutionsbuilder.com/generic", + "METRICS_UUID": { + "Fn::GetAtt": [ + "MetricsUuidProvider", + "Uuid", ], }, - "SCHEDULER_FREQUENCY": { - "Ref": "SchedulerFrequency", - }, "SCHEDULER_ROLE_NAME": { - "Fn::FindInMap": [ - "mappings", - "SchedulerRole", - "Name", - ], - }, - "SCHEDULE_REGIONS": { "Fn::Join": [ - ",", - { - "Ref": "Regions", - }, + "", + [ + { + "Ref": "Namespace", + }, + "-Scheduler-Role", + ], ], }, "SCHEDULE_TAG_KEY": { "Ref": "TagName", }, - "SEND_METRICS": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Fn::FindInMap": [ - "Send", - "AnonymousUsage", - "Data", - ], - }, - ], + "SCHEDULING_INTERVAL_MINUTES": { + "Ref": "SchedulerFrequency", }, - "SOLUTION_ID": { - "Fn::FindInMap": [ - "mappings", - "Settings", - "MetricsSolutionId", + "SEND_METRICS": { + "Fn::If": [ + "AnonymizedMetricsEnabled", + "True", + "False", ], }, - "SOLUTION_VERSION": "v1.5.0", - "STACK_ID": { - "Ref": "AWS::StackId", - }, + "SOLUTION_ID": "my-solution-id", + "SOLUTION_VERSION": "v9.9.9", "STACK_NAME": { "Ref": "AWS::StackName", }, - "START_EC2_BATCH_SIZE": "5", "START_TAGS": { "Ref": "StartedTags", }, @@ -1531,47 +5254,20 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "STOP_TAGS": { "Ref": "StoppedTags", }, - "TRACE": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "Trace", - }, - ], - }, - "USER_AGENT_EXTRA": "AwsSolution/SO0030/v1.5.0", - "UUID_KEY": { - "Fn::FindInMap": [ - "Send", 
- "ParameterKey", - "UniqueId", - ], - }, + "USER_AGENT_EXTRA": "AwsSolution/my-solution-id/v9.9.9", }, }, - "FunctionName": { - "Fn::Join": [ - "", - [ - { - "Ref": "AWS::StackName", - }, - "-InstanceSchedulerMain", - ], - ], - }, - "Handler": "instance_scheduler.main.lambda_handler", + "Handler": "handle_scheduling_request", "MemorySize": { "Ref": "MemorySize", }, "Role": { "Fn::GetAtt": [ - "SchedulerRole", + "schedulingRequestHandlerRoleD87803EB", "Arn", ], }, - "Runtime": "python3.10", + "Runtime": "python3.11", "Timeout": 300, "TracingConfig": { "Mode": "Active", @@ -1579,200 +5275,14 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` }, "Type": "AWS::Lambda::Function", }, - "MaintenanceWindowTable": { - "DeletionPolicy": "Delete", - "Properties": { - "AttributeDefinitions": [ - { - "AttributeName": "Name", - "AttributeType": "S", - }, - { - "AttributeName": "account-region", - "AttributeType": "S", - }, - ], - "BillingMode": "PAY_PER_REQUEST", - "KeySchema": [ - { - "AttributeName": "Name", - "KeyType": "HASH", - }, - { - "AttributeName": "account-region", - "KeyType": "RANGE", - }, - ], - "PointInTimeRecoverySpecification": { - "PointInTimeRecoveryEnabled": true, - }, - "SSESpecification": { - "KMSMasterKeyId": { - "Ref": "InstanceSchedulerEncryptionKey", - }, - "SSEEnabled": true, - "SSEType": "KMS", - }, - }, - "Type": "AWS::DynamoDB::Table", - "UpdateReplacePolicy": "Delete", - }, - "SchedulerConfigHelper": { - "DeletionPolicy": "Delete", - "DependsOn": [ - "SchedulerLogGroup", - ], - "Properties": { - "ServiceToken": { - "Fn::GetAtt": [ - "Main", - "Arn", - ], - }, - "aws_partition": { - "Fn::Sub": "\${AWS::Partition}", - }, - "create_rds_snapshot": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "CreateRdsSnapshot", - }, - ], - }, - "default_timezone": { - "Ref": "DefaultTimezone", - }, - "enable_ssm_maintenance_windows": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "EnableSSMMaintenanceWindows", - }, - ], - 
}, - "log_retention_days": { - "Ref": "LogRetentionDays", - }, - "namespace": { - "Ref": "Namespace", - }, - "regions": { - "Ref": "Regions", - }, - "remote_account_ids": { - "Ref": "Principals", - }, - "schedule_clusters": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "ScheduleRdsClusters", - }, - ], - }, - "schedule_lambda_account": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "ScheduleLambdaAccount", - }, - ], - }, - "scheduled_services": { - "Fn::Split": [ - ",", - { - "Fn::FindInMap": [ - "mappings", - "Services", - { - "Ref": "ScheduledServices", - }, - ], - }, - ], - }, - "scheduler_role_name": { - "Fn::FindInMap": [ - "mappings", - "SchedulerRole", - "Name", - ], - }, - "stack_version": "v1.5.0", - "started_tags": { - "Ref": "StartedTags", - }, - "stopped_tags": { - "Ref": "StoppedTags", - }, - "tagname": { - "Ref": "TagName", - }, - "timeout": 120, - "trace": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "Trace", - }, - ], - }, - "use_aws_organizations": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "UsingAWSOrganizations", - }, - ], - }, - "use_metrics": { - "Fn::FindInMap": [ - "mappings", - "TrueFalse", - { - "Ref": "UseCloudWatchMetrics", - }, - ], - }, - }, - "Type": "Custom::ServiceSetup", - "UpdateReplacePolicy": "Delete", - }, - "SchedulerEventRuleAllowEventRulestackschedulerlambdaC9E2D53DB26536BE": { - "Properties": { - "Action": "lambda:InvokeFunction", - "FunctionName": { - "Fn::GetAtt": [ - "Main", - "Arn", - ], - }, - "Principal": "events.amazonaws.com", - "SourceArn": { - "Fn::GetAtt": [ - "SchedulerRule", - "Arn", - ], - }, - }, - "Type": "AWS::Lambda::Permission", - }, - "SchedulerLogGroup": { - "DeletionPolicy": "Delete", + "schedulingRequestHandlerLogGroup92A14530": { + "DeletionPolicy": "Retain", "Metadata": { "cfn_nag": { "rules_to_suppress": [ { "id": "W84", - "reason": "CloudWatch log groups only have transactional data from the Lambda function, this template 
has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group.", + "reason": "This template has to be supported in gov cloud which doesn't yet have the feature to provide kms key id to cloudwatch log group", }, ], }, @@ -1782,10 +5292,10 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "Fn::Join": [ "", [ + "/aws/lambda/", { - "Ref": "AWS::StackName", + "Ref": "schedulingRequestHandlerLambdaC395DC9E", }, - "-logs", ], ], }, @@ -1794,93 +5304,45 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` }, }, "Type": "AWS::Logs::LogGroup", - "UpdateReplacePolicy": "Delete", + "UpdateReplacePolicy": "Retain", }, - "SchedulerPolicy": { + "schedulingRequestHandlerPolicy1BE05FC4": { "Metadata": { "cdk_nag": { "rules_to_suppress": [ { - "id": "AwsSolutions-IAM5", - "reason": "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", - }, - ], - }, - }, - "Properties": { - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "rds:AddTagsToResource", - "rds:RemoveTagsFromResource", - "rds:DescribeDBSnapshots", - "rds:StartDBInstance", - "rds:StopDBInstance", + "applies_to": [ + "Action::kms:GenerateDataKey*", + "Action::kms:ReEncrypt*", ], - "Effect": "Allow", - "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:db:*", - }, + "id": "AwsSolutions-IAM5", + "reason": "Permission to use solution CMK with dynamo/sns", }, { - "Action": [ - "ec2:StartInstances", - "ec2:StopInstances", - "ec2:CreateTags", - "ec2:DeleteTags", + "applies_to": [ + "Resource::arn::iam::*:role/-Scheduler-Role", ], - "Effect": "Allow", - "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:ec2:*:\${AWS::AccountId}:instance/*", - }, - }, - { - "Action": "sns:Publish", - "Effect": "Allow", - "Resource": { - "Ref": "InstanceSchedulerSnsTopic", - }, - }, - { - "Action": "lambda:InvokeFunction", - "Effect": "Allow", - "Resource": { - "Fn::Sub": 
"arn:\${AWS::Partition}:lambda:\${AWS::Region}:\${AWS::AccountId}:function:\${AWS::StackName}-InstanceSchedulerMain", - }, + "id": "AwsSolutions-IAM5", + "reason": "This handler's primary purpose is to assume role into spoke accounts for scheduling purposes", }, { - "Action": [ - "kms:GenerateDataKey*", - "kms:Decrypt", + "applies_to": [ + "Resource::*", ], - "Effect": "Allow", - "Resource": { - "Fn::GetAtt": [ - "InstanceSchedulerEncryptionKey", - "Arn", - ], - }, + "id": "AwsSolutions-IAM5", + "reason": "Ability to publish custom metrics to cloudwatch", }, ], - "Version": "2012-10-17", }, - "PolicyName": "SchedulerPolicy", - "Roles": [ - { - "Ref": "SchedulerRole", - }, - ], - }, - "Type": "AWS::IAM::Policy", - }, - "SchedulerRDSPolicy2E7C328A": { - "Metadata": { - "cdk_nag": { + "cfn_nag": { "rules_to_suppress": [ { - "id": "AwsSolutions-IAM5", - "reason": "All policies have been scoped to be as restrictive as possible. This solution needs to access ec2/rds resources across all regions.", + "id": "W12", + "reason": "cloudwatch:PutMetricData action requires wildcard", + }, + { + "id": "W76", + "reason": "Acknowledged IAM policy document SPCM > 25", }, ], }, @@ -1890,75 +5352,69 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` "Statement": [ { "Action": [ - "rds:DeleteDBSnapshot", - "rds:DescribeDBSnapshots", - "rds:StopDBInstance", - ], - "Effect": "Allow", - "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:snapshot:*", - }, - }, - { - "Action": [ - "rds:AddTagsToResource", - "rds:RemoveTagsFromResource", - "rds:StartDBCluster", - "rds:StopDBCluster", + "logs:CreateLogStream", + "logs:PutLogEvents", ], "Effect": "Allow", "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:rds:*:\${AWS::AccountId}:cluster:*", + "Fn::GetAtt": [ + "schedulingRequestHandlerLogGroup92A14530", + "Arn", + ], }, - }, - ], - "Version": "2012-10-17", - }, - "PolicyName": "SchedulerRDSPolicy2E7C328A", - "Roles": [ - { - "Ref": "SchedulerRole", - }, - ], - 
}, - "Type": "AWS::IAM::Policy", - }, - "SchedulerRole": { - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ + }, { - "Action": "sts:AssumeRole", + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + ], "Effect": "Allow", - "Principal": { - "Service": "events.amazonaws.com", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], }, }, { - "Action": "sts:AssumeRole", + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:ConditionCheckItem", + "dynamodb:DescribeTable", + ], "Effect": "Allow", - "Principal": { - "Service": "lambda.amazonaws.com", - }, + "Resource": [ + { + "Fn::GetAtt": [ + "ConfigTable", + "Arn", + ], + }, + { + "Ref": "AWS::NoValue", + }, + ], }, - ], - "Version": "2012-10-17", - }, - "Path": "/", - }, - "Type": "AWS::IAM::Role", - }, - "SchedulerRoleDefaultPolicy66F774B8": { - "Properties": { - "PolicyDocument": { - "Statement": [ { "Action": [ - "xray:PutTraceSegments", - "xray:PutTelemetryRecords", + "kms:Decrypt", + "kms:DescribeKey", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", ], "Effect": "Allow", - "Resource": "*", + "Resource": { + "Fn::GetAtt": [ + "InstanceSchedulerEncryptionKey", + "Arn", + ], + }, }, { "Action": [ @@ -1990,226 +5446,220 @@ exports[`InstanceSchedulerStack snapshot test 1`] = ` }, { "Action": [ - "dynamodb:DeleteItem", - "dynamodb:GetItem", - "dynamodb:PutItem", + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", "dynamodb:Query", + "dynamodb:GetItem", "dynamodb:Scan", + "dynamodb:ConditionCheckItem", "dynamodb:BatchWriteItem", + "dynamodb:PutItem", "dynamodb:UpdateItem", + "dynamodb:DeleteItem", + "dynamodb:DescribeTable", ], "Effect": "Allow", "Resource": [ { "Fn::GetAtt": [ - "ConfigTable", + "MaintenanceWindowTable", "Arn", ], }, { - "Fn::GetAtt": [ - "MaintenanceWindowTable", - "Arn", - ], + "Ref": 
"AWS::NoValue", }, ], }, + { + "Action": "sns:Publish", + "Effect": "Allow", + "Resource": { + "Ref": "InstanceSchedulerSnsTopic", + }, + }, { "Action": [ - "ssm:PutParameter", - "ssm:GetParameter", + "logs:CreateLogStream", + "logs:PutLogEvents", ], "Effect": "Allow", "Resource": { - "Fn::Sub": "arn:\${AWS::Partition}:ssm:\${AWS::Region}:\${AWS::AccountId}:parameter/Solutions/instance-scheduler-on-aws/UUID/*", + "Fn::GetAtt": [ + "SchedulerLogGroup", + "Arn", + ], + }, + }, + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition", + }, + ":iam::*:role/", + { + "Ref": "Namespace", + }, + "-Scheduler-Role", + ], + ], + }, + }, + { + "Action": "cloudwatch:PutMetricData", + "Condition": { + "StringEquals": { + "cloudwatch:namespace": { + "Fn::Join": [ + "", + [ + { + "Ref": "AWS::StackName", + }, + ":InstanceScheduler", + ], + ], + }, + }, }, + "Effect": "Allow", + "Resource": "*", }, ], "Version": "2012-10-17", }, - "PolicyName": "SchedulerRoleDefaultPolicy66F774B8", + "PolicyName": "schedulingRequestHandlerPolicy1BE05FC4", "Roles": [ { - "Ref": "SchedulerRole", + "Ref": "schedulingRequestHandlerRoleD87803EB", }, ], }, "Type": "AWS::IAM::Policy", }, - "SchedulerRule": { - "Properties": { - "Description": "Instance Scheduler - Rule to trigger instance for scheduler function version v1.5.0", - "ScheduleExpression": { - "Fn::FindInMap": [ - "mappings", - "Timeouts", + "schedulingRequestHandlerRoleD87803EB": { + "Metadata": { + "cfn_nag": { + "rules_to_suppress": [ { - "Ref": "SchedulerFrequency", + "id": "W28", + "reason": "Explicit role name required for assumedBy arn principle in spoke stack", }, ], }, - "State": { - "Fn::FindInMap": [ - "mappings", - "EnabledDisabled", + }, + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ { - "Ref": "SchedulingActive", + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com", + }, }, ], + 
"Version": "2012-10-17", }, - "Targets": [ - { - "Arn": { - "Fn::GetAtt": [ - "Main", - "Arn", - ], - }, - "Id": "Target0", - "Input": "{"scheduled_action":"run_orchestrator"}", - }, - ], - }, - "Type": "AWS::Events::Rule", - }, - "StateTable": { - "DeletionPolicy": "Delete", - "Properties": { - "AttributeDefinitions": [ - { - "AttributeName": "service", - "AttributeType": "S", - }, - { - "AttributeName": "account-region", - "AttributeType": "S", - }, - ], - "BillingMode": "PAY_PER_REQUEST", - "KeySchema": [ - { - "AttributeName": "service", - "KeyType": "HASH", - }, - { - "AttributeName": "account-region", - "KeyType": "RANGE", - }, - ], - "PointInTimeRecoverySpecification": { - "PointInTimeRecoveryEnabled": true, - }, - "SSESpecification": { - "KMSMasterKeyId": { - "Ref": "InstanceSchedulerEncryptionKey", - }, - "SSEEnabled": true, - "SSEType": "KMS", - }, - }, - "Type": "AWS::DynamoDB::Table", - "UpdateReplacePolicy": "Delete", - }, - "schedulereventbus": { - "Condition": "IsMemberOfOrganization", - "Properties": { - "Name": { + "RoleName": { "Fn::Join": [ "", [ { "Ref": "Namespace", }, - "-", - { - "Fn::FindInMap": [ - "mappings", - "SchedulerEventBusName", - "Name", - ], - }, + "-SchedulingRequestHandler-Role", ], ], }, }, - "Type": "AWS::Events::EventBus", + "Type": "AWS::IAM::Role", }, - "schedulereventbuspolicy": { - "Condition": "IsMemberOfOrganization", - "Properties": { - "Action": "events:PutEvents", - "Condition": { - "Key": "aws:PrincipalOrgID", - "Type": "StringEquals", - "Value": { - "Fn::Select": [ - 0, - { - "Ref": "Principals", - }, - ], - }, - }, - "EventBusName": { - "Fn::GetAtt": [ - "schedulereventbus", - "Name", + "schedulingRequestHandlerRoleDefaultPolicy938ADC4A": { + "Metadata": { + "cdk_nag": { + "rules_to_suppress": [ + { + "applies_to": [ + "Resource::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "required for xray", + }, + { + "applies_to": [ + "Resource:::*", + ], + "id": "AwsSolutions-IAM5", + "reason": "ability to call 
spoke-registration handler", + }, ], }, - "Principal": "*", - "StatementId": { - "Fn::GetAtt": [ - "schedulereventbus", - "Name", + "cfn_nag": { + "rules_to_suppress": [ + { + "id": "W12", + "reason": "Wildcard required for xray", + }, ], }, }, - "Type": "AWS::Events::EventBusPolicy", - }, - "schedulerssmparametercrossaccountevents": { - "Condition": "IsMemberOfOrganization", "Properties": { - "Description": "Event rule to invoke Instance Scheduler lambda function to store spoke account id(s) in configuration.", - "EventBusName": { - "Fn::GetAtt": [ - "schedulereventbus", - "Name", - ], - }, - "EventPattern": { - "detail": { - "name": [ - "/instance-scheduler/do-not-delete-manually", - ], - "operation": [ - "Create", - "Delete", - ], - "type": [ - "String", - ], - }, - "detail-type": [ - "Parameter Store Change", - ], - "source": [ - "aws.ssm", + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + ], + "Effect": "Allow", + "Resource": "*", + }, ], + "Version": "2012-10-17", }, - "State": "ENABLED", - "Targets": [ + "PolicyName": "schedulingRequestHandlerRoleDefaultPolicy938ADC4A", + "Roles": [ { - "Arn": { - "Fn::GetAtt": [ - "Main", - "Arn", - ], - }, - "Id": "Scheduler-Lambda-Function", + "Ref": "schedulingRequestHandlerRoleD87803EB", }, ], }, - "Type": "AWS::Events::Rule", + "Type": "AWS::IAM::Policy", + }, + }, + "Rules": { + "CheckBootstrapVersion": { + "Assertions": [ + { + "Assert": { + "Fn::Not": [ + { + "Fn::Contains": [ + [ + "1", + "2", + "3", + "4", + "5", + ], + { + "Ref": "BootstrapVersion", + }, + ], + }, + ], + }, + "AssertDescription": "CDK bootstrap stack version 6 required. 
Please run 'cdk bootstrap' with a recent version of the CDK CLI.", + }, + ], }, }, } diff --git a/source/instance-scheduler/tests/init-jest-extended.ts b/source/instance-scheduler/tests/init-jest-extended.ts new file mode 100644 index 00000000..8ac20f2b --- /dev/null +++ b/source/instance-scheduler/tests/init-jest-extended.ts @@ -0,0 +1,6 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import * as matchers from "jest-extended"; +import "jest-extended/all.js"; + +expect.extend(matchers); diff --git a/source/instance-scheduler/tests/instance-scheduler-remote-stack.test.ts b/source/instance-scheduler/tests/instance-scheduler-remote-stack.test.ts index a9134892..32e909b1 100644 --- a/source/instance-scheduler/tests/instance-scheduler-remote-stack.test.ts +++ b/source/instance-scheduler/tests/instance-scheduler-remote-stack.test.ts @@ -4,5 +4,13 @@ import { createSpokeStack } from "./instance-scheduler-stack-factory"; import { Template } from "aws-cdk-lib/assertions"; test("InstanceSchedulerRemoteStack snapshot test", () => { - expect(Template.fromStack(createSpokeStack())).toMatchSnapshot(); + const remoteStack = Template.fromStack(createSpokeStack()); + const resources = remoteStack.findResources("AWS::Lambda::Function"); + const remoteStackJson = remoteStack.toJSON(); + + for (const lambda_function in resources) { + remoteStackJson["Resources"][lambda_function]["Properties"]["Code"] = + "Omitted to remove snapshot dependency on code hash"; + } + expect(remoteStackJson).toMatchSnapshot(); }); diff --git a/source/instance-scheduler/tests/instance-scheduler-stack-factory.ts b/source/instance-scheduler/tests/instance-scheduler-stack-factory.ts index 0980badc..147d958e 100644 --- a/source/instance-scheduler/tests/instance-scheduler-stack-factory.ts +++ b/source/instance-scheduler/tests/instance-scheduler-stack-factory.ts @@ -1,39 +1,33 @@ // Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +import { App, Aspects, Stack } from "aws-cdk-lib"; +import { AwsSolutionsChecks } from "cdk-nag"; +import { InstanceSchedulerStack } from "../lib/instance-scheduler-stack"; +import { TestFunctionFactory } from "../lib/lambda-functions/function-factory"; +import { SpokeStack } from "../lib/remote-stack"; -import * as instanceSchedulerHubStack from "../lib/instance-scheduler-stack"; -import * as instanceSchedulerSpokeStack from "../lib/remote-stack"; -import * as cdk from "aws-cdk-lib"; -import { DefaultStackSynthesizer } from "aws-cdk-lib"; - -export function createHubStack(): cdk.Stack { - const app = new cdk.App(); - const envEU = { account: "111111111111", region: "eu-west-1" }; - const stack = new instanceSchedulerHubStack.InstanceSchedulerStack(app, "stack", { - env: envEU, - description: "", - solutionId: "SO0030", - solutionName: "instance-scheduler-on-aws", - solutionVersion: "v1.5.0", - appregApplicationName: "AWS-Solutions", - appregSolutionName: "instance-scheduler-on-aws", - synthesizer: new DefaultStackSynthesizer({ generateBootstrapVersionRule: false }), +export function createHubStack(): Stack { + const app = new App(); + Aspects.of(app).add(new AwsSolutionsChecks({ verbose: true })); + return new InstanceSchedulerStack(app, "stack", { + solutionId: "my-solution-id", + solutionName: "my-solution-name", + solutionVersion: "v9.9.9", + appregApplicationName: "my-appreg-app-name", + appregSolutionName: "my-appreg-solution-name", + factory: new TestFunctionFactory(), }); - return stack; } -export function createSpokeStack(): cdk.Stack { - const envEU = { account: "111111111111", region: "eu-west-1" }; - const app = new cdk.App(); - const stack = new instanceSchedulerSpokeStack.InstanceSchedulerRemoteStack(app, "stack", { - env: envEU, - description: "", - solutionId: "SO0030", - solutionName: "instance-scheduler-on-aws", - solutionVersion: "v1.5.0", - appregApplicationName: "AWS-Solutions", - 
appregSolutionName: "instance-scheduler-on-aws", - synthesizer: new DefaultStackSynthesizer({ generateBootstrapVersionRule: false }), +export function createSpokeStack(): Stack { + const app = new App(); + Aspects.of(app).add(new AwsSolutionsChecks({ verbose: true })); + return new SpokeStack(app, "stack", { + solutionId: "my-solution-id", + solutionName: "my-solution-name", + solutionVersion: "v9.9.9", + appregApplicationName: "my-appreg-app-name", + appregSolutionName: "my-appreg-solution-name", + factory: new TestFunctionFactory(), }); - return stack; } diff --git a/source/instance-scheduler/tests/instance-scheduler-stack.test.ts b/source/instance-scheduler/tests/instance-scheduler-stack.test.ts index 234d53cf..88a4c3dc 100644 --- a/source/instance-scheduler/tests/instance-scheduler-stack.test.ts +++ b/source/instance-scheduler/tests/instance-scheduler-stack.test.ts @@ -3,18 +3,67 @@ import { Template } from "aws-cdk-lib/assertions"; import { createHubStack } from "./instance-scheduler-stack-factory"; +// share a single Template for testing to avoid redundant Docker builds +const hubStack = Template.fromStack(createHubStack()); + test("InstanceSchedulerStack snapshot test", () => { - const hubStackJson = Template.fromStack(createHubStack()).toJSON(); - hubStackJson.Resources.Main.Properties.Code = "Omitted to remove snapshot dependency on code hash"; + const resources = hubStack.findResources("AWS::Lambda::Function"); + const hubStackJson = hubStack.toJSON(); + + for (const lambda_function in resources) { + hubStackJson["Resources"][lambda_function]["Properties"]["Code"] = + "Omitted to remove snapshot dependency on code hash"; + } expect(hubStackJson).toMatchSnapshot(); }); test("Hub stack has expected defaults for started and stopped tags", () => { - const hubStackTemplate = Template.fromStack(createHubStack()); - expect(hubStackTemplate.findParameters("StartedTags")["StartedTags"]["Default"]).toBe( - "InstanceScheduler-LastAction=Started By {scheduler} 
{year}/{month}/{day} {hour}:{minute}{timezone}, ", + expect(hubStack.findParameters("StartedTags")["StartedTags"]["Default"]).toBe( + "InstanceScheduler-LastAction=Started By {scheduler} {year}-{month}-{day} {hour}:{minute} {timezone}", ); - expect(hubStackTemplate.findParameters("StoppedTags")["StoppedTags"]["Default"]).toBe( - "InstanceScheduler-LastAction=Stopped By {scheduler} {year}/{month}/{day} {hour}:{minute}{timezone}, ", + expect(hubStack.findParameters("StoppedTags")["StoppedTags"]["Default"]).toBe( + "InstanceScheduler-LastAction=Stopped By {scheduler} {year}-{month}-{day} {hour}:{minute} {timezone}", ); }); + +type CfnParameterGroup = { Label: { default: string }; Parameters: string[] }; + +describe("hub template", function () { + const hubTemplateJson = hubStack.toJSON(); + + describe("parameters", function () { + const parameters = hubStack.findParameters("*"); + const cfnInterface = hubTemplateJson.Metadata["AWS::CloudFormation::Interface"]; + + expect(Object.getOwnPropertyNames(parameters).length).toBeGreaterThan(0); + + Object.getOwnPropertyNames(parameters).forEach((parameterName: string) => { + if (parameterName === "BootstrapVersion") { + // skip automatically-generated parameter, it will not be present in the prod template + return; + } + + describe(parameterName, function () { + it("has a label", function () { + const label = cfnInterface.ParameterLabels[parameterName].default; + expect(typeof label).toStrictEqual("string"); + expect(label.length).toBeGreaterThan(0); + }); + + it("belongs to a group", function () { + expect(Array.isArray(cfnInterface.ParameterGroups)).toStrictEqual(true); + expect( + cfnInterface.ParameterGroups.some((group: CfnParameterGroup) => { + return ( + Array.isArray(group.Parameters) && + group.Parameters.includes(parameterName) && + typeof group.Label.default === "string" && + group.Label.default.length > 0 + ); + }), + ).toStrictEqual(true); + }); + }); + }); + }); +}); diff --git 
a/source/instance-scheduler/tests/lib/asg-scheduler.test.ts b/source/instance-scheduler/tests/lib/asg-scheduler.test.ts new file mode 100644 index 00000000..66af7a2c --- /dev/null +++ b/source/instance-scheduler/tests/lib/asg-scheduler.test.ts @@ -0,0 +1,31 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Template } from "aws-cdk-lib/assertions"; +import { conditions, createAsgSchedulerStack } from "../test_utils/stack-factories"; + +// Brief type of CloudFormation resource for testing +type CfnResourceType = { + readonly Type: string; + readonly Properties: unknown; + readonly Condition?: string; +}; + +it("should put a condition on every resource in AsgScheduler", () => { + const id = "ASGSchedulerTest"; + const asgSchedulerStack = createAsgSchedulerStack(id); + const jsonTemplate = Template.fromStack(asgSchedulerStack).toJSON(); + const resources: { [key: string]: CfnResourceType } = jsonTemplate.Resources; + + if (!resources) throw new Error("Resources not found."); + + for (const key in resources) { + const condition = resources[key].Condition; + + if (key.startsWith(id)) { + expect(condition).toEqual(conditions.enableAsgs); + } else { + expect(condition).not.toEqual(conditions.enableAsgs); + } + } +}); diff --git a/source/instance-scheduler/tests/lib/cfn-nag.test.ts b/source/instance-scheduler/tests/lib/cfn-nag.test.ts new file mode 100644 index 00000000..f24882fa --- /dev/null +++ b/source/instance-scheduler/tests/lib/cfn-nag.test.ts @@ -0,0 +1,32 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { CfnResource, Stack } from "aws-cdk-lib"; +import { Bucket } from "aws-cdk-lib/aws-s3"; +import { addCfnNagSuppressions, CfnNagSuppression } from "../../lib/cfn-nag"; + +describe("add cfn-nag suppression", function () { + it("adds suppression when none present", function () { + const stack = new Stack(); + const bucket = new Bucket(stack, "Bucket"); + const suppression: CfnNagSuppression = { id: "my id", reason: "my reason" }; + addCfnNagSuppressions(bucket, suppression); + expect((bucket.node.defaultChild as CfnResource).cfnOptions.metadata?.cfn_nag?.rules_to_suppress).toStrictEqual( + expect.arrayContaining([suppression]), + ); + }); + + it("adds suppression when already present", function () { + const stack = new Stack(); + const bucket = new Bucket(stack, "Bucket"); + const firstSuppression: CfnNagSuppression = { id: "my id", reason: "my reason" }; + const secondSuppression: CfnNagSuppression = { id: "another id", reason: "another reason" }; + const thirdSuppression: CfnNagSuppression = { id: "final id", reason: "final reason" }; + (bucket.node.defaultChild as CfnResource).cfnOptions.metadata = { + cfn_nag: { rules_to_suppress: [firstSuppression] }, + }; + addCfnNagSuppressions(bucket, secondSuppression, thirdSuppression); + expect((bucket.node.defaultChild as CfnResource).cfnOptions.metadata?.cfn_nag?.rules_to_suppress).toStrictEqual( + expect.arrayContaining([firstSuppression, secondSuppression, thirdSuppression]), + ); + }); +}); diff --git a/source/instance-scheduler/tests/lib/cfn.test.ts b/source/instance-scheduler/tests/lib/cfn.test.ts new file mode 100644 index 00000000..e0c56d0f --- /dev/null +++ b/source/instance-scheduler/tests/lib/cfn.test.ts @@ -0,0 +1,183 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { CfnCondition, CfnOutput, CfnParameter, Stack } from "aws-cdk-lib"; +import { Template } from "aws-cdk-lib/assertions"; +import { Bucket } from "aws-cdk-lib/aws-s3"; +import { Construct } from "constructs"; +import { + EnabledDisabledParameter, + ParameterWithLabel, + YesNoParameter, + addParameterGroup, + addParameterLabel, + cfnConditionToTrueFalse, + enabledDisabledCondition, + overrideLogicalId, + trueCondition, + yesNoCondition, +} from "../../lib/cfn"; + +describe("override logical id", function () { + it("sets id to expected value", function () { + const stack = new Stack(); + const bucket = new Bucket(stack, "Bucket"); + const myLogicalId = "MyLogicalId"; + overrideLogicalId(bucket, myLogicalId); + Template.fromStack(stack).templateMatches({ Resources: { [myLogicalId]: { Type: "AWS::S3::Bucket" } } }); + }); + + it("fails on non-CfnResource", function () { + const stack = new Stack(); + const construct = new Construct(stack, "Construct"); + expect(function () { + overrideLogicalId(construct, "MyLogicalId"); + }).toThrow(); + }); +}); + +describe("yes/no condition", function () { + it("resolves to a condition that is true when the value is Yes", function () { + const stack = new Stack(); + const conditionId = "Condition"; + yesNoCondition(stack, conditionId, "Yes"); + const template = Template.fromStack(stack); + const conditions = template.findConditions(conditionId); + const conditionIds = Object.getOwnPropertyNames(conditions); + expect(conditionIds).toHaveLength(1); + const condition = conditions[conditionIds[0]]; + expect(condition).toEqual({ "Fn::Equals": ["Yes", "Yes"] }); + }); +}); + +describe("enabled/disabled condition", function () { + it("resolves to a condition that is true when the value is Enabled", function () { + const stack = new Stack(); + const conditionId = "Condition"; + enabledDisabledCondition(stack, conditionId, "Enabled"); + const template = Template.fromStack(stack); + const 
conditions = template.findConditions(conditionId); + const conditionIds = Object.getOwnPropertyNames(conditions); + expect(conditionIds).toHaveLength(1); + const condition = conditions[conditionIds[0]]; + expect(condition).toEqual({ "Fn::Equals": ["Enabled", "Enabled"] }); + }); +}); + +describe("true condition", function () { + it("resolves to a true condition", function () { + const stack = new Stack(); + const conditionId = "Condition"; + trueCondition(stack, conditionId); + const template = Template.fromStack(stack); + const conditions = template.findConditions(conditionId); + const conditionIds = Object.getOwnPropertyNames(conditions); + expect(conditionIds).toHaveLength(1); + const condition = conditions[conditionIds[0]]; + expect(condition).toEqual({ "Fn::Equals": [true, true] }); + }); +}); + +describe("condition to true/false", function () { + it("resolves to True or False depending on condition", function () { + const stack = new Stack(); + const conditionId = "Condition"; + const condition = new CfnCondition(stack, conditionId); + const outputId = "Output"; + new CfnOutput(stack, outputId, { value: cfnConditionToTrueFalse(condition) }); + const template = Template.fromStack(stack); + const outputs = template.findOutputs(outputId); + const outputIds = Object.getOwnPropertyNames(outputs); + expect(outputIds).toHaveLength(1); + const output = outputs[outputIds[0]]; + expect(output.Value).toEqual({ "Fn::If": [conditionId, "True", "False"] }); + }); +}); + +describe("parameter label helpers", function () { + it("add expected labels and groups", function () { + const stack = new Stack(); + const firstParamId = "FirstParam"; + const firstParam = new CfnParameter(stack, firstParamId); + const secondParamId = "SecondParam"; + const secondParam = new CfnParameter(stack, secondParamId); + + const firstParamLabel = "my-first-param"; + addParameterLabel(firstParam, firstParamLabel); + const secondParamLabel = "my-second-param"; + addParameterLabel(secondParam, 
secondParamLabel); + + const groupLabel = "my-group"; + addParameterGroup(stack, { label: groupLabel, parameters: [firstParam, secondParam] }); + + const cfnInterface = Template.fromStack(stack).toJSON().Metadata["AWS::CloudFormation::Interface"]; + expect(cfnInterface.ParameterGroups).toEqual([ + { Label: { default: groupLabel }, Parameters: expect.arrayContaining([firstParamId, secondParamId]) }, + ]); + expect(cfnInterface.ParameterLabels[firstParamId]).toEqual({ default: firstParamLabel }); + expect(cfnInterface.ParameterLabels[secondParamId]).toEqual({ default: secondParamLabel }); + }); +}); + +describe("parameter with label", function () { + it("adds expected label", function () { + const stack = new Stack(); + const paramId = "MyParam"; + const label = "my-param"; + new ParameterWithLabel(stack, paramId, { label }); + + const cfnInterface = Template.fromStack(stack).toJSON().Metadata["AWS::CloudFormation::Interface"]; + expect(cfnInterface.ParameterLabels[paramId]).toEqual({ default: label }); + }); +}); + +describe("yes/no parameter", function () { + it("does not add condition if not used", function () { + const stack = new Stack(); + const paramId = "MyParam"; + new YesNoParameter(stack, paramId); + const template = Template.fromStack(stack); + const conditions = template.findConditions("*"); + const conditionIds = Object.getOwnPropertyNames(conditions); + expect(conditionIds).toHaveLength(0); + }); + + it("adds condition", function () { + const stack = new Stack(); + const paramId = "MyParam"; + const param = new YesNoParameter(stack, paramId); + param.getCondition(); + const template = Template.fromStack(stack); + const conditions = template.findConditions("*"); + const conditionIds = Object.getOwnPropertyNames(conditions); + expect(conditionIds).toHaveLength(1); + expect(conditionIds[0]).toStrictEqual(`${paramId}Condition`); + const condition = conditions[conditionIds[0]]; + expect(condition).toEqual({ "Fn::Equals": [{ Ref: paramId }, "Yes"] }); + }); 
+}); + +describe("enabled/disabled parameter", function () { + it("does not add condition if not used", function () { + const stack = new Stack(); + const paramId = "MyParam"; + new EnabledDisabledParameter(stack, paramId); + const template = Template.fromStack(stack); + const conditions = template.findConditions("*"); + const conditionIds = Object.getOwnPropertyNames(conditions); + expect(conditionIds).toHaveLength(0); + }); + + it("adds condition", function () { + const stack = new Stack(); + const paramId = "MyParam"; + const param = new EnabledDisabledParameter(stack, paramId); + param.getCondition(); + const template = Template.fromStack(stack); + const conditions = template.findConditions("*"); + const conditionIds = Object.getOwnPropertyNames(conditions); + expect(conditionIds).toHaveLength(1); + expect(conditionIds[0]).toStrictEqual(`${paramId}Condition`); + const condition = conditions[conditionIds[0]]; + expect(condition).toEqual({ "Fn::Equals": [{ Ref: paramId }, "Enabled"] }); + }); +}); diff --git a/source/instance-scheduler/tests/lib/core-scheduler.test.ts b/source/instance-scheduler/tests/lib/core-scheduler.test.ts index 1e4787df..1e43df8a 100644 --- a/source/instance-scheduler/tests/lib/core-scheduler.test.ts +++ b/source/instance-scheduler/tests/lib/core-scheduler.test.ts @@ -1,33 +1,107 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 -import { Stack } from "aws-cdk-lib"; -import { Template } from "aws-cdk-lib/assertions"; -import { CompositePrincipal, Role, ServicePrincipal } from "aws-cdk-lib/aws-iam"; -import { Key } from "aws-cdk-lib/aws-kms"; -import { CoreScheduler } from "../../lib/core-scheduler"; +import { + conditions, + coreScheduler, + defaultTimezone, + logRetentionDays, + memorySizeMB, + namespace, + principals, + regions, + schedulingIntervalMinutes, + solutionId, + solutionName, + solutionVersion, + startTags, + stopTags, + tagKey, +} from "../test_utils/stack-factories"; describe("core scheduler", function () { - const stack = new Stack(); - const key = new Key(stack, "Key"); - const role = new Role(stack, "Role", { - assumedBy: new CompositePrincipal( - new ServicePrincipal("events.amazonaws.com"), - new ServicePrincipal("lambda.amazonaws.com"), - ), - }); + const functions = coreScheduler.findResources("AWS::Lambda::Function"); + const functionIds = Object.getOwnPropertyNames(functions); + expect(functionIds).toHaveLength(8); + const mainFunctionId = functionIds.find((funcId: string) => funcId == "Main"); + if (!mainFunctionId) { + throw Error("unable to locate main function"); + } + const lambdaFunction = functions[mainFunctionId]; + + const roleId = lambdaFunction.Properties.Role["Fn::GetAtt"][0]; + const roles = coreScheduler.findResources("AWS::IAM::Role"); + const role = roles[roleId]; + + const keys = coreScheduler.findResources("AWS::KMS::Key"); + const keyIds = Object.getOwnPropertyNames(keys); + expect(keyIds).toHaveLength(1); + const keyId = keyIds[0]; + const key = keys[keyId]; + + describe("key", function () { + it("has expected id", function () { + expect(keyId).toEqual("InstanceSchedulerEncryptionKey"); + }); + + it("is enabled", function () { + expect(key.Properties.Enabled).toEqual(true); + }); + + it("has rotation enabled", function () { + expect(key.Properties.EnableKeyRotation).toEqual(true); + }); + + it("is not 
retained", function () { + expect(key.DeletionPolicy).toEqual("Delete"); + expect(key.UpdateReplacePolicy).toEqual("Delete"); + }); - new CoreScheduler(stack, { - kmsEncryptionKey: key, - memorySize: 128, - schedulerRole: role, - solutionVersion: "v9.9.9", + describe("policy", function () { + it("grants admin access to root principal", function () { + expect(key.Properties.KeyPolicy.Statement).toEqual( + expect.arrayContaining([ + { + Action: "kms:*", + Effect: "Allow", + Principal: { + AWS: { + "Fn::Join": ["", ["arn:", { Ref: "AWS::Partition" }, ":iam::", { Ref: "AWS::AccountId" }, ":root"]], + }, + }, + Resource: "*", + }, + ]), + ); + }); + }); + + describe("alias", function () { + const aliases = coreScheduler.findResources("AWS::KMS::Alias"); + const aliasIds = Object.getOwnPropertyNames(aliases); + expect(aliasIds).toHaveLength(1); + const aliasId = aliasIds[0]; + const alias = aliases[aliasId]; + + it("has expected id", function () { + expect(aliasId).toEqual("InstanceSchedulerEncryptionKeyAlias"); + }); + + it("has expected name", function () { + expect(alias.Properties.AliasName).toEqual({ + "Fn::Join": ["", ["alias/", { Ref: "AWS::StackName" }, "-instance-scheduler-encryption-key"]], + }); + }); + + it("targets key", function () { + expect(alias.Properties.TargetKeyId).toEqual({ "Fn::GetAtt": [keyId, "Arn"] }); + }); + }); }); - const template = Template.fromStack(stack); const stateTableLogicalId = "StateTable"; describe("state table", function () { - const table = template.findResources("AWS::DynamoDB::Table")[stateTableLogicalId]; + const table = coreScheduler.findResources("AWS::DynamoDB::Table")[stateTableLogicalId]; it("partition key is service", function () { const key = "service"; @@ -82,26 +156,27 @@ describe("core scheduler", function () { }); it("is encrypted with KMS key", function () { - const keys = template.findResources("AWS::KMS::Key"); + const keys = coreScheduler.findResources("AWS::KMS::Key"); const keyIds = 
Object.getOwnPropertyNames(keys); expect(keyIds).toHaveLength(1); expect(table.Properties.SSESpecification).toStrictEqual({ - KMSMasterKeyId: { Ref: keyIds[0] }, + KMSMasterKeyId: { "Fn::GetAtt": [keyIds[0], "Arn"] }, SSEEnabled: true, SSEType: "KMS", }); }); - it("is not retained", function () { - expect(table.DeletionPolicy).toStrictEqual("Delete"); - expect(table.UpdateReplacePolicy).toStrictEqual("Delete"); + it("has deletion protection enabled", function () { + expect(table.Properties.DeletionProtectionEnabled).toEqual({ + "Fn::If": [conditions.enableDdbDeletionProtection, "True", "False"], + }); }); }); const configTableLogicalId = "ConfigTable"; describe("config table", function () { - const table = template.findResources("AWS::DynamoDB::Table")[configTableLogicalId]; + const table = coreScheduler.findResources("AWS::DynamoDB::Table")[configTableLogicalId]; it("partition key is type", function () { const key = "type"; @@ -156,29 +231,30 @@ describe("core scheduler", function () { }); it("is encrypted with KMS key", function () { - const keys = template.findResources("AWS::KMS::Key"); + const keys = coreScheduler.findResources("AWS::KMS::Key"); const keyIds = Object.getOwnPropertyNames(keys); expect(keyIds).toHaveLength(1); expect(table.Properties.SSESpecification).toStrictEqual({ - KMSMasterKeyId: { Ref: keyIds[0] }, + KMSMasterKeyId: { "Fn::GetAtt": [keyIds[0], "Arn"] }, SSEEnabled: true, SSEType: "KMS", }); }); - it("is not retained", function () { - expect(table.DeletionPolicy).toStrictEqual("Delete"); - expect(table.UpdateReplacePolicy).toStrictEqual("Delete"); + it("has deletion protection enabled", function () { + expect(table.Properties.DeletionProtectionEnabled).toEqual({ + "Fn::If": [conditions.enableDdbDeletionProtection, "True", "False"], + }); }); }); const maintenanceWindowTableLogicalId = "MaintenanceWindowTable"; describe("maintenance window table", function () { - const table = 
template.findResources("AWS::DynamoDB::Table")[maintenanceWindowTableLogicalId]; + const table = coreScheduler.findResources("AWS::DynamoDB::Table")[maintenanceWindowTableLogicalId]; - it("partition key is Name", function () { - const key = "Name"; + it("partition key is account-region", function () { + const key = "account-region"; expect(table.Properties.KeySchema).toEqual( expect.arrayContaining([ @@ -198,8 +274,8 @@ describe("core scheduler", function () { ); }); - it("sort key is account-region", function () { - const key = "account-region"; + it("sort key is name-id", function () { + const key = "name-id"; expect(table.Properties.KeySchema).toEqual( expect.arrayContaining([ @@ -230,31 +306,342 @@ describe("core scheduler", function () { }); it("is encrypted with KMS key", function () { - const keys = template.findResources("AWS::KMS::Key"); + const keys = coreScheduler.findResources("AWS::KMS::Key"); const keyIds = Object.getOwnPropertyNames(keys); expect(keyIds).toHaveLength(1); expect(table.Properties.SSESpecification).toStrictEqual({ - KMSMasterKeyId: { Ref: keyIds[0] }, + KMSMasterKeyId: { "Fn::GetAtt": [keyIds[0], "Arn"] }, SSEEnabled: true, SSEType: "KMS", }); }); + it("has deletion protection enabled", function () { + expect(table.Properties.DeletionProtectionEnabled).toEqual({ + "Fn::If": [conditions.enableDdbDeletionProtection, "True", "False"], + }); + }); + }); + + const logGroups = coreScheduler.findResources("AWS::Logs::LogGroup"); + const logGroupIds = Object.getOwnPropertyNames(logGroups); + expect(logGroupIds).toHaveLength(7); + + describe("setup custom resource", function () { + const setupResources = coreScheduler.findResources("Custom::ServiceSetup"); + const setupResourceIds = Object.getOwnPropertyNames(setupResources); + expect(setupResourceIds).toHaveLength(1); + const setupResourceId = setupResourceIds[0]; + const setupResource = setupResources[setupResourceId]; + + it("targets function", function () { + 
expect(setupResource.Properties.ServiceToken).toEqual({ + "Fn::GetAtt": [mainFunctionId, "Arn"], + }); + }); + + it("has expected properties", function () { + expect(setupResourceId).toEqual("SchedulerConfigHelper"); + expect(setupResource.Properties).toHaveProperty("log_retention_days", logRetentionDays); + expect(setupResource.Properties).toHaveProperty("remote_account_ids", principals); + expect(setupResource.Properties).toHaveProperty("timeout", 120); + }); + it("is not retained", function () { - expect(table.DeletionPolicy).toStrictEqual("Delete"); - expect(table.UpdateReplacePolicy).toStrictEqual("Delete"); + expect(setupResource.DeletionPolicy).toStrictEqual("Delete"); + }); + }); + + describe("orchestrator-rule", function () { + const rules = coreScheduler.findResources("AWS::Events::Rule"); + + const ruleName = Object.keys(rules).find((rule) => rule.includes("SchedulerEventRule")); + const scheduleRule = ruleName ? rules[ruleName] : null; + + if (!scheduleRule) { + throw new Error("Could not find schedule rule"); + } + + it("has expected rate expression", function () { + const mappingLogicalId = "CronExpressionsForSchedulingIntervals"; + const mappings = coreScheduler.findMappings(mappingLogicalId); + const mappingIds = Object.getOwnPropertyNames(mappings); + expect(mappingIds).toHaveLength(1); + + const mappingKey = "IntervalMinutesToCron"; + expect(scheduleRule.Properties.ScheduleExpression).toEqual({ + "Fn::FindInMap": [mappingLogicalId, mappingKey, schedulingIntervalMinutes.toString()], + }); + expect(mappings[mappingIds[0]][mappingKey][schedulingIntervalMinutes.toString()]).toEqual( + `cron(0/${schedulingIntervalMinutes} * * * ? 
*)`, + ); + }); + + it("has expected state", function () { + expect(scheduleRule.Properties.State).toEqual({ + "Fn::If": [conditions.schedulingEnabled, "ENABLED", "DISABLED"], + }); + }); + + it("targets orchestrator", function () { + expect(scheduleRule.Properties.Targets).toEqual( + expect.arrayContaining([ + { + Arn: { + "Fn::GetAtt": [expect.stringContaining("SchedulingOrchestrator"), "Arn"], + }, + Id: expect.any(String), + Input: JSON.stringify({ scheduled_action: "run_orchestrator" }), + RetryPolicy: { MaximumRetryAttempts: 5 }, + }, + ]), + ); }); }); + const topics = coreScheduler.findResources("AWS::SNS::Topic"); + const topicIds = Object.getOwnPropertyNames(topics); + expect(topicIds).toHaveLength(1); + const topicId = topicIds[0]; + describe("function", function () { - const functions = template.findResources("AWS::Lambda::Function"); - const functionIds = Object.getOwnPropertyNames(functions); - expect(functionIds).toHaveLength(1); - const lambdaFunction = functions[functionIds[0]]; + it("has expected id", function () { + expect(mainFunctionId).toEqual("Main"); + }); + + it("has expected memory", function () { + expect(lambdaFunction.Properties.MemorySize).toEqual(memorySizeMB); + }); describe("environment", function () { const env = lambdaFunction.Properties.Environment.Variables; + it("has scheduler interval", function () { + expect(env).toEqual( + expect.objectContaining({ + SCHEDULER_FREQUENCY: schedulingIntervalMinutes.toString(), + }), + ); + }); + + it("has stack name", function () { + expect(env).toEqual( + expect.objectContaining({ + STACK_NAME: { Ref: "AWS::StackName" }, + }), + ); + }); + + it("has send metrics", function () { + expect(env).toEqual( + expect.objectContaining({ + SEND_METRICS: { "Fn::If": [conditions.sendMetrics, "True", "False"] }, + }), + ); + }); + + it("has solution id", function () { + expect(env).toEqual( + expect.objectContaining({ + SOLUTION_ID: solutionId, + }), + ); + }); + + it("has solution version", function () { 
+ expect(env).toEqual( + expect.objectContaining({ + SOLUTION_VERSION: solutionVersion, + }), + ); + }); + + it("has enable debug logging", function () { + expect(env).toEqual( + expect.objectContaining({ + TRACE: { "Fn::If": [conditions.enableDebugLogging, "True", "False"] }, + }), + ); + }); + + it("has user agent extra", function () { + expect(env).toEqual( + expect.objectContaining({ + USER_AGENT_EXTRA: `AwsSolution/${solutionId}/${solutionVersion}`, + }), + ); + }); + + it("has metrics url", function () { + expect(env).toEqual( + expect.objectContaining({ + METRICS_URL: "https://metrics.awssolutionsbuilder.com/generic", + }), + ); + }); + + it("has stack id", function () { + expect(env).toEqual( + expect.objectContaining({ + STACK_ID: { Ref: "AWS::StackId" }, + }), + ); + }); + + it("has uuid key", function () { + expect(env).toEqual( + expect.objectContaining({ + UUID_KEY: `/Solutions/${solutionName}/UUID/`, + }), + ); + }); + + it("has start ec2 batch size", function () { + expect(env).toEqual( + expect.objectContaining({ + START_EC2_BATCH_SIZE: "5", + }), + ); + }); + + it("has schedule tag key", function () { + expect(env).toEqual( + expect.objectContaining({ + SCHEDULE_TAG_KEY: tagKey, + }), + ); + }); + + it("has default timezone", function () { + expect(env).toEqual( + expect.objectContaining({ + DEFAULT_TIMEZONE: defaultTimezone, + }), + ); + }); + + it("has enable ec2", function () { + expect(env).toEqual( + expect.objectContaining({ + ENABLE_EC2_SERVICE: { "Fn::If": [conditions.enableEc2, "True", "False"] }, + }), + ); + }); + + it("has enable rds", function () { + expect(env).toEqual( + expect.objectContaining({ + ENABLE_RDS_SERVICE: { "Fn::If": [conditions.enableRds, "True", "False"] }, + }), + ); + }); + + it("has enable rds clusters", function () { + expect(env).toEqual( + expect.objectContaining({ + ENABLE_RDS_CLUSTERS: { "Fn::If": [conditions.enableRdsClusters, "True", "False"] }, + }), + ); + }); + + it("has enable neptune", function () { + 
expect(env).toEqual( + expect.objectContaining({ + ENABLE_NEPTUNE_SERVICE: { "Fn::If": [conditions.enableNeptune, "True", "False"] }, + }), + ); + }); + + it("has enable docdb", function () { + expect(env).toEqual( + expect.objectContaining({ + ENABLE_DOCDB_SERVICE: { "Fn::If": [conditions.enableDocDb, "True", "False"] }, + }), + ); + }); + + it("has enable rds snapshots", function () { + expect(env).toEqual( + expect.objectContaining({ + ENABLE_RDS_SNAPSHOTS: { "Fn::If": [conditions.enableRdsSnapshots, "True", "False"] }, + }), + ); + }); + + it("has schedule regions", function () { + expect(env).toEqual( + expect.objectContaining({ + SCHEDULE_REGIONS: regions.join(","), + }), + ); + }); + + it("has namespace", function () { + expect(env).toEqual( + expect.objectContaining({ + APP_NAMESPACE: namespace, + }), + ); + }); + + it("has scheduler role name", function () { + expect(env).toEqual( + expect.objectContaining({ + SCHEDULER_ROLE_NAME: "Scheduler-Role", + }), + ); + }); + + it("has enable schedule hub account", function () { + expect(env).toEqual( + expect.objectContaining({ + ENABLE_SCHEDULE_HUB_ACCOUNT: { "Fn::If": [conditions.enableHubAcctScheduling, "True", "False"] }, + }), + ); + }); + + it("has enable ec2 ssm maintenance windows", function () { + expect(env).toEqual( + expect.objectContaining({ + ENABLE_EC2_SSM_MAINTENANCE_WINDOWS: { + "Fn::If": [conditions.enableEc2MaintWindows, "True", "False"], + }, + }), + ); + }); + + it("has start tags", function () { + expect(env).toEqual( + expect.objectContaining({ + START_TAGS: startTags, + }), + ); + }); + + it("has stop tags", function () { + expect(env).toEqual( + expect.objectContaining({ + STOP_TAGS: stopTags, + }), + ); + }); + + it("has enable aws organizations", function () { + expect(env).toEqual( + expect.objectContaining({ + ENABLE_AWS_ORGANIZATIONS: { "Fn::If": [conditions.enableAwsOrgs, "True", "False"] }, + }), + ); + }); + + it("has topic arn", function () { + expect(env).toEqual( + 
expect.objectContaining({ + ISSUES_TOPIC_ARN: { Ref: topicId }, + }), + ); + }); + it("has state table name", function () { expect(env).toEqual( expect.objectContaining({ @@ -281,10 +668,6 @@ describe("core scheduler", function () { }); describe("role", function () { - const roleId = lambdaFunction.Properties.Role["Fn::GetAtt"][0]; - const roles = template.findResources("AWS::IAM::Role"); - const role = roles[roleId]; - describe("trust relationship", function () { it("includes lambda", function () { expect(role.Properties.AssumeRolePolicyDocument.Statement).toEqual( @@ -297,29 +680,20 @@ describe("core scheduler", function () { ]), ); }); - - it("includes eventbridge", function () { - expect(role.Properties.AssumeRolePolicyDocument.Statement).toEqual( - expect.arrayContaining([ - { - Action: "sts:AssumeRole", - Effect: "Allow", - Principal: { Service: "events.amazonaws.com" }, - }, - ]), - ); - }); }); describe("policy", function () { - const policies = template.findResources("AWS::IAM::Policy", { + const policies = coreScheduler.findResources("AWS::IAM::Policy", { Properties: { Roles: [{ Ref: roleId }], }, }); const policyIds = Object.getOwnPropertyNames(policies); - expect(policyIds).toHaveLength(1); - const policy = policies[policyIds[0]]; + const defaultPolicyId = policyIds.find((policyId: string) => policyId.includes("DefaultPolicy")); + if (!defaultPolicyId) { + throw new Error("Could not find default policy"); + } + const policy = policies[defaultPolicyId]; it("has xray permissions", function () { expect(policy.Properties.PolicyDocument.Statement).toEqual( @@ -333,28 +707,30 @@ describe("core scheduler", function () { ); }); - it("has state table permissions", function () { + const readWritePermissions = [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:ConditionCheckItem", + "dynamodb:BatchWriteItem", + "dynamodb:PutItem", + "dynamodb:UpdateItem", + 
"dynamodb:DeleteItem", + "dynamodb:DescribeTable", + ]; + + it("has config table permissions", function () { expect(policy.Properties.PolicyDocument.Statement).toEqual( expect.arrayContaining([ { - Action: expect.arrayContaining([ - "dynamodb:BatchGetItem", - "dynamodb:GetRecords", - "dynamodb:GetShardIterator", - "dynamodb:Query", - "dynamodb:GetItem", - "dynamodb:Scan", - "dynamodb:ConditionCheckItem", - "dynamodb:BatchWriteItem", - "dynamodb:PutItem", - "dynamodb:UpdateItem", - "dynamodb:DeleteItem", - "dynamodb:DescribeTable", - ]), + Action: expect.arrayContaining(readWritePermissions), Effect: "Allow", Resource: expect.arrayContaining([ { - ["Fn::GetAtt"]: [stateTableLogicalId, "Arn"], + ["Fn::GetAtt"]: [configTableLogicalId, "Arn"], }, ]), }, @@ -362,47 +738,92 @@ describe("core scheduler", function () { ); }); - it("has config table permissions", function () { + it("has key permissions", function () { + const keys = coreScheduler.findResources("AWS::KMS::Key"); + const keyIds = Object.getOwnPropertyNames(keys); + expect(keyIds).toHaveLength(1); expect(policy.Properties.PolicyDocument.Statement).toEqual( expect.arrayContaining([ { Action: expect.arrayContaining([ - "dynamodb:DeleteItem", - "dynamodb:GetItem", - "dynamodb:PutItem", - "dynamodb:Query", - "dynamodb:Scan", - "dynamodb:BatchWriteItem", + "kms:Decrypt", + "kms:DescribeKey", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", ]), Effect: "Allow", - Resource: expect.arrayContaining([ - { - ["Fn::GetAtt"]: [configTableLogicalId, "Arn"], - }, - ]), + Resource: { "Fn::GetAtt": [keyIds[0], "Arn"] }, }, ]), ); }); - it("has maintenance window table permissions", function () { + const functionName = lambdaFunction.Properties.FunctionName; + const functionNameSuffix = "-InstanceSchedulerMain"; + expect(functionName).toEqual({ "Fn::Join": ["", [{ Ref: "AWS::StackName" }, functionNameSuffix]] }); + + it("has basic logging permissions", function () { + 
expect(policy.Properties.PolicyDocument.Statement).toEqual( + expect.arrayContaining([ + { + Action: "logs:CreateLogGroup", + Effect: "Allow", + Resource: { + "Fn::Join": [ + "", + [ + "arn:", + { Ref: "AWS::Partition" }, + ":logs:", + { Ref: "AWS::Region" }, + ":", + { Ref: "AWS::AccountId" }, + ":*", + ], + ], + }, + }, + ]), + ); + expect(policy.Properties.PolicyDocument.Statement).toEqual( expect.arrayContaining([ { Action: expect.arrayContaining([ - "dynamodb:DeleteItem", - "dynamodb:GetItem", - "dynamodb:PutItem", - "dynamodb:Query", - "dynamodb:Scan", - "dynamodb:BatchWriteItem", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:PutRetentionPolicy", ]), Effect: "Allow", - Resource: expect.arrayContaining([ - { - ["Fn::GetAtt"]: [maintenanceWindowTableLogicalId, "Arn"], - }, - ]), + Resource: { + "Fn::Join": [ + "", + [ + "arn:", + { Ref: "AWS::Partition" }, + ":logs:", + { Ref: "AWS::Region" }, + ":", + { Ref: "AWS::AccountId" }, + ":log-group:/aws/lambda/", + { Ref: "AWS::StackName" }, + `${functionNameSuffix}:*`, + ], + ], + }, + }, + ]), + ); + }); + + it("has sns permissions", function () { + expect(policy.Properties.PolicyDocument.Statement).toEqual( + expect.arrayContaining([ + { + Action: "sns:Publish", + Effect: "Allow", + Resource: { Ref: topicId }, }, ]), ); diff --git a/source/instance-scheduler/tests/lib/lambda-functions/asg-handler.test.ts b/source/instance-scheduler/tests/lib/lambda-functions/asg-handler.test.ts new file mode 100644 index 00000000..c5cc3742 --- /dev/null +++ b/source/instance-scheduler/tests/lib/lambda-functions/asg-handler.test.ts @@ -0,0 +1,26 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { findResource } from "../../test_utils/stack-factories"; + +describe("asg-handler", function () { + const asgPermissionsPolicy = findResource("AWS::IAM::Policy", "ASGPolicy"); + + test("has SNS publish permissions", function () { + expect(asgPermissionsPolicy.Properties.PolicyDocument.Statement).toEqual( + expect.arrayContaining([ + { + Action: expect.arrayContaining(["kms:Decrypt", "kms:GenerateDataKey*"]), + Effect: "Allow", + Resource: { "Fn::GetAtt": ["InstanceSchedulerEncryptionKey", "Arn"] }, + }, + { + Action: "sns:Publish", + Effect: "Allow", + Resource: { + Ref: "InstanceSchedulerSnsTopic", + }, + }, + ]), + ); + }); +}); diff --git a/source/instance-scheduler/tests/lib/lambda-functions/scheduling-orchestrator.test.ts b/source/instance-scheduler/tests/lib/lambda-functions/scheduling-orchestrator.test.ts new file mode 100644 index 00000000..93f3913d --- /dev/null +++ b/source/instance-scheduler/tests/lib/lambda-functions/scheduling-orchestrator.test.ts @@ -0,0 +1,27 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import { findResource } from "../../test_utils/stack-factories"; + +describe("scheduling-orchestrator", function () { + const orchestratorPermissionsPolicy = findResource("AWS::IAM::Policy", "SchedulingOrchestratorPermissionsPolicy"); + + test("has SNS publish permissions", function () { + expect(orchestratorPermissionsPolicy.Properties.PolicyDocument.Statement).toEqual( + expect.arrayContaining([ + { + Action: expect.arrayContaining(["kms:Decrypt", "kms:GenerateDataKey*"]), + Effect: "Allow", + Resource: { "Fn::GetAtt": ["InstanceSchedulerEncryptionKey", "Arn"] }, + }, + { + Action: "sns:Publish", + Effect: "Allow", + Resource: { + Ref: "InstanceSchedulerSnsTopic", + }, + }, + ]), + ); + }); +}); diff --git a/source/instance-scheduler/tests/lib/lambda-functions/scheduling-request-handler.test.ts b/source/instance-scheduler/tests/lib/lambda-functions/scheduling-request-handler.test.ts new file mode 100644 index 00000000..45ca8a9d --- /dev/null +++ b/source/instance-scheduler/tests/lib/lambda-functions/scheduling-request-handler.test.ts @@ -0,0 +1,26 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { findResource } from "../../test_utils/stack-factories"; + +describe("scheduling-request-handler", function () { + const schedulingRequestHandlerPolicy = findResource("AWS::IAM::Policy", "schedulingRequestHandlerPolicy"); + + test("has SNS publish permissions", function () { + expect(schedulingRequestHandlerPolicy.Properties.PolicyDocument.Statement).toEqual( + expect.arrayContaining([ + { + Action: expect.arrayContaining(["kms:Decrypt", "kms:GenerateDataKey*"]), + Effect: "Allow", + Resource: { "Fn::GetAtt": ["InstanceSchedulerEncryptionKey", "Arn"] }, + }, + { + Action: "sns:Publish", + Effect: "Allow", + Resource: { + Ref: "InstanceSchedulerSnsTopic", + }, + }, + ]), + ); + }); +}); diff --git a/source/instance-scheduler/tests/lib/lambda-functions/spoke-registration.test.ts b/source/instance-scheduler/tests/lib/lambda-functions/spoke-registration.test.ts new file mode 100644 index 00000000..62d9308b --- /dev/null +++ b/source/instance-scheduler/tests/lib/lambda-functions/spoke-registration.test.ts @@ -0,0 +1,26 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { findResource } from "../../test_utils/stack-factories"; + +describe("spoke-registration-handler", function () { + const spokeRegistrationPolicy = findResource("AWS::IAM::Policy", "SpokeRegistrationPolicy"); + + test("has SNS publish permissions", function () { + expect(spokeRegistrationPolicy.Properties.PolicyDocument.Statement).toEqual( + expect.arrayContaining([ + { + Action: expect.arrayContaining(["kms:Decrypt", "kms:GenerateDataKey*"]), + Effect: "Allow", + Resource: { "Fn::GetAtt": ["InstanceSchedulerEncryptionKey", "Arn"] }, + }, + { + Action: "sns:Publish", + Effect: "Allow", + Resource: { + Ref: "InstanceSchedulerSnsTopic", + }, + }, + ]), + ); + }); +}); diff --git a/source/instance-scheduler/tests/lib/ops-insights-dashboard.test.ts b/source/instance-scheduler/tests/lib/ops-insights-dashboard.test.ts new file mode 100644 index 00000000..73243021 --- /dev/null +++ b/source/instance-scheduler/tests/lib/ops-insights-dashboard.test.ts @@ -0,0 +1,11 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { conditions, findResource } from "../test_utils/stack-factories"; + +describe("OpsInsights dashboard", function () { + const opsInsightsDashboard = findResource("AWS::CloudWatch::Dashboard", "OperationalInsightsDashboard"); + + test("is conditional on being enabled", function () { + expect(opsInsightsDashboard).toHaveProperty("Condition", conditions.deployOpsInsightsDashboard); + }); +}); diff --git a/source/instance-scheduler/tests/lib/runbooks/spoke-deregistration.test.ts b/source/instance-scheduler/tests/lib/runbooks/spoke-deregistration.test.ts new file mode 100644 index 00000000..50bbe0f5 --- /dev/null +++ b/source/instance-scheduler/tests/lib/runbooks/spoke-deregistration.test.ts @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0
+import { findResource } from "../../test_utils/stack-factories";
+
+describe("SpokeDeregistrationRunbook", () => {
+  it("grants lambda:InvokeFunction on the spoke registration handler", () => {
+    const runbookPolicy = findResource("AWS::IAM::Policy", "SpokeDeregistrationRunbookRoleDefaultPolicy");
+    expect(runbookPolicy.Properties.PolicyDocument.Statement).toEqual(
+      expect.arrayContaining([
+        {
+          Action: "lambda:InvokeFunction",
+          Effect: "Allow",
+          Resource: [
+            {
+              "Fn::GetAtt": [expect.stringContaining("SpokeRegistrationHandler"), "Arn"],
+            },
+            {
+              "Fn::Join": [
+                "",
+                [
+                  {
+                    "Fn::GetAtt": [expect.stringContaining("SpokeRegistrationHandler"), "Arn"],
+                  },
+                  ":*",
+                ],
+              ],
+            },
+          ],
+        },
+      ]),
+    );
+  });
+});
diff --git a/source/instance-scheduler/tests/lib/spoke-registration.test.ts b/source/instance-scheduler/tests/lib/spoke-registration.test.ts
new file mode 100644
index 00000000..ac23d30b
--- /dev/null
+++ b/source/instance-scheduler/tests/lib/spoke-registration.test.ts
@@ -0,0 +1,61 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { RemovalPolicy, Stack } from "aws-cdk-lib"; +import { Template } from "aws-cdk-lib/assertions"; +import { AttributeType, BillingMode, Table } from "aws-cdk-lib/aws-dynamodb"; +import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs"; +import { Topic } from "aws-cdk-lib/aws-sns"; +import { trueCondition } from "../../lib/cfn"; +import { TestFunctionFactory } from "../../lib/lambda-functions/function-factory"; +import { SpokeRegistrationLambda } from "../../lib/lambda-functions/spoke-registration"; + +function mockConfigTable(scope: Stack) { + return new Table(scope, "ConfigTable", { + sortKey: { name: "name", type: AttributeType.STRING }, + partitionKey: { name: "type", type: AttributeType.STRING }, + billingMode: BillingMode.PAY_PER_REQUEST, + removalPolicy: RemovalPolicy.DESTROY, + pointInTimeRecovery: true, + }); +} + +function mockErrorTopic(scope: Stack) { + return new Topic(scope, "mockedErrorTopic", {}); +} + +function mockLogGroup(scope: Stack) { + return new LogGroup(scope, "mockedLogGroup", {}); +} +describe("spoke-registration", function () { + describe("with aws-organizations enabled", function () { + //setup + const stack = new Stack(); + const configTable = mockConfigTable(stack); + const errorTopic = mockErrorTopic(stack); + const logGroup = mockLogGroup(stack); + new SpokeRegistrationLambda(stack, { + solutionVersion: "v9.9.9", + logRetentionDays: RetentionDays.FIVE_DAYS, + configTable: configTable, + snsErrorReportingTopic: errorTopic, + scheduleLogGroup: logGroup, + USER_AGENT_EXTRA: "user-agent-extra", + enableDebugLogging: trueCondition(stack, "EnableDebugLogging"), + principals: ["o-1234567"], + namespace: "namespace", + enableAwsOrganizations: trueCondition(stack, "EnableAwsOrganizations"), + factory: new TestFunctionFactory(), + }); + + const template = Template.fromStack(stack); + + describe("spoke-registration-lambda", function () { + const lambdaPermissionResources = 
template.findResources("AWS::Lambda::Permission"); + expect(lambdaPermissionResources).toContainKey("SpokeRegistrationLambdaPermission"); + const lambdaPermission = lambdaPermissionResources["SpokeRegistrationLambdaPermission"]; + it("is conditional on AwsOrganizations", function () { + expect(lambdaPermission["Condition"]).toEqual("EnableAwsOrganizations"); + }); + }); + }); +}); diff --git a/source/instance-scheduler/tests/test_function/__init__.py b/source/instance-scheduler/tests/test_function/__init__.py new file mode 100644 index 00000000..04f8b7b7 --- /dev/null +++ b/source/instance-scheduler/tests/test_function/__init__.py @@ -0,0 +1,2 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 diff --git a/source/instance-scheduler/tests/test_function/test_function.py b/source/instance-scheduler/tests/test_function/test_function.py new file mode 100644 index 00000000..4ce97baa --- /dev/null +++ b/source/instance-scheduler/tests/test_function/test_function.py @@ -0,0 +1,28 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from aws_lambda_powertools.utilities.typing import LambdaContext +else: + LambdaContext = object + + +def lambda_handler(_: dict[str, Any], __: LambdaContext) -> None: + """noop""" + + +def handle_metrics_uuid_request(_: dict[str, Any], __: LambdaContext) -> None: + """noop""" + + +def handle_orchestration_request(_: dict[str, Any], __: LambdaContext) -> None: + """noop""" + + +def handle_spoke_registration_event(_: dict[str, Any], __: LambdaContext) -> None: + """noop""" + + +def handle_scheduling_request(_: dict[str, Any], __: LambdaContext) -> None: + """noop""" diff --git a/source/instance-scheduler/tests/test_utils/stack-factories.ts b/source/instance-scheduler/tests/test_utils/stack-factories.ts new file mode 100644 index 00000000..d757aaf8 --- /dev/null +++ b/source/instance-scheduler/tests/test_utils/stack-factories.ts @@ -0,0 +1,175 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { Stack } from "aws-cdk-lib"; +import { Template } from "aws-cdk-lib/assertions"; +import { AttributeType, BillingMode, StreamViewType, Table, TableEncryption } from "aws-cdk-lib/aws-dynamodb"; +import { Key } from "aws-cdk-lib/aws-kms"; +import { Topic } from "aws-cdk-lib/aws-sns"; +import { AnonymizedMetricsEnvironment } from "../../lib/anonymized-metrics-environment"; +import { AsgScheduler } from "../../lib/asg-scheduler"; +import { trueCondition } from "../../lib/cfn"; +import { CoreScheduler } from "../../lib/core-scheduler"; +import { AsgSchedulingRole } from "../../lib/iam/asg-scheduling-role"; +import { AsgHandler } from "../../lib/lambda-functions/asg-handler"; +import { TestFunctionFactory } from "../../lib/lambda-functions/function-factory"; + +export const solutionName = "my-solution-name"; +export const solutionVersion = "v9.9.9"; +export const solutionId = "my-solution-id"; +export const memorySizeMB = 128; +export const logRetentionDays = 90; +export const principals: string[] = []; +export const schedulingIntervalMinutes = 5; +export const namespace = "prod"; +export const tagKey = "my-tag-key"; +export const defaultTimezone = "my-timezone"; +export const regions = ["us-east-1", "us-west-2"]; +export const startTags = "my-start-tags"; +export const stopTags = "my-stop-tags"; +export const appregApplicationName = "my-appreg-application-name"; +export const appregSolutionName = "my-appreg-solution-name"; +export const scheduledTagKey = "scheduled"; +export const rulePrefix = "is-"; +export const userAgentExtra = `AwsSolution/${solutionId}/${solutionVersion}`; +export const metricsEnv: AnonymizedMetricsEnvironment = { + SEND_METRICS: "True", + METRICS_URL: "https://example.com", + SOLUTION_ID: solutionId, + SOLUTION_VERSION: solutionVersion, + SCHEDULING_INTERVAL_MINUTES: schedulingIntervalMinutes.toString(), + METRICS_UUID: "metrics-uuid", +}; + +/** + * testable condition values for use in tests of the 
format: + * expect(myCfnObject).toHaveProperty("Condition", conditions.ConditionToTest); + */ +export const conditions = { + schedulingEnabled: "SchedulingEnabledCond", + enableDebugLogging: "EnableDebugLoggingCond", + enableCloudwatchMetrics: "EnableCloudwatchMetricsCond", + sendMetrics: "SendMetricsCond", + enableEc2: "EnableEc2Cond", + enableRds: "EnableRdsCond", + enableRdsClusters: "EnableRdsClustersCond", + enableNeptune: "EnableNeptuneCond", + enableDocDb: "EnableDocdbCond", + enableRdsSnapshots: "EnableRdsSnapshotCond", + enableHubAcctScheduling: "EnableHubAccountSchedulingCond", + enableEc2MaintWindows: "EnableEc2MaintenanceWindowsCond", + enableAwsOrgs: "EnableAwsOrgsCond", + enableDdbDeletionProtection: "EnableDdbDeletionProtectionCond", + enableAsgs: "EnableAsgsCond", + provideKmsToScheduler: "ProvideKmsAccesstoScheduler", + deployOpsInsightsDashboard: "DeployPropsInsightsDashboardCond", + gatherPerInstanceTypeMetrics: "GatherPerInstanceTypeMetricsCond", + gatherPerScheduleMetrics: "GatherPerScheduleMetricsCond", +}; + +export const coreScheduler = newCoreScheduler(); +export function findResource(resourceType: string, partialId: string) { + const resources = coreScheduler.findResources(resourceType); + const resourceIds = Object.getOwnPropertyNames(resources); + const foundResourceId = resourceIds.find((id: string) => id.includes(partialId)); + if (foundResourceId) { + return resources[foundResourceId]; + } else { + throw new Error( + `unable to find resource of type ${resourceType} containing ${partialId}.\nResources searched: ${resourceIds}`, + ); + } +} +export function newCoreScheduler(): Template { + const stack = new Stack(); + + new CoreScheduler(stack, { + solutionName, + solutionVersion, + solutionId, + memorySizeMB, + principals, + logRetentionDays, + schedulingEnabled: trueCondition(stack, conditions.schedulingEnabled), + schedulingIntervalMinutes, + namespace, + sendAnonymizedMetrics: trueCondition(stack, conditions.sendMetrics), + 
enableDebugLogging: trueCondition(stack, conditions.enableDebugLogging), + tagKey, + defaultTimezone, + enableEc2: trueCondition(stack, conditions.enableEc2), + enableRds: trueCondition(stack, conditions.enableRds), + enableRdsClusters: trueCondition(stack, conditions.enableRdsClusters), + enableNeptune: trueCondition(stack, conditions.enableNeptune), + enableDocdb: trueCondition(stack, conditions.enableDocDb), + enableRdsSnapshots: trueCondition(stack, conditions.enableRdsSnapshots), + regions, + enableSchedulingHubAccount: trueCondition(stack, conditions.enableHubAcctScheduling), + enableEc2SsmMaintenanceWindows: trueCondition(stack, conditions.enableEc2MaintWindows), + startTags, + stopTags, + enableAwsOrganizations: trueCondition(stack, conditions.enableAwsOrgs), + appregApplicationName, + appregSolutionName, + enableOpsInsights: trueCondition(stack, conditions.deployOpsInsightsDashboard), + kmsKeyArns: ["*"], + factory: new TestFunctionFactory(), + enableDdbDeletionProtection: trueCondition(stack, conditions.enableDdbDeletionProtection), + enableAsgs: trueCondition(stack, conditions.enableAsgs), + scheduledTagKey, + rulePrefix, + }); + + return Template.fromStack(stack); +} + +export function createAsgSchedulerStack(id: string): Stack { + const stack = new Stack(); + const key = new Key(stack, "Key"); + const configTable = new Table(stack, "ConfigTable", { + sortKey: { name: "name", type: AttributeType.STRING }, + partitionKey: { name: "type", type: AttributeType.STRING }, + billingMode: BillingMode.PAY_PER_REQUEST, + pointInTimeRecovery: true, + encryption: TableEncryption.CUSTOMER_MANAGED, + encryptionKey: key, + stream: StreamViewType.KEYS_ONLY, + }); + const topic = new Topic(stack, "Topic"); + const enableDebugLogging = trueCondition(stack, conditions.enableDebugLogging); + + const asgHandler = new AsgHandler(stack, { + USER_AGENT_EXTRA: userAgentExtra, + DEFAULT_TIMEZONE: defaultTimezone, + asgSchedulingRoleName: AsgSchedulingRole.roleName(namespace), + 
configTable, + enableDebugLogging, + encryptionKey: key, + factory: new TestFunctionFactory(), + logRetentionDays, + metricsEnv, + namespace, + rulePrefix, + scheduledTagKey, + snsErrorReportingTopic: topic, + tagKey, + }); + + new AsgScheduler(stack, id, { + USER_AGENT_EXTRA: userAgentExtra, + asgHandler, + configTable, + enableAsgs: trueCondition(stack, conditions.enableAsgs), + enableDebugLogging, + enableSchedulingHubAccount: trueCondition(stack, conditions.enableHubAcctScheduling), + encryptionKey: key, + factory: new TestFunctionFactory(), + logRetentionDays, + metricsEnv, + namespace, + regions, + snsErrorReportingTopic: topic, + solutionVersion, + }); + + return stack; +} diff --git a/source/pipeline/e2e-tests/asg-configure.test.resources.ts b/source/pipeline/e2e-tests/asg-configure.test.resources.ts new file mode 100644 index 00000000..48a753d9 --- /dev/null +++ b/source/pipeline/e2e-tests/asg-configure.test.resources.ts @@ -0,0 +1,55 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { CfnOutput } from "aws-cdk-lib"; +import { AutoScalingGroup } from "aws-cdk-lib/aws-autoscaling"; +import { + AmazonLinuxCpuType, + InstanceClass, + InstanceSize, + InstanceType, + LaunchTemplate, + MachineImage, +} from "aws-cdk-lib/aws-ec2"; +import { NagSuppressions } from "cdk-nag"; +import { Construct } from "constructs"; +import { TestResourceProvider } from "."; +import { defaultTestVPC } from "./utils/vpc-utils"; + +const envKeys = { + configureGroup: "ConfigureGroupName", +}; + +export const resourceParams = { + configureGroupName: process.env[envKeys.configureGroup], + scheduleName: "asg-schedule", +}; + +export class AsgConfigureTestResources implements TestResourceProvider { + createTestResources(scope: Construct): Record { + const launchTemplate = new LaunchTemplate(scope, "ConfigureTemplate", { + instanceType: InstanceType.of(InstanceClass.T4G, InstanceSize.NANO), + machineImage: MachineImage.latestAmazonLinux2023({ cpuType: AmazonLinuxCpuType.ARM_64 }), + }); + const testGroup = new AutoScalingGroup(scope, "ConfigureGroup", { + vpc: defaultTestVPC(scope), + launchTemplate, + }); + + NagSuppressions.addResourceSuppressions(testGroup, [ + { + id: "AwsSolutions-EC26", + reason: "This is an automated test group without any need for encrypted EBS volumes", + }, + { + id: "AwsSolutions-AS3", + reason: "This is an automated test group without any need for notifications", + }, + ]); + + const testGroupOutput = new CfnOutput(scope, envKeys.configureGroup, { + value: testGroup.autoScalingGroupName, + }); + + return { [envKeys.configureGroup]: testGroupOutput }; + } +} diff --git a/source/pipeline/e2e-tests/asg-configure.test.ts b/source/pipeline/e2e-tests/asg-configure.test.ts new file mode 100644 index 00000000..5399a8dc --- /dev/null +++ b/source/pipeline/e2e-tests/asg-configure.test.ts @@ -0,0 +1,152 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { + AutoScalingClient, + BatchDeleteScheduledActionCommand, + CreateOrUpdateTagsCommand, + DeleteTagsCommand, + DescribeScheduledActionsCommand, + ScheduledUpdateGroupAction, + UpdateAutoScalingGroupCommand, +} from "@aws-sdk/client-auto-scaling"; +import { InvokeCommand, LambdaClient } from "@aws-sdk/client-lambda"; +import { delayMinutes } from "."; +import { resourceParams } from "./asg-configure.test.resources"; +import { createSchedule } from "./utils/schedule-test-utils"; + +const asgClient = new AutoScalingClient(); +const lambdaClient = new LambdaClient(); +const groupName = resourceParams.configureGroupName; + +test("group name exists", () => { + expect(groupName).not.toBeUndefined(); +}); + +test("configure AutoScaling Group", async () => { + if (resourceParams.configureGroupName === undefined) { + throw new Error("Unknown group name"); + } + + await deleteAllScheduledScalingActions(resourceParams.configureGroupName); + + await asgClient.send( + new UpdateAutoScalingGroupCommand({ + AutoScalingGroupName: groupName, + MinSize: 1, + DesiredCapacity: 1, + MaxSize: 1, + }), + ); + + await createSchedule({ + name: resourceParams.scheduleName, + description: "testing schedule", + periods: [ + { + name: "asg-period", + description: "testing period", + begintime: "09:00", + endtime: "17:00", + }, + ], + }); + + await asgClient.send( + new CreateOrUpdateTagsCommand({ + Tags: [ + { + Key: "Schedule", + Value: resourceParams.scheduleName, + ResourceType: "auto-scaling-group", + ResourceId: resourceParams.configureGroupName, + PropagateAtLaunch: true, + }, + ], + }), + ); + + const asgOrch = process.env["AsgOrchName"]; + await lambdaClient.send( + new InvokeCommand({ + FunctionName: asgOrch, + InvocationType: "Event", + Payload: JSON.stringify({}), + }), + ); + + await delayMinutes(1); + + const actions = await asgClient.send( + new DescribeScheduledActionsCommand({ + AutoScalingGroupName: groupName, + }), + ); + + 
expect(actions.ScheduledUpdateGroupActions).not.toBeUndefined(); + + if (actions.ScheduledUpdateGroupActions === undefined) { + throw new Error("No actions"); + } + + expect(actions.ScheduledUpdateGroupActions).toHaveLength(2); + const expectedStartAction: ScheduledUpdateGroupAction = { + AutoScalingGroupName: resourceParams.configureGroupName, + ScheduledActionName: expect.any(String), + ScheduledActionARN: expect.any(String), + StartTime: expect.any(Date), + Time: expect.any(Date), + Recurrence: "0 9 * * *", + MinSize: 1, + DesiredCapacity: 1, + MaxSize: 1, + TimeZone: "UTC", + }; + const expectedStopAction: ScheduledUpdateGroupAction = { + AutoScalingGroupName: resourceParams.configureGroupName, + ScheduledActionName: expect.any(String), + ScheduledActionARN: expect.any(String), + StartTime: expect.any(Date), + Time: expect.any(Date), + Recurrence: "0 17 * * *", + MinSize: 0, + DesiredCapacity: 0, + MaxSize: 0, + TimeZone: "UTC", + }; + expect(actions.ScheduledUpdateGroupActions).toContainEqual(expectedStartAction); + expect(actions.ScheduledUpdateGroupActions).toContainEqual(expectedStopAction); +}, 180_000); + +async function deleteAllScheduledScalingActions(groupName: string) { + const actions = await asgClient.send( + new DescribeScheduledActionsCommand({ + AutoScalingGroupName: groupName, + }), + ); + if (actions.ScheduledUpdateGroupActions !== undefined && actions.ScheduledUpdateGroupActions.length > 0) { + await asgClient.send( + new BatchDeleteScheduledActionCommand({ + AutoScalingGroupName: groupName, + ScheduledActionNames: actions.ScheduledUpdateGroupActions.map((action) => action.ScheduledActionName).filter( + (name): name is string => name !== undefined, + ), + }), + ); + } +} + +afterAll(async () => { + if (resourceParams.configureGroupName === undefined) { + throw new Error("Unknown group name"); + } + + await asgClient.send( + new DeleteTagsCommand({ + Tags: [ + { Key: "Schedule", ResourceType: "auto-scaling-group", ResourceId: 
resourceParams.configureGroupName }, + { Key: "scheduled", ResourceType: "auto-scaling-group", ResourceId: resourceParams.configureGroupName }, + ], + }), + ); + await deleteAllScheduledScalingActions(resourceParams.configureGroupName); +}); diff --git a/source/pipeline/e2e-tests/basic-ec2-start-stop.test.resources.ts b/source/pipeline/e2e-tests/basic-ec2-start-stop.test.resources.ts index 4a623b1f..df0143df 100644 --- a/source/pipeline/e2e-tests/basic-ec2-start-stop.test.resources.ts +++ b/source/pipeline/e2e-tests/basic-ec2-start-stop.test.resources.ts @@ -10,7 +10,7 @@ import { TestResourceProvider } from "./index"; import { defaultTestVPC } from "./utils/vpc-utils"; const envKeys = { - ec2InstanceId: "basic_start_stop_instance_id", + ec2InstanceId: "BasicStartStopInstanceId", }; export const resourceParams = { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion @@ -19,14 +19,12 @@ export const resourceParams = { }; export class EC2StartStopTestResources implements TestResourceProvider { createTestResources(scope: Construct) { - const testInstance = new ec2.Instance(scope, "basic-start-stop-instance", { + const testInstance = new ec2.Instance(scope, "basicStartStopInstance", { instanceType: ec2.InstanceType.of(ec2.InstanceClass.T2, ec2.InstanceSize.MICRO), machineImage: ec2.MachineImage.latestAmazonLinux2(), vpc: defaultTestVPC(scope), }); - cdk.Tags.of(testInstance).add("Schedule", resourceParams.startStopTestScheduleName); - const startStopOut = new cdk.CfnOutput(scope, envKeys.ec2InstanceId, { value: testInstance.instanceId, }); diff --git a/source/pipeline/e2e-tests/basic-ec2-start-stop.test.ts b/source/pipeline/e2e-tests/basic-ec2-start-stop.test.ts index 0240695c..4a5c05f1 100644 --- a/source/pipeline/e2e-tests/basic-ec2-start-stop.test.ts +++ b/source/pipeline/e2e-tests/basic-ec2-start-stop.test.ts @@ -5,8 +5,14 @@ import * as ec2 from "@aws-sdk/client-ec2"; import { resourceParams } from "./basic-ec2-start-stop.test.resources"; import { 
delayMinutes } from "./index"; -import { getInstanceState } from "./utils/ec2-test-utils"; -import { createSchedule, currentTimePlus, toTimeStr } from "./utils/schedule-test-utils"; +import { clearScheduleTag, getInstanceState, setScheduleTag } from "./utils/ec2-test-utils"; +import { + createSchedule, + currentTimePlus, + toTimeStr, + minutesToMillis, + waitForExpect, +} from "./utils/schedule-test-utils"; const ec2Client = new ec2.EC2Client({}); const instanceId = resourceParams.ec2InstanceId; @@ -14,37 +20,65 @@ const instanceId = resourceParams.ec2InstanceId; test("instanceId exists", () => { expect(instanceId).not.toBeUndefined(); }); -test("basic ec2 start-stop schedule", async () => { - //stop instance - await ec2Client.send( - new ec2.StopInstancesCommand({ - InstanceIds: [instanceId], - }), - ); - - //confirm stopped - await delayMinutes(1); - expect(await getInstanceState(ec2Client, instanceId)).toBe(ec2.InstanceStateName.stopped); - - //create schedule - await createSchedule({ - name: resourceParams.startStopTestScheduleName, - description: `testing schedule`, - periods: [ - { - name: "ec2-start-stop-period", - description: `testing period`, - begintime: toTimeStr(currentTimePlus(3)), - endtime: toTimeStr(currentTimePlus(7)), +test( + "basic ec2 start-stop schedule", + async () => { + //stop instance + await ec2Client.send( + new ec2.StopInstancesCommand({ + InstanceIds: [instanceId], + }), + ); + + //confirm stopped + await ec2.waitUntilInstanceStopped({ client: ec2Client, maxWaitTime: 300 }, { InstanceIds: [instanceId] }); + expect(await getInstanceState(ec2Client, instanceId)).toBe(ec2.InstanceStateName.stopped); + + //create schedule + await createSchedule({ + name: resourceParams.startStopTestScheduleName, + description: `testing schedule`, + periods: [ + { + name: "ec2-start-stop-period", + description: `testing period`, + begintime: toTimeStr(currentTimePlus(2)), + endtime: toTimeStr(currentTimePlus(5)), + }, + ], + }); + + await 
setScheduleTag(ec2Client, instanceId, resourceParams.startStopTestScheduleName); + + //confirm running during running period + await delayMinutes(2); + await waitForExpect( + async () => { + expect(await getInstanceState(ec2Client, instanceId)).toBeOneOf([ + ec2.InstanceStateName.pending, + ec2.InstanceStateName.running, + ]); }, - ], - }); + minutesToMillis(5), + minutesToMillis(0.5), + ); - //confirm running during running period - await delayMinutes(5); - expect(await getInstanceState(ec2Client, instanceId)).toBe(ec2.InstanceStateName.running); + //confirm stopped after stop time + await delayMinutes(5); + await waitForExpect( + async () => { + expect(await getInstanceState(ec2Client, instanceId)).toBeOneOf([ + ec2.InstanceStateName.stopping, + ec2.InstanceStateName.stopped, + ]); + }, + minutesToMillis(5), + minutesToMillis(0.5), + ); + }, + minutesToMillis(15), +); - //confirm stopped after stop time - await delayMinutes(4); - expect(await getInstanceState(ec2Client, instanceId)).toBe(ec2.InstanceStateName.stopped); -}, 900_000); +afterAll(async () => { + await clearScheduleTag(ec2Client, instanceId); +}); diff --git a/source/pipeline/e2e-tests/basic-rds-start-stop.test.resources.ts b/source/pipeline/e2e-tests/basic-rds-start-stop.test.resources.ts index 1dd80edc..bc279f9b 100644 --- a/source/pipeline/e2e-tests/basic-rds-start-stop.test.resources.ts +++ b/source/pipeline/e2e-tests/basic-rds-start-stop.test.resources.ts @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 import * as cdk from "aws-cdk-lib"; +import { RemovalPolicy } from "aws-cdk-lib"; import * as rds from "aws-cdk-lib/aws-rds"; import { Construct } from "constructs"; @@ -10,7 +11,7 @@ import { defaultTestVPC } from "./utils/vpc-utils"; import { NagSuppressions } from "cdk-nag"; const envKeys = { - rdsInstanceId: "basic_start_stop_rds_instance_id", + rdsInstanceId: "BasicStartStopRdsInstanceId", }; export const resourceParams = { // eslint-disable-next-line 
@typescript-eslint/no-non-null-assertion @@ -19,9 +20,11 @@ export const resourceParams = { }; export class BasicRdsStartStopTestResources implements TestResourceProvider { createTestResources(scope: Construct) { - const rdsInstance = new rds.DatabaseInstance(scope, "rds-basic-start-stop", { + const rdsInstance = new rds.DatabaseInstance(scope, "rdsBasicStartStop", { engine: rds.DatabaseInstanceEngine.POSTGRES, vpc: defaultTestVPC(scope), + backupRetention: cdk.Duration.days(0), // disable automated backups to avoid interfering with tests + removalPolicy: RemovalPolicy.DESTROY, }); cdk.Tags.of(rdsInstance).add("Schedule", resourceParams.taggedScheduleName); @@ -51,6 +54,10 @@ export class BasicRdsStartStopTestResources implements TestResourceProvider { id: "AwsSolutions-SMG4", reason: "Short-lived test instance with no need for secrets rotation", }, + { + id: "AwsSolutions-RDS13", + reason: "Test instance with no content, no need for backups", + }, ]); NagSuppressions.addResourceSuppressions( diff --git a/source/pipeline/e2e-tests/basic-rds-start-stop.test.ts b/source/pipeline/e2e-tests/basic-rds-start-stop.test.ts index cc8fdaac..f121242b 100644 --- a/source/pipeline/e2e-tests/basic-rds-start-stop.test.ts +++ b/source/pipeline/e2e-tests/basic-rds-start-stop.test.ts @@ -5,7 +5,13 @@ import * as rds from "@aws-sdk/client-rds"; import { resourceParams } from "./basic-rds-start-stop.test.resources"; import { delayMinutes } from "./index"; import { getInstanceState } from "./utils/rds-test-utils"; -import { createSchedule, currentTimePlus, toTimeStr } from "./utils/schedule-test-utils"; +import { + createSchedule, + currentTimePlus, + toTimeStr, + minutesToMillis, + waitForExpect, +} from "./utils/schedule-test-utils"; const rdsClient = new rds.RDSClient({}); @@ -28,7 +34,16 @@ test("basic rds start-stop schedule", async () => { DBInstanceIdentifier: resourceParams.rdsInstanceId, }), ); - await delayMinutes(5); + } + + let currentDelayMinutes = 1; + const 
maxDelayMinutes = 5; + while ( + (await rdsClient.send(new rds.DescribeDBInstancesCommand({ DBInstanceIdentifier: resourceParams.rdsInstanceId }))) + .DBInstances?.[0].DBInstanceStatus != "stopped" + ) { + await delayMinutes(currentDelayMinutes); + currentDelayMinutes = Math.min(currentDelayMinutes * 2, maxDelayMinutes); } //create test schedule @@ -39,17 +54,30 @@ test("basic rds start-stop schedule", async () => { { name: "rds-start-stop-period", description: `testing period`, - begintime: toTimeStr(currentTimePlus(3)), - endtime: toTimeStr(currentTimePlus(7)), + begintime: toTimeStr(currentTimePlus(2)), + endtime: toTimeStr(currentTimePlus(5)), }, ], }); //confirm running during running period - await delayMinutes(5); - expect(await getInstanceState(rdsClient, resourceParams.rdsInstanceId)).toBeOneOf(["available", "starting"]); + await delayMinutes(2); //wait for begintime + await waitForExpect( + async () => { + expect(await getInstanceState(rdsClient, resourceParams.rdsInstanceId)).toBeOneOf(["available", "starting"]); + }, + minutesToMillis(5), + minutesToMillis(0.5), + ); //confirm stopped after stop time - await delayMinutes(4); - expect(await getInstanceState(rdsClient, resourceParams.rdsInstanceId)).toBeOneOf(["stopped", "stopping"]); -}, 1_200_000); + await delayMinutes(3); //wait for endtime + + await waitForExpect( + async () => { + expect(await getInstanceState(rdsClient, resourceParams.rdsInstanceId)).toBeOneOf(["stopped", "stopping"]); + }, + minutesToMillis(5), + minutesToMillis(0.5), + ); +}, 2_400_000); diff --git a/source/pipeline/e2e-tests/cli.test.ts b/source/pipeline/e2e-tests/cli.test.ts new file mode 100644 index 00000000..3d5eb866 --- /dev/null +++ b/source/pipeline/e2e-tests/cli.test.ts @@ -0,0 +1,427 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { execSync } from "child_process"; +import { + createPeriod, + createSchedule, + deleteConfigTableItemsWithNamePrefix, + getConfigTableItem, +} from "./utils/cli-utils"; +import { v4 } from "uuid"; + +const hubStack = process.env["HUB_STACK"]; +const dynamoDbItemNamePrefix = `cli-test-${v4()}`; +const reusablePeriodName = `${dynamoDbItemNamePrefix}-reusable-period`; + +beforeAll(async () => { + await createPeriod({ + name: reusablePeriodName, + description: "cli-test", + begintime: "23:00", + endtime: "23:59", + }); +}); + +afterAll(async () => { + await deleteConfigTableItemsWithNamePrefix("period", dynamoDbItemNamePrefix); + await deleteConfigTableItemsWithNamePrefix("schedule", dynamoDbItemNamePrefix); +}); + +test("should successfully print usage instructions", () => { + const result = execSync("python -m instance_scheduler_cli"); + expect(result.toString()).toContain("usage: scheduler-cli"); +}); + +describe("period", () => { + // create-period + test("should successfully create period with all values using the create-period cli command", async () => { + const periodName = `${dynamoDbItemNamePrefix}-create-period-all-values`; + + execSync( + `python -m instance_scheduler_cli create-period` + + ` --stack ${hubStack}` + + ` --name ${periodName}` + + ` --begintime 00:00` + + ` --endtime 12:00` + + ` --description cli-test` + + ` --monthdays 1` + + ` --months "*"` + + ` --weekdays mon-fri`, + ); + expect(await getConfigTableItem("period", periodName)).toEqual({ + type: "period", + name: periodName, + description: "cli-test", + begintime: "00:00", + endtime: "12:00", + weekdays: new Set(["mon-fri"]), + monthdays: new Set(["1"]), + months: new Set(["*"]), + }); + }, 10_000); + + test("should successfully create period with min values using the create-period cli command", async () => { + const periodName = `${dynamoDbItemNamePrefix}-create-period-min-values`; + + execSync( + `python -m instance_scheduler_cli 
create-period` + + ` --stack ${hubStack}` + + ` --name ${periodName}` + + ` --endtime 12:00`, + ); + expect(await getConfigTableItem("period", periodName)).toEqual({ + type: "period", + name: periodName, + endtime: "12:00", + }); + }, 10_000); + + test("should error from create-period cli command when period already exists", async () => { + const periodName = `${dynamoDbItemNamePrefix}-create-period-already-exists`; + await createPeriod({ + name: periodName, + description: "cli-test", + begintime: "00:00", + endtime: "12:00", + }); + + expect(() => + execSync( + `python -m instance_scheduler_cli create-period` + + ` --stack ${hubStack}` + + ` --name ${periodName}` + + ` --description cli-test` + + ` --begintime 00:00` + + ` --endtime 12:00`, + ), + ).toThrow("Command failed:"); + }, 10_000); + + // delete-period + test("should successfully delete period with delete-period cli command", async () => { + const periodName = `${dynamoDbItemNamePrefix}-delete-period`; + await createPeriod({ + name: periodName, + description: "cli-test", + begintime: "00:00", + endtime: "12:00", + }); + + const response = execSync( + `python -m instance_scheduler_cli delete-period --stack ${hubStack} --name ${periodName}`, + ); + expect(JSON.parse(response.toString()).Period).toEqual(periodName); + expect(async () => await getConfigTableItem("period", periodName)).rejects.toThrow("Did not find item"); + }, 10_000); + + test("should error from delete-period cli command when period does not exist", () => { + const periodName = `${dynamoDbItemNamePrefix}-delete-period-does-not-exist`; + + expect(() => + execSync(`python -m instance_scheduler_cli delete-period --stack ${hubStack} --name ${periodName}`), + ).toThrow("Command failed:"); + }); + + // describe-periods + test("should successfully describe all periods with describe-periods cli command", () => { + const response = execSync(`python -m instance_scheduler_cli describe-periods --stack ${hubStack}`); + 
expect(JSON.parse(response.toString()).Periods).toEqual(expect.any(Array)); + }); + + test("should successfully describe single period with describe-periods cli command", async () => { + const periodName = `${dynamoDbItemNamePrefix}-describe-single-period`; + await createPeriod({ + name: periodName, + description: "cli-test", + begintime: "00:00", + endtime: "12:00", + }); + + const response = execSync( + `python -m instance_scheduler_cli describe-periods --stack ${hubStack} --name ${periodName}`, + ); + expect(JSON.parse(response.toString()).Periods[0]).toEqual({ + Type: "period", + Name: periodName, + Description: "cli-test", + Begintime: "00:00", + Endtime: "12:00", + }); + }, 10_000); + + test("should error from describe-period cli command when period does not exist", () => { + const periodName = `${dynamoDbItemNamePrefix}-describe-periods-does-not-exist`; + + expect(() => + execSync(`python -m instance_scheduler_cli describe-periods --stack ${hubStack} --name ${periodName}`), + ).toThrow("Command failed:"); + }); + + // update-period + test("should successfully update period with update-period cli command", async () => { + const periodName = `${dynamoDbItemNamePrefix}-update-period`; + await createPeriod({ + name: periodName, + description: "cli-test", + begintime: "00:00", + endtime: "12:00", + }); + + const response = execSync( + `python -m instance_scheduler_cli update-period --stack ${hubStack} --name ${periodName} --weekdays sat-sun`, + ); + expect(JSON.parse(response.toString()).Period).toEqual({ + Type: "period", + Name: periodName, + Weekdays: ["sat-sun"], + }); + expect(await getConfigTableItem("period", periodName)).toEqual({ + type: "period", + name: periodName, + weekdays: new Set(["sat-sun"]), + }); + }, 10_000); + + test("should error from update-period cli command when period does not exist", () => { + const periodName = `${dynamoDbItemNamePrefix}-update-period-does-not-exist`; + + expect(() => + execSync( + `python -m instance_scheduler_cli 
update-period --stack ${hubStack} --name ${periodName} --weekdays sat-sun`, + ), + ).toThrow("Command failed:"); + }); +}); + +describe("schedule", () => { + // create-schedule + test("should successfully create schedule with all values using the create-schedule cli command", async () => { + const scheduleName = `${dynamoDbItemNamePrefix}-create-schedule-all-values`; + + execSync( + `python -m instance_scheduler_cli create-schedule` + + ` --stack ${hubStack}` + + ` --name ${scheduleName}` + + ` --periods ${reusablePeriodName}` + + ` --description test` + + ` --timezone UTC` + + ` --override-status running` + + ` --do-not-stop-new-instances` + + ` --ssm-maintenance-window test` + + ` --retain-running` + + ` --enforced` + + ` --hibernate`, + ); + expect(await getConfigTableItem("schedule", scheduleName)).toEqual({ + description: "test", + enforced: true, + hibernate: true, + name: scheduleName, + override_status: "running", + periods: new Set([reusablePeriodName]), + retain_running: true, + ssm_maintenance_window: new Set(["test"]), + stop_new_instances: false, + timezone: "UTC", + type: "schedule", + }); + }, 10_000); + + test("should successfully create schedule with min values using the create-schedule cli command", async () => { + const scheduleName = `${dynamoDbItemNamePrefix}-create-schedule-min-values`; + + execSync( + `python -m instance_scheduler_cli create-schedule` + + ` --stack ${hubStack}` + + ` --name ${scheduleName}` + + ` --periods ${reusablePeriodName}`, + ); + expect(await getConfigTableItem("schedule", scheduleName)).toEqual({ + type: "schedule", + enforced: false, + hibernate: false, + name: scheduleName, + periods: new Set([reusablePeriodName]), + retain_running: false, + stop_new_instances: true, + }); + }, 10_000); + + test("should error from create-schedule cli command when schedule already exists", async () => { + const scheduleName = `${dynamoDbItemNamePrefix}-create-schedule-already-exists`; + await createSchedule({ + name: scheduleName, + 
description: "cli-test", + periods: new Set([reusablePeriodName]), + }); + + expect(() => + execSync( + `python -m instance_scheduler_cli create-schedule` + + ` --stack ${hubStack}` + + ` --name ${scheduleName}` + + ` --periods ${reusablePeriodName}`, + ), + ).toThrow("Command failed:"); + }, 10_000); + + // delete-schedule + test("should successfully delete schedule with delete-schedule cli command", async () => { + const scheduleName = `${dynamoDbItemNamePrefix}-delete-schedule`; + await createSchedule({ + name: scheduleName, + description: "cli-test", + periods: new Set([reusablePeriodName]), + }); + + const response = execSync( + `python -m instance_scheduler_cli delete-schedule --stack ${hubStack} --name ${scheduleName}`, + ); + expect(JSON.parse(response.toString()).Schedule).toEqual(scheduleName); + expect(async () => await getConfigTableItem("schedule", scheduleName)).rejects.toThrow("Did not find item"); + }, 10_000); + + test("should error from delete-schedule cli command when schedule does not exist", () => { + const scheduleName = `${dynamoDbItemNamePrefix}-delete-schedule-does-not-exist`; + + expect(() => + execSync(`python -m instance_scheduler_cli delete-schedule --stack ${hubStack} --name ${scheduleName}`), + ).toThrow("Command failed:"); + }); + + // describe-schedule + test("should successfully describe all schedules with describe-schedules cli command", () => { + const response = execSync(`python -m instance_scheduler_cli describe-schedules --stack ${hubStack}`); + expect(JSON.parse(response.toString()).Schedules).toEqual(expect.any(Array)); + }); + + test("should successfully describe single schedule with describe-schedules cli command", async () => { + const scheduleName = `${dynamoDbItemNamePrefix}-describe-single-schedule`; + await createSchedule({ + name: scheduleName, + description: "cli-test", + periods: new Set([reusablePeriodName]), + }); + + const response = execSync( + `python -m instance_scheduler_cli describe-schedules --stack 
${hubStack} --name ${scheduleName}`, + ); + expect(JSON.parse(response.toString()).Schedules[0]).toEqual({ + Type: "schedule", + Name: scheduleName, + Description: "cli-test", + Periods: [reusablePeriodName], + }); + }, 10_000); + + test("should error from describe-schedule cli command when schedule does not exist", () => { + const scheduleName = `${dynamoDbItemNamePrefix}-describe-schedules-does-not-exist`; + + expect(() => + execSync(`python -m instance_scheduler_cli describe-schedules --stack ${hubStack} --name ${scheduleName}`), + ).toThrow("Command failed:"); + }); + + // update-schedule + test("should successfully update schedule with update-schedule cli command", async () => { + const scheduleName = `${dynamoDbItemNamePrefix}-update-schedule`; + await createSchedule({ + name: scheduleName, + description: "cli-test", + periods: new Set([reusablePeriodName]), + }); + + const response = execSync( + `python -m instance_scheduler_cli update-schedule` + + ` --stack ${hubStack}` + + ` --name ${scheduleName}` + + ` --periods ${reusablePeriodName}` + + ` --description updated-description`, + ); + expect(JSON.parse(response.toString()).Schedule).toEqual({ + Description: "updated-description", + Enforced: false, + Hibernate: false, + Name: scheduleName, + Type: "schedule", + Periods: [reusablePeriodName], + RetainRunning: false, + StopNewInstances: true, + }); + expect(await getConfigTableItem("schedule", scheduleName)).toEqual({ + description: "updated-description", + enforced: false, + hibernate: false, + name: scheduleName, + type: "schedule", + periods: new Set([reusablePeriodName]), + retain_running: false, + stop_new_instances: true, + }); + }, 10_000); + + test("should error from update-schedule cli command when schedule does not exist", () => { + const scheduleName = `${dynamoDbItemNamePrefix}-update-schedule-does-not-exist`; + + expect(() => + execSync( + `python -m instance_scheduler_cli update-schedule` + + ` --stack ${hubStack}` + + ` --name 
${scheduleName}` + + ` --periods ${reusablePeriodName}` + + ` --description updated-description`, + ), + ).toThrow("Command failed:"); + }); + + // describe-schedule-usage + test("should successfully describe schedule usage with describe-schedule-usage cli command", async () => { + const scheduleName = `${dynamoDbItemNamePrefix}-describe-schedule-usage`; + await createSchedule({ + name: scheduleName, + description: "cli-test", + periods: new Set([reusablePeriodName]), + timezone: "UTC", + }); + + // Response contains current date in two different formats (yyyy-mm-dd, mm/dd/yy) + // Need to calculate those for the current date to form the expected response object + const date = new Date(); + const day = String(date.getUTCDate()).padStart(2, "0"); + const month = String(date.getUTCMonth() + 1).padStart(2, "0"); // month is 0 indexed + const year = date.getUTCFullYear().toString(); + + const yyyymmdd = `${year}-${month}-${day}`; + const mmddyy = `${month}/${day}/${year.slice(-2)}`; + + const response = execSync( + `python -m instance_scheduler_cli describe-schedule-usage --stack ${hubStack} --name ${scheduleName}`, + ); + expect(JSON.parse(response.toString())).toEqual({ + Schedule: scheduleName, + Usage: { + [yyyymmdd]: { + RunningPeriods: { + [reusablePeriodName.charAt(0).toUpperCase() + reusablePeriodName.slice(1)]: { + // cli handler converts periodNames to PascalCase + Begin: `${mmddyy} 23:00:00`, + End: `${mmddyy} 23:59:00`, + BillingHours: 1, + BillingSeconds: 3540, + }, + }, + BillingSeconds: 3540, + BillingHours: 1, + }, + }, + }); + }); + + test("should error from describe-schedule-usage cli command when schedule does not exist", () => { + const scheduleName = `${dynamoDbItemNamePrefix}-describe-schedule-usage-does-not-exist`; + + expect(() => + execSync(`python -m instance_scheduler_cli describe-schedule-usage --stack ${hubStack} --name ${scheduleName}`), + ).toThrow("Command failed:"); + }, 10_000); +}); diff --git 
a/source/pipeline/e2e-tests/docdb-start-stop-test-resources.ts b/source/pipeline/e2e-tests/docdb-start-stop-test-resources.ts new file mode 100644 index 00000000..ede40e71 --- /dev/null +++ b/source/pipeline/e2e-tests/docdb-start-stop-test-resources.ts @@ -0,0 +1,63 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as docdb from "aws-cdk-lib/aws-docdb"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; + +import { TestResourceProvider } from "./index"; +import { Construct } from "constructs"; +import { defaultTestVPC } from "./utils/vpc-utils"; +import { CfnOutput, RemovalPolicy, SecretValue, Tags } from "aws-cdk-lib"; +import { NagSuppressions } from "cdk-nag"; + +const envKeys = { + docdbInstanceId: "DocdbStartStopInstance", +}; +export const resourceParams = { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + docdbInstanceId: process.env[envKeys.docdbInstanceId]!, + startStopTestScheduleName: "docdb_start_stop_test_schedule", +}; + +export class DocdbStartStopTestResources implements TestResourceProvider { + createTestResources(scope: Construct): Record<string, CfnOutput> { + const docdbCluster = new docdb.DatabaseCluster(scope, "docdbStartStopTestCluster", { + instanceType: ec2.InstanceType.of(ec2.InstanceClass.R6G, ec2.InstanceSize.LARGE), //smallest supported size + vpc: defaultTestVPC(scope), + masterUser: { + username: "username", + password: SecretValue.unsafePlainText("password"), + }, + removalPolicy: RemovalPolicy.DESTROY, + }); + + Tags.of(docdbCluster).add("Schedule", resourceParams.startStopTestScheduleName); + + const docdbInstanceIdOut = new CfnOutput(scope, envKeys.docdbInstanceId, { + value: docdbCluster.clusterIdentifier, + }); + + NagSuppressions.addResourceSuppressions(docdbCluster, [ + { + id: "AwsSolutions-DOC2", + reason: "This is an automated test instance that will be only started and then stopped again", + }, + { + id: "AwsSolutions-DOC3", + reason: "This is an 
automated test instance that will be only started and then stopped again", + }, + { + id: "AwsSolutions-DOC4", + reason: "This is an automated test instance that will be only started and then stopped again", + }, + { + id: "AwsSolutions-DOC5", + reason: "This is an automated test instance that will be only started and then stopped again", + }, + ]); + + return { + [envKeys.docdbInstanceId]: docdbInstanceIdOut, + }; + } +} diff --git a/source/pipeline/e2e-tests/docdb-start-stop-test.ts b/source/pipeline/e2e-tests/docdb-start-stop-test.ts new file mode 100644 index 00000000..fb7ec414 --- /dev/null +++ b/source/pipeline/e2e-tests/docdb-start-stop-test.ts @@ -0,0 +1,63 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import * as docdb from "@aws-sdk/client-docdb"; +import { resourceParams } from "./docdb-start-stop-test-resources"; +import { createSchedule, currentTimePlus, toTimeStr } from "./utils/schedule-test-utils"; +import { delayMinutes } from "./index"; + +async function getClusterState(client: docdb.DocDBClient, clusterId: string) { + const result = await client.send( + new docdb.DescribeDBClustersCommand({ + DBClusterIdentifier: clusterId, + }), + ); + + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + if (!result.DBClusters) throw new Error(`Cluster with id of ${clusterId} Not Found`); + + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + return result.DBClusters![0].Status!; +} + +describe("docdb cluster", () => { + const clusterId = resourceParams.docdbInstanceId; + const docdbClient = new docdb.DocDBClient({}); + + test("clusterId exists", () => { + expect(clusterId).not.toBeUndefined(); + }); + + test("basic docdb start-stop", async () => { + const preTestState = await getClusterState(docdbClient, clusterId); + //ensure instance is stopped + if (!["stopped", "stopping"].includes(preTestState)) { + console.log(`cluster in state ${preTestState} 
before test. Attempting to stop before running test...`); + await docdbClient.send( + new docdb.StopDBClusterCommand({ + DBClusterIdentifier: clusterId, + }), + ); + } + + await createSchedule({ + name: resourceParams.startStopTestScheduleName, + description: "docdb test schedule", + periods: [ + { + name: "docdb-start-stop-period", + description: `testing period`, + begintime: toTimeStr(currentTimePlus(3)), + endtime: toTimeStr(currentTimePlus(7)), + }, + ], + }); + + //confirm running during running period + await delayMinutes(5); //2 minutes after start defined in schedule (1-2 scheduling executions) + expect(await getClusterState(docdbClient, resourceParams.docdbInstanceId)).toBeOneOf(["available", "starting"]); + + //confirm stopped after stop time + await delayMinutes(5); //2 minutes after stop defined in schedule (1-2 scheduling executions) + expect(await getClusterState(docdbClient, resourceParams.docdbInstanceId)).toBeOneOf(["stopped", "stopping"]); + }); +}); diff --git a/source/pipeline/e2e-tests/ec2-maintenance-window.test.resources.ts b/source/pipeline/e2e-tests/ec2-maintenance-window.test.resources.ts index d9cfd926..a6707079 100644 --- a/source/pipeline/e2e-tests/ec2-maintenance-window.test.resources.ts +++ b/source/pipeline/e2e-tests/ec2-maintenance-window.test.resources.ts @@ -10,7 +10,7 @@ import { TestResourceProvider } from "./index"; import { defaultTestVPC } from "./utils/vpc-utils"; const envKeys = { - ec2InstanceId: "ec2_maintenance_window_start_instance_id", + ec2InstanceId: "Ec2MaintenanceWindowStartInstanceId", }; export const resourceParams = { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion @@ -19,14 +19,12 @@ export const resourceParams = { }; export class EC2MaintenanceWindowStartTestResource implements TestResourceProvider { createTestResources(scope: Construct) { - const testInstance = new ec2.Instance(scope, "maint_window_start_instance", { + const testInstance = new ec2.Instance(scope, 
"maintWindowStartInstance", { instanceType: ec2.InstanceType.of(ec2.InstanceClass.T2, ec2.InstanceSize.MICRO), machineImage: ec2.MachineImage.latestAmazonLinux2(), vpc: defaultTestVPC(scope), }); - cdk.Tags.of(testInstance).add("Schedule", resourceParams.maintWindowTestScheduleName); - const startStopOut = new cdk.CfnOutput(scope, envKeys.ec2InstanceId, { value: testInstance.instanceId, }); diff --git a/source/pipeline/e2e-tests/ec2-maintenance-window.test.ts b/source/pipeline/e2e-tests/ec2-maintenance-window.test.ts index dc9adea8..b1bd9b9c 100644 --- a/source/pipeline/e2e-tests/ec2-maintenance-window.test.ts +++ b/source/pipeline/e2e-tests/ec2-maintenance-window.test.ts @@ -3,11 +3,17 @@ import * as ssm from "@aws-sdk/client-ssm"; import { CreateMaintenanceWindowCommand, DeleteMaintenanceWindowCommand } from "@aws-sdk/client-ssm"; -import { createSchedule, currentTimePlus } from "./utils/schedule-test-utils"; +import { + createSchedule, + currentTimePlus, + toTimeStr, + minutesToMillis, + waitForExpect, +} from "./utils/schedule-test-utils"; import * as ec2 from "@aws-sdk/client-ec2"; import { resourceParams } from "./ec2-maintenance-window.test.resources"; -import { delayMinutes } from "./index"; -import { getInstanceState } from "./utils/ec2-test-utils"; +import { clearScheduleTag, getInstanceState, setScheduleTag } from "./utils/ec2-test-utils"; +import { v4 as uuidv4 } from "uuid"; const ssmClient = new ssm.SSMClient({}); const ec2Client = new ec2.EC2Client({}); @@ -16,54 +22,79 @@ const instanceId = resourceParams.ec2InstanceId; function getCronStrForTime(time: Date) { return `cron(0 ${time.getUTCMinutes()} ${time.getUTCHours()} ? 
* *)`; } -test("maintenance window start behavior", async () => { - //stop instance - await ec2Client.send( - new ec2.StopInstancesCommand({ - InstanceIds: [instanceId], - }), - ); +test( + "maintenance window start behavior", + async () => { + //stop instance + await ec2Client.send( + new ec2.StopInstancesCommand({ + InstanceIds: [instanceId], + }), + ); - //confirm stopped - await delayMinutes(1); - expect(await getInstanceState(ec2Client, instanceId)).toBe(ec2.InstanceStateName.stopped); + //confirm stopped + await ec2.waitUntilInstanceStopped({ client: ec2Client, maxWaitTime: 300 }, { InstanceIds: [instanceId] }); + expect(await getInstanceState(ec2Client, instanceId)).toBe(ec2.InstanceStateName.stopped); - //create maintenance window - let window_id: string | undefined; - await ssmClient - .send( - new CreateMaintenanceWindowCommand({ - Name: "test-window", - Description: "e2e test window", - Schedule: getCronStrForTime(currentTimePlus(12)), - ScheduleTimezone: "UTC", - Duration: 1, - Cutoff: 0, - AllowUnassociatedTargets: false, - Tags: [], - }), - ) - .then((response) => { - window_id = response.WindowId; - }); + //create maintenance window + let windowId: string | undefined; + const windowName = `test-window-${uuidv4()}`; + await ssmClient + .send( + new CreateMaintenanceWindowCommand({ + Name: windowName, + Description: "e2e test window", + Schedule: getCronStrForTime(currentTimePlus(12)), + ScheduleTimezone: "UTC", + Duration: 1, + Cutoff: 0, + AllowUnassociatedTargets: false, + Tags: [], + }), + ) + .then((response) => { + windowId = response.WindowId; + }); - try { - //create schedule - await createSchedule({ - name: resourceParams.maintWindowTestScheduleName, - description: `testing schedule`, - use_maintenance_window: true, - ssm_maintenance_window: "test-window", - }); + try { + //create schedule + await createSchedule({ + name: resourceParams.maintWindowTestScheduleName, + description: `testing schedule`, + ssm_maintenance_window: [windowName], + 
periods: [ + { + name: "ec2-mw-unused-period", + description: "testing period", + begintime: toTimeStr(currentTimePlus(60)), + endtime: toTimeStr(currentTimePlus(65)), + }, + ], + }); + await setScheduleTag(ec2Client, instanceId, resourceParams.maintWindowTestScheduleName); - //confirm instance started in anticipation of upcoming maintenance window - await delayMinutes(5); - expect(await getInstanceState(ec2Client, instanceId)).toBe(ec2.InstanceStateName.running); - } finally { - await ssmClient.send( - new DeleteMaintenanceWindowCommand({ - WindowId: window_id, - }), - ); - } -}, 900_000); + //confirm instance started in anticipation of upcoming maintenance window + await waitForExpect( + async () => { + expect(await getInstanceState(ec2Client, instanceId)).toBeOneOf([ + ec2.InstanceStateName.pending, + ec2.InstanceStateName.running, + ]); + }, + minutesToMillis(5), + minutesToMillis(0.5), + ); + } finally { + await ssmClient.send( + new DeleteMaintenanceWindowCommand({ + WindowId: windowId, + }), + ); + } + }, + minutesToMillis(15), +); + +afterAll(async () => { + await clearScheduleTag(ec2Client, instanceId); +}); diff --git a/source/pipeline/e2e-tests/encrypted-ec2-start.test.resources.ts b/source/pipeline/e2e-tests/encrypted-ec2-start.test.resources.ts new file mode 100644 index 00000000..f84eeec4 --- /dev/null +++ b/source/pipeline/e2e-tests/encrypted-ec2-start.test.resources.ts @@ -0,0 +1,68 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import * as cdk from "aws-cdk-lib"; +import { RemovalPolicy } from "aws-cdk-lib"; +import * as kms from "aws-cdk-lib/aws-kms"; + +import { Construct } from "constructs"; +import { NagSuppressions } from "cdk-nag"; +import { TestResourceProvider } from "./index"; +import { defaultTestVPC } from "./utils/vpc-utils"; + +const envKeys = { + ec2InstanceId: "EncryptedEc2InstanceId", +}; +export const resourceParams = { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + ec2InstanceId: process.env[envKeys.ec2InstanceId]!, + encryptedEc2ScheduleName: "ec2_encrypted_start", +}; +export class EncryptedEc2StartTestResources implements TestResourceProvider { + createTestResources(scope: Construct) { + const kmsKey = new kms.Key(scope, "encryptedEc2TestKey", { + enableKeyRotation: true, + removalPolicy: RemovalPolicy.DESTROY, + }); + + const testInstance = new ec2.Instance(scope, "EncryptedStartInstance", { + instanceType: ec2.InstanceType.of(ec2.InstanceClass.T2, ec2.InstanceSize.MICRO), + machineImage: ec2.MachineImage.latestAmazonLinux2023(), + vpc: defaultTestVPC(scope), + blockDevices: [ + { + deviceName: "/dev/sdg", + volume: ec2.BlockDeviceVolume.ebs(5, { + encrypted: true, + kmsKey: kmsKey, + deleteOnTermination: true, + }), + }, + ], + }); + + const encryptedEc2Out = new cdk.CfnOutput(scope, envKeys.ec2InstanceId, { + value: testInstance.instanceId, + }); + + NagSuppressions.addResourceSuppressions(testInstance, [ + { + id: "AwsSolutions-EC26", + reason: "This is an automated test instance without any need for encrypted EBS volumes", + }, + { + id: "AwsSolutions-EC28", + reason: "This is a test instance that only ever needs to be started/stopped (no workloads)", + }, + { + id: "AwsSolutions-EC29", + reason: "This is an automated test instance without any need for termination protection", + }, + ]); + + return { + [envKeys.ec2InstanceId]: encryptedEc2Out, + }; 
+ } +} diff --git a/source/pipeline/e2e-tests/encrypted-ec2-start.test.ts b/source/pipeline/e2e-tests/encrypted-ec2-start.test.ts new file mode 100644 index 00000000..eb4a20a3 --- /dev/null +++ b/source/pipeline/e2e-tests/encrypted-ec2-start.test.ts @@ -0,0 +1,48 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as ec2 from "@aws-sdk/client-ec2"; + +import { resourceParams } from "./encrypted-ec2-start.test.resources"; +import { delayMinutes } from "./index"; +import { clearScheduleTag, getInstanceState, setScheduleTag } from "./utils/ec2-test-utils"; +import { createSchedule } from "./utils/schedule-test-utils"; + +const ec2Client = new ec2.EC2Client({}); +const instanceId = resourceParams.ec2InstanceId; + +test("instanceId exists", () => { + expect(instanceId).not.toBeUndefined(); +}); +test("encrypted EC2 start", async () => { + //stop instance + await ec2Client.send( + new ec2.StopInstancesCommand({ + InstanceIds: [instanceId], + }), + ); + + //confirm stopped + await ec2.waitUntilInstanceStopped({ client: ec2Client, maxWaitTime: 300 }, { InstanceIds: [instanceId] }); + expect(await getInstanceState(ec2Client, instanceId)).toBe(ec2.InstanceStateName.stopped); + + //create schedule + await createSchedule({ + name: resourceParams.encryptedEc2ScheduleName, + description: `always-running test schedule`, + enforced: true, + override_status: "running", + }); + await setScheduleTag(ec2Client, instanceId, resourceParams.encryptedEc2ScheduleName); + + //confirm running during running period + await delayMinutes(2); + expect(await getInstanceState(ec2Client, instanceId)).toBeOneOf([ + ec2.InstanceStateName.pending, + ec2.InstanceStateName.running, + ]); +}, 900_000); + +afterAll(async () => { + await clearScheduleTag(ec2Client, instanceId); +}); diff --git a/source/pipeline/e2e-tests/index.ts b/source/pipeline/e2e-tests/index.ts index c75335d0..b6078ead 100644 --- 
a/source/pipeline/e2e-tests/index.ts +++ b/source/pipeline/e2e-tests/index.ts @@ -1,11 +1,14 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 - +import { CfnOutput } from "aws-cdk-lib"; import { Construct } from "constructs"; +import { AsgConfigureTestResources } from "./asg-configure.test.resources"; import { EC2StartStopTestResources } from "./basic-ec2-start-stop.test.resources"; -import { CfnOutput } from "aws-cdk-lib"; import { BasicRdsStartStopTestResources } from "./basic-rds-start-stop.test.resources"; +import { DocdbStartStopTestResources } from "./docdb-start-stop-test-resources"; import { EC2MaintenanceWindowStartTestResource } from "./ec2-maintenance-window.test.resources"; +import { EncryptedEc2StartTestResources } from "./encrypted-ec2-start.test.resources"; +import { NeptuneStartStopTestResources } from "./neptune-start-stop-test.resources"; export interface TestResourceProvider { createTestResources(scope: Construct): Record; @@ -15,6 +18,10 @@ export const testResourceProviders: TestResourceProvider[] = [ new EC2StartStopTestResources(), new BasicRdsStartStopTestResources(), new EC2MaintenanceWindowStartTestResource(), + new DocdbStartStopTestResources(), + new NeptuneStartStopTestResources(), + new EncryptedEc2StartTestResources(), + new AsgConfigureTestResources(), ]; export const delaySeconds = (seconds: number) => new Promise((res) => setTimeout(res, seconds * 1000)); diff --git a/source/pipeline/e2e-tests/neptune-start-stop-test.resources.ts b/source/pipeline/e2e-tests/neptune-start-stop-test.resources.ts new file mode 100644 index 00000000..3a7a69ab --- /dev/null +++ b/source/pipeline/e2e-tests/neptune-start-stop-test.resources.ts @@ -0,0 +1,58 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import * as neptune from "@aws-cdk/aws-neptune-alpha"; +import * as cdk from "aws-cdk-lib"; +import { CfnOutput, RemovalPolicy } from "aws-cdk-lib"; + +import { TestResourceProvider } from "./index"; +import { Construct } from "constructs"; +import { defaultTestVPC } from "./utils/vpc-utils"; +import { NagSuppressions } from "cdk-nag"; + +const envKeys = { + neptuneInstanceId: "NeptuneStartStopInstanceId", +}; +export const resourceParams = { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + neptuneInstanceId: process.env[envKeys.neptuneInstanceId]!, + startStopTestScheduleName: "neptune_start_stop_test_schedule", +}; +export class NeptuneStartStopTestResources implements TestResourceProvider { + createTestResources(scope: Construct): Record { + const neptuneCluster = new neptune.DatabaseCluster(scope, "NeptuneStartStopTestCluster", { + instanceType: neptune.InstanceType.T3_MEDIUM, + vpc: defaultTestVPC(scope), + removalPolicy: RemovalPolicy.DESTROY, + }); + + cdk.Tags.of(neptuneCluster).add("Schedule", resourceParams.startStopTestScheduleName); + + const neptuneInstanceIdOut = new cdk.CfnOutput(scope, envKeys.neptuneInstanceId, { + value: neptuneCluster.clusterIdentifier, + }); + + NagSuppressions.addResourceSuppressions( + neptuneCluster, + [ + { + id: "AwsSolutions-N2", + reason: "This is an automated test instance that will be only started and then stopped again", + }, + { + id: "AwsSolutions-N3", + reason: "This is an automated test instance that will be only started and then stopped again", + }, + { + id: "AwsSolutions-N5", + reason: "This is an automated test instance that will be only started and then stopped again", + }, + ], + true, + ); + + return { + [envKeys.neptuneInstanceId]: neptuneInstanceIdOut, + }; + } +} diff --git a/source/pipeline/e2e-tests/neptune-start-stop-test.ts b/source/pipeline/e2e-tests/neptune-start-stop-test.ts new file mode 100644 index 00000000..35183860 --- 
/dev/null +++ b/source/pipeline/e2e-tests/neptune-start-stop-test.ts @@ -0,0 +1,62 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import * as neptune from "@aws-sdk/client-neptune"; +import { resourceParams } from "./neptune-start-stop-test.resources"; +import { createSchedule, currentTimePlus, toTimeStr } from "./utils/schedule-test-utils"; +import { delayMinutes } from "./index"; + +async function getClusterState(client: neptune.NeptuneClient, clusterId: string) { + const result = await client.send( + new neptune.DescribeDBClustersCommand({ + DBClusterIdentifier: clusterId, + }), + ); + + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + if (!result.DBClusters) throw new Error(`Cluster with id of ${clusterId} Not Found`); + + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + return result.DBClusters![0].Status!; +} +describe("neptune cluster", () => { + const clusterId = resourceParams.neptuneInstanceId; + const neptuneClient = new neptune.NeptuneClient({}); + + test("clusterId exists", () => { + expect(clusterId).not.toBeUndefined(); + }); + + test("basic neptune start-stop", async () => { + const preTestState = await getClusterState(neptuneClient, clusterId); + //ensure instance is stopped + if (!["stopped", "stopping"].includes(preTestState)) { + console.log(`cluster in state ${preTestState} before test. 
Attempting to stop before running test...`); + await neptuneClient.send( + new neptune.StopDBClusterCommand({ + DBClusterIdentifier: clusterId, + }), + ); + } + + await createSchedule({ + name: resourceParams.startStopTestScheduleName, + description: "neptune test schedule", + periods: [ + { + name: "neptune-start-stop-period", + description: `testing period`, + begintime: toTimeStr(currentTimePlus(3)), + endtime: toTimeStr(currentTimePlus(7)), + }, + ], + }); + + //confirm running during running period + await delayMinutes(5); //2 minutes after start defined in schedule (1-2 scheduling executions) + expect(await getClusterState(neptuneClient, resourceParams.neptuneInstanceId)).toBeOneOf(["available", "starting"]); + + //confirm stopped after stop time + await delayMinutes(5); //2 minutes after stop defined in schedule (1-2 scheduling executions) + expect(await getClusterState(neptuneClient, resourceParams.neptuneInstanceId)).toBeOneOf(["stopped", "stopping"]); + }); +}); diff --git a/source/pipeline/e2e-tests/spoke-deregistration-runbook.test.ts b/source/pipeline/e2e-tests/spoke-deregistration-runbook.test.ts new file mode 100644 index 00000000..90b127ca --- /dev/null +++ b/source/pipeline/e2e-tests/spoke-deregistration-runbook.test.ts @@ -0,0 +1,63 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import * as dynamodb from "@aws-sdk/client-dynamodb"; +import * as ssm from "@aws-sdk/client-ssm"; +import { CfnStackResourceFinder } from "./utils/cfn-utils"; +import { delaySeconds } from "./index"; + +const dynamoClient = new dynamodb.DynamoDBClient(); +const ssmClient = new ssm.SSMClient(); + +describe("SpokeRegistrationRunbook", () => { + test("deregister account", async () => { + const hubStackName = process.env["HUB_STACK"]; + if (!hubStackName) { + throw new Error(`Missing required environment variable: HUB_STACK`); + } + const stackResourceFinder = await CfnStackResourceFinder.fromStackName(hubStackName); + const configTableName = stackResourceFinder.findResourceByPartialId("ConfigTable")?.PhysicalResourceId; + const spokeRegistrationRunbookName = + stackResourceFinder.findResourceByPartialId("SpokeDeregistrationRunbook")?.PhysicalResourceId; + + await dynamoClient.send( + new dynamodb.UpdateItemCommand({ + TableName: configTableName, + Key: { type: { S: "config" }, name: { S: "scheduler" } }, + UpdateExpression: "ADD remote_account_ids :a", + ExpressionAttributeValues: { ":a": { SS: ["111111111111", "222222222222", "333333333333"] } }, + }), + ); + + await ssmClient.send( + new ssm.StartAutomationExecutionCommand({ + DocumentName: spokeRegistrationRunbookName, + Parameters: { + AccountId: ["111111111111"], + }, + }), + ); + + // The automation runs almost instantly ( < 1s ) but delay is still required to check results + await delaySeconds(10); + + const accounts = await dynamoClient.send( + new dynamodb.GetItemCommand({ + TableName: configTableName, + Key: { type: { S: "config" }, name: { S: "scheduler" } }, + ProjectionExpression: "remote_account_ids", + }), + ); + + expect(accounts.Item?.remote_account_ids.SS).not.toContain("111111111111"); + expect(accounts.Item?.remote_account_ids.SS).toEqual(expect.arrayContaining(["222222222222", "333333333333"])); + + await dynamoClient.send( + new
dynamodb.UpdateItemCommand({ + TableName: configTableName, + Key: { type: { S: "config" }, name: { S: "scheduler" } }, + UpdateExpression: "DELETE remote_account_ids :a", + ExpressionAttributeValues: { ":a": { SS: ["111111111111", "222222222222", "333333333333"] } }, + }), + ); + }, 30000); +}); diff --git a/source/pipeline/e2e-tests/utils/cfn-utils.ts b/source/pipeline/e2e-tests/utils/cfn-utils.ts new file mode 100644 index 00000000..05e52422 --- /dev/null +++ b/source/pipeline/e2e-tests/utils/cfn-utils.ts @@ -0,0 +1,38 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import * as cfn from "@aws-sdk/client-cloudformation"; + +export class CfnStackResourceFinder { + readonly resources: cfn.StackResourceSummary[]; + constructor(resources: cfn.StackResourceSummary[]) { + this.resources = resources; + } + + static async fromStackName(stackName: string) { + const cfnClient = new cfn.CloudFormationClient(); + + const paginatorConfig = { + client: cfnClient, + pageSize: 20, + }; + + const listStackResourcesPaginator = cfn.paginateListStackResources(paginatorConfig, { + StackName: stackName, + }); + + const stackResources: cfn.StackResourceSummary[] = []; + for await (const { StackResourceSummaries } of listStackResourcesPaginator) { + if (StackResourceSummaries) { + stackResources.push(...StackResourceSummaries); + } + } + + return new CfnStackResourceFinder(stackResources); + } + + findResourceByPartialId(partialId: string) { + return this.resources.find( + (resource: cfn.StackResourceSummary) => resource.LogicalResourceId?.startsWith(partialId), + ); + } +} diff --git a/source/pipeline/e2e-tests/utils/cli-utils.ts b/source/pipeline/e2e-tests/utils/cli-utils.ts new file mode 100644 index 00000000..bca442b4 --- /dev/null +++ b/source/pipeline/e2e-tests/utils/cli-utils.ts @@ -0,0 +1,101 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import { hubStackParams } from "./hub-stack-utils"; +import * as dynamodb from "@aws-sdk/client-dynamodb"; +import { unmarshall, marshall } from "@aws-sdk/util-dynamodb"; + +const configTableName = hubStackParams.configTableArn.substring(hubStackParams.configTableArn.lastIndexOf("/") + 1); +const dynamodbClient = new dynamodb.DynamoDBClient(); + +export interface Period { + name: string; + description: string; + begintime: string; + endtime: string; + monthdays?: string; + months?: string; + weekdays?: string; +} + +export interface Schedule { + name: string; + description: string; + periods: Set; + ssm_maintenance_window?: Set; + override_status?: string; + enforced?: boolean; + timezone?: string; + doNotStopNewInstance?: boolean; + retainRunning?: boolean; + hibernate?: boolean; + useMetrics?: boolean; +} + +export async function deleteConfigTableItemsWithNamePrefix(type: string, namePrefix: string) { + const periodQueryResponse = await dynamodbClient.send( + new dynamodb.QueryCommand({ + TableName: configTableName, + KeyConditionExpression: "#type = :type AND begins_with(#name, :namePrefix)", + ExpressionAttributeNames: { + "#type": "type", + "#name": "name", + }, + ExpressionAttributeValues: { + ":type": { S: type }, + ":namePrefix": { S: namePrefix }, + }, + }), + ); + + for (const item of periodQueryResponse.Items ?? []) { + await dynamodbClient.send( + new dynamodb.DeleteItemCommand({ + TableName: configTableName, + Key: { + type: { S: item.type.S! }, + name: { S: item.name.S!
}, + }), + ); + } +} + +export async function getConfigTableItem(type: string, name: string) { + const response = await dynamodbClient.send( + new dynamodb.GetItemCommand({ + TableName: configTableName, + Key: { + type: { S: type }, + name: { S: name }, + }, + }), + ); + + if (response.Item === undefined) throw new Error("Did not find item"); + + return unmarshall(response.Item); +} + +export async function createSchedule(schedule: Schedule) { + await dynamodbClient.send( + new dynamodb.PutItemCommand({ + TableName: configTableName, + Item: { + ...marshall(schedule), + type: { S: "schedule" }, + }, + }), + ); +} + +export async function createPeriod(period: Period) { + await dynamodbClient.send( + new dynamodb.PutItemCommand({ + TableName: configTableName, + Item: { + ...marshall(period), + type: { S: "period" }, + }, + }), + ); +} diff --git a/source/pipeline/e2e-tests/utils/ec2-test-utils.ts b/source/pipeline/e2e-tests/utils/ec2-test-utils.ts index 4b24e99f..a2fe74a9 100644 --- a/source/pipeline/e2e-tests/utils/ec2-test-utils.ts +++ b/source/pipeline/e2e-tests/utils/ec2-test-utils.ts @@ -17,3 +17,25 @@ export async function getInstanceState(client: ec2.EC2Client, instanceId: string // eslint-disable-next-line @typescript-eslint/no-non-null-assertion return result.InstanceStatuses![0].InstanceState!.Name; } + +export async function setScheduleTag(client: ec2.EC2Client, instanceId: string, schedule: string) { + await client.send( + new ec2.CreateTagsCommand({ + Resources: [instanceId], + Tags: [ + { + Key: "Schedule", + Value: schedule, + }, + ], + }), + ); +} +export async function clearScheduleTag(client: ec2.EC2Client, instanceId: string) { + await client.send( + new ec2.DeleteTagsCommand({ + Resources: [instanceId], + Tags: [{ Key: "Schedule" }], + }), + ); +} diff --git a/source/pipeline/e2e-tests/utils/hub-stack-utils.ts b/source/pipeline/e2e-tests/utils/hub-stack-utils.ts index 46228cc1..cbc9cce9 100644 ---
a/source/pipeline/e2e-tests/utils/hub-stack-utils.ts +++ b/source/pipeline/e2e-tests/utils/hub-stack-utils.ts @@ -1,13 +1,12 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 - -import { InstanceSchedulerStack } from "../../../instance-scheduler/lib/instance-scheduler-stack"; import { CfnOutput } from "aws-cdk-lib"; +import { InstanceSchedulerTestingStack } from "../../lib/instance-scheduler-testing-stack"; const envKeys = { - CONFIG_TABLE: "ConfigTable", - ISSUE_SNS_TOPIC_ARN: "IssueSNSTopic", - SCHEDULER_ROLE_ARN: "IssueSNSTopic", + CONFIG_TABLE: "ConfigTableArn", + ISSUE_SNS_TOPIC_ARN: "TopicArn", + SCHEDULER_ROLE_ARN: "SchedulerRoleArn", }; export const hubStackParams = { @@ -19,10 +18,10 @@ export const hubStackParams = { schedulerRoleArn: process.env[envKeys.SCHEDULER_ROLE_ARN]!, }; -export function extractOutputsFrom(hubStack: InstanceSchedulerStack): Record { +export function extractOutputsFrom(hubStack: InstanceSchedulerTestingStack): Record { return { - [envKeys.CONFIG_TABLE]: hubStack.configurationTableOutput, - [envKeys.ISSUE_SNS_TOPIC_ARN]: hubStack.issueSnsTopicArn, + [envKeys.CONFIG_TABLE]: hubStack.configTableArn, + [envKeys.ISSUE_SNS_TOPIC_ARN]: hubStack.topicArn, [envKeys.SCHEDULER_ROLE_ARN]: hubStack.schedulerRoleArn, }; } diff --git a/source/pipeline/e2e-tests/utils/schedule-test-utils.ts b/source/pipeline/e2e-tests/utils/schedule-test-utils.ts index 01da9c67..fc84f4c1 100644 --- a/source/pipeline/e2e-tests/utils/schedule-test-utils.ts +++ b/source/pipeline/e2e-tests/utils/schedule-test-utils.ts @@ -4,6 +4,7 @@ import * as dynamodb from "@aws-sdk/client-dynamodb"; import { hubStackParams } from "./hub-stack-utils"; import { AttributeValue } from "@aws-sdk/client-dynamodb"; +import { setTimeout } from "timers/promises"; const configTableName = hubStackParams.configTableArn.substring(hubStackParams.configTableArn.lastIndexOf("/") + 1); export interface Period { @@ -17,8 +18,9 @@ export 
interface Schedule { name: string; description: string; periods?: Period[]; - use_maintenance_window?: boolean; - ssm_maintenance_window?: string; + ssm_maintenance_window?: string[]; + override_status?: string; + enforced?: boolean; } export async function createSchedule(schedule: Schedule) { const dynamoClient = new dynamodb.DynamoDBClient({}); @@ -44,11 +46,17 @@ export function currentTimePlus(minutes: number): Date { return targetTime; } +export function zeroPadToTwoDigits(value: number): string { + return ("0" + value).slice(-2); +} + export function toTimeStr(targetTime: Date): string { - return `${targetTime.getUTCHours()}:${targetTime.getUTCMinutes()}`; + const hours = zeroPadToTwoDigits(targetTime.getHours()); + const minutes = zeroPadToTwoDigits(targetTime.getMinutes()); + return `${hours}:${minutes}`; } -function minutesToMillis(minutes: number) { +export function minutesToMillis(minutes: number) { return minutes * 60_000; } @@ -68,8 +76,9 @@ function putRequestForSchedule(schedule: Schedule): dynamodb.WriteRequest { }; if (schedule.periods) item.periods = { SS: schedule.periods.map((period) => period.name) }; - if (schedule.use_maintenance_window) item.use_maintenance_window = { BOOL: schedule.use_maintenance_window }; - if (schedule.ssm_maintenance_window) item.ssm_maintenance_window = { S: schedule.ssm_maintenance_window }; + if (schedule.ssm_maintenance_window) item.ssm_maintenance_window = { SS: schedule.ssm_maintenance_window }; + if (schedule.enforced) item.enforced = { BOOL: schedule.enforced }; + if (schedule.override_status) item.override_status = { S: schedule.override_status }; return { PutRequest: { @@ -91,3 +100,27 @@ function putRequestForPeriod(period: Period): dynamodb.WriteRequest { }, }; } + +export async function waitForExpect( + expectation: () => void | Promise, + timeout: number, + interval: number, +): Promise { + const maxTries = Math.ceil(timeout / interval); + + let errorOnLastAttempt; + + for (let tries = 0; tries <= 
maxTries; tries++) { + try { + await expectation(); + return; + } catch (error) { + errorOnLastAttempt = error; + await setTimeout(interval); + } + } + + throw new Error( + `Timed out waiting for expectation to pass after ${timeout}ms. Error thrown by last attempt: ${errorOnLastAttempt}`, + ); +} diff --git a/source/pipeline/e2e-tests/utils/vpc-utils.ts b/source/pipeline/e2e-tests/utils/vpc-utils.ts index 5cd01663..a10502e6 100644 --- a/source/pipeline/e2e-tests/utils/vpc-utils.ts +++ b/source/pipeline/e2e-tests/utils/vpc-utils.ts @@ -24,7 +24,6 @@ export function defaultTestVPC(scope: Construct): ec2.Vpc { function createNewVpcInScope(scope: Construct) { const vpc = new ec2.Vpc(scope, "basic-test-vpc", { - natGateways: 0, ipAddresses: ec2.IpAddresses.cidr("10.0.0.0/16"), subnetConfiguration: [ { diff --git a/source/pipeline/jest.config.json b/source/pipeline/jest.config.json new file mode 100644 index 00000000..12c16a58 --- /dev/null +++ b/source/pipeline/jest.config.json @@ -0,0 +1,47 @@ +{ + "coverageProvider": "v8", + "reporters": [ + "default", + [ + "jest-junit", + { + "outputDirectory": "deployment/test-reports", + "outputName": "e2e-test-report.xml" + } + ] + ], + "roots": [ + "/e2e-tests" + ], + "setupFilesAfterEnv": [ + "jest-extended/all" + ], + "transform": { + "^.+\\.tsx?$": "ts-jest" + }, + "globalSetup": "./setup.ts", + "clearMocks": true, + "collectCoverage": true, + "coverageReporters": [ + "json", + "lcov", + "clover", + "cobertura", + "text" + ], + "coverageDirectory": "coverage", + "coveragePathIgnorePatterns": [ + "/node_modules/" + ], + "testPathIgnorePatterns": [ + "/node_modules/" + ], + "watchPathIgnorePatterns": [ + "/node_modules/" + ], + "testMatch": [ + "**/__tests__/**/*.[jt]s?(x)", + "**/?(*.)+(spec|test).[tj]s?(x)" + ], + "//": "~~ Generated by projen. To modify, edit .projenrc.ts and run \"npx projen\"." 
+} diff --git a/source/pipeline/jest.config.ts b/source/pipeline/jest.config.ts deleted file mode 100644 index 1bb68b71..00000000 --- a/source/pipeline/jest.config.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 -module.exports = { - roots: ["/e2e-tests"], - testMatch: ["**/*.test.ts"], - transform: { - "^.+\\.tsx?$": "ts-jest", - }, - reporters: [ - "default", - [ - "jest-junit", - { - outputDirectory: "deployment/test-reports", - outputName: "e2e-test-report.xml", - }, - ], - ], - setupFilesAfterEnv: ["jest-extended/all"], -}; diff --git a/source/pipeline/lib/instance-scheduler-testing-stack.ts b/source/pipeline/lib/instance-scheduler-testing-stack.ts new file mode 100644 index 00000000..c02667cf --- /dev/null +++ b/source/pipeline/lib/instance-scheduler-testing-stack.ts @@ -0,0 +1,64 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import { CfnCondition, CfnOutput, Fn, Stack } from "aws-cdk-lib"; +import { Construct } from "constructs"; +import { CoreScheduler } from "../../instance-scheduler/lib/core-scheduler"; +import { InstanceSchedulerStackProps } from "../../instance-scheduler/lib/instance-scheduler-stack"; +import { PythonFunctionFactory } from "../../instance-scheduler/lib/lambda-functions/function-factory"; + +export class InstanceSchedulerTestingStack extends Stack { + readonly configTableArn: CfnOutput; + readonly topicArn: CfnOutput; + readonly schedulerRoleArn: CfnOutput; + + constructor(scope: Construct, id: string, props: InstanceSchedulerStackProps) { + super(scope, id, props); + + const enabledCondition = new CfnCondition(this, "EnabledCondition", { expression: Fn.conditionEquals(true, true) }); + const disabledCondition = new CfnCondition(this, "DisabledCondition", { + expression: Fn.conditionEquals(false, true), + }); + + const coreScheduler = new CoreScheduler(this, { + 
solutionName: props.solutionName, + solutionVersion: props.solutionVersion, + solutionId: props.solutionId, + memorySizeMB: 128, + principals: [], + logRetentionDays: 90, + schedulingEnabled: enabledCondition, + schedulingIntervalMinutes: 1, + namespace: "e2etesting", + sendAnonymizedMetrics: disabledCondition, + enableDebugLogging: enabledCondition, + tagKey: "Schedule", + defaultTimezone: "UTC", + enableEc2: enabledCondition, + enableRds: enabledCondition, + enableRdsClusters: enabledCondition, + enableNeptune: enabledCondition, + enableDocdb: enabledCondition, + enableRdsSnapshots: enabledCondition, + regions: [""], // must have a value or Fn::Join will error + enableSchedulingHubAccount: enabledCondition, + enableEc2SsmMaintenanceWindows: enabledCondition, + startTags: "InstanceScheduler-LastAction=Started By {scheduler} {year}/{month}/{day} {hour}:{minute}{timezone}", + stopTags: "InstanceScheduler-LastAction=Stopped By {scheduler} {year}/{month}/{day} {hour}:{minute}{timezone}", + enableAwsOrganizations: disabledCondition, + appregSolutionName: props.appregSolutionName, + appregApplicationName: props.appregApplicationName, + enableOpsInsights: enabledCondition, + factory: new PythonFunctionFactory(), + enableDdbDeletionProtection: disabledCondition, + kmsKeyArns: ["*"], + enableAsgs: enabledCondition, + scheduledTagKey: "scheduled", + rulePrefix: "is-", + }); + + this.configTableArn = new CfnOutput(this, "ConfigTableArn", { value: coreScheduler.configTable.tableArn }); + this.topicArn = new CfnOutput(this, "TopicArn", { value: coreScheduler.topic.topicArn }); + this.schedulerRoleArn = new CfnOutput(this, "SchedulerRoleArn", { value: coreScheduler.hubSchedulerRole.roleArn }); + new CfnOutput(this, "AsgOrchName", { value: coreScheduler.asgOrch.functionName }); + } +} diff --git a/source/pipeline/lib/testing-pipeline-stack.ts b/source/pipeline/lib/testing-pipeline-stack.ts index 7cb47ad8..9750bdc1 100644 --- a/source/pipeline/lib/testing-pipeline-stack.ts +++ 
b/source/pipeline/lib/testing-pipeline-stack.ts @@ -1,18 +1,15 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 - +import { Stack, Stage } from "aws-cdk-lib"; import * as codebuild from "aws-cdk-lib/aws-codebuild"; +import { Effect, PolicyStatement } from "aws-cdk-lib/aws-iam"; import * as pipelines from "aws-cdk-lib/pipelines"; -import { Construct } from "constructs"; -import { Stack, Stage } from "aws-cdk-lib"; -import { InstanceSchedulerStack } from "../../instance-scheduler/lib/instance-scheduler-stack"; import { NagSuppressions } from "cdk-nag"; +import { Construct } from "constructs"; import { E2eTestStack } from "./e2e-test-stack"; -import { Effect, PolicyStatement } from "aws-cdk-lib/aws-iam"; -import * as hubStackUtils from "../e2e-tests/utils/hub-stack-utils"; +import { InstanceSchedulerTestingStack } from "./instance-scheduler-testing-stack"; const DEPLOY_STAGE_NAME = "Deployment-Test"; -const END_TO_END_STAGE_NAME = "End-to-End-Tests"; const STACK_NAME = "InstanceScheduler"; const TEST_RESOURCES_STACK_NAME = "InstanceSchedulerE2ETestResources"; @@ -42,29 +39,25 @@ export class TestingPipelineStack extends Stack { constructor(scope: Construct, construct_id: string, sourceProvider: SourceProvider) { super(scope, construct_id); + const synth = this.synthStep(sourceProvider.getSource(this)); const pipeline = new pipelines.CodePipeline(this, "Pipeline", { - synth: this.synthStep(sourceProvider.getSource(this)), + synth, codeBuildDefaults: { buildEnvironment: { - buildImage: codebuild.LinuxBuildImage.STANDARD_6_0, - computeType: codebuild.ComputeType.LARGE, - privileged: true, + buildImage: codebuild.LinuxBuildImage.STANDARD_7_0, + computeType: codebuild.ComputeType.X_LARGE, }, }, }); const deployStage = new DeployStage(this, DEPLOY_STAGE_NAME); pipeline.addStage(deployStage, { - pre: [this.unitTestStep()], + post: [this.endToEndTestStep(deployStage), this.fullTestStep()], }); - const e2eTestStage 
= new EndToEndTestStage(this, END_TO_END_STAGE_NAME); - pipeline.addStage(e2eTestStage, { - post: [this.endToEndTestStep(deployStage.instanceSchedulerStack, e2eTestStage.e2eTestResourcesStack)], - }); + pipeline.buildPipeline(); //pipeline must be built before findings can be suppressed - pipeline.buildPipeline(); NagSuppressions.addStackSuppressions(this, [ { id: "AwsSolutions-IAM5", @@ -76,6 +69,10 @@ export class TestingPipelineStack extends Stack { }, ]); + NagSuppressions.addResourceSuppressions(synth.project, [ + { id: "AwsSolutions-CB3", reason: "Privileged mode is required to build Lambda PythonFunctions with Docker" }, + ]); + NagSuppressions.addResourceSuppressions(pipeline.pipeline.artifactBucket, [ { id: "AwsSolutions-S1", @@ -86,23 +83,38 @@ export class TestingPipelineStack extends Stack { synthStep(source: pipelines.IFileSetProducer) { return new pipelines.CodeBuildStep("Synth", { + buildEnvironment: { privileged: true }, input: source, - installCommands: ["npm install --location=global npm@^9", "npm ci"], - commands: ["npm run synth"], + installCommands: [ + "pyenv global $PYTHON_311_VERSION", + "python -m pip install -U pip setuptools poetry tox", + "npm ci", + ], + commands: ["npm run test", "npm run synth"], primaryOutputDirectory: "build/cdk.out", }); } - unitTestStep() { - return new pipelines.CodeBuildStep("unitTests", { + fullTestStep() { + return new pipelines.CodeBuildStep("FullTest", { + buildEnvironment: { privileged: true }, installCommands: [ - "npm install --location=global npm@^9", - "pyenv install -s 3.8 3.9 3.11", - "pyenv global 3.11 3.9 3.8", - "python -m pip install -U pip setuptools tox", + "n 20", + "npm install --location=global npm@^10", + "pyenv install -s 3.8 3.9 3.12", + "pyenv global $PYTHON_311_VERSION 3.12 3.9 3.8", + "python -m pip install -U pip setuptools poetry tox", "npm ci", ], - commands: ["npm run test:ci"], + commands: [ + "cd deployment", + "./build-open-source-dist.sh", + "cd ..", + "npm run test:ci", + "cd 
deployment", + "./build-s3-dist.sh solutions instance-scheduler-on-aws v0.0.0", + ], + primaryOutputDirectory: "build/cdk.out", partialBuildSpec: codebuild.BuildSpec.fromObject({ reports: { cdk_test_reports: { @@ -122,17 +134,22 @@ export class TestingPipelineStack extends Stack { }, }, }), - rolePolicyStatements: [], }); } - endToEndTestStep(mainInstanceSchedulerStack: InstanceSchedulerStack, testingResourcesStack: E2eTestStack) { + endToEndTestStep(deployStage: DeployStage) { return new pipelines.CodeBuildStep("EndToEndTests", { - installCommands: ["npm install --location=global npm@^9", "npm ci"], + installCommands: [ + "n 20", + "npm install --location=global npm@^10", + "npm ci", + "pyenv global $PYTHON_311_VERSION", + "python -m pip install -U pip setuptools poetry tox ./source/cli", + ], commands: ["npm run e2e-tests"], - envFromCfnOutputs: { - ...testingResourcesStack.outputs, - ...hubStackUtils.extractOutputsFrom(mainInstanceSchedulerStack), + env: { + TEST_ASSETS_STACK: deployStage.e2eTestResourcesStack.stackName, + HUB_STACK: deployStage.instanceSchedulerStack.stackName, }, partialBuildSpec: codebuild.BuildSpec.fromObject({ reports: { @@ -155,32 +172,14 @@ export class TestingPipelineStack extends Stack { } class DeployStage extends Stage { - constructor(scope: Construct, construct_id: string) { - super(scope, construct_id); - } - - instanceSchedulerStack = new InstanceSchedulerStack(this, STACK_NAME, { + readonly instanceSchedulerStack = new InstanceSchedulerTestingStack(this, STACK_NAME, { appregApplicationName: "AWS-Solutions", appregSolutionName: "instance-scheduler-on-aws", description: "test deployment from the InstanceScheduler e2e pipeline", solutionId: "SO0030", solutionName: "instance-scheduler-on-aws", solutionVersion: "pipeline", - paramOverrides: { - schedulerFrequency: "1", - scheduledServices: "Both", - namespace: "e2etesting", - enableSSMMaintenanceWindows: "Yes", - trace: "Yes", - }, - disableOpMetrics: true, }); -} - -class 
EndToEndTestStage extends Stage { - constructor(scope: Construct, construct_id: string) { - super(scope, construct_id); - } - e2eTestResourcesStack = new E2eTestStack(this, TEST_RESOURCES_STACK_NAME); + readonly e2eTestResourcesStack = new E2eTestStack(this, TEST_RESOURCES_STACK_NAME); } diff --git a/source/pipeline/setup.ts b/source/pipeline/setup.ts new file mode 100644 index 00000000..47dec09a --- /dev/null +++ b/source/pipeline/setup.ts @@ -0,0 +1,50 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import * as cfn from "@aws-sdk/client-cloudformation"; + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +export default async function setupTestSuite(_globalConfig: never, _projectConfig: never) { + const hubStackName = process.env["HUB_STACK"]; + const testAssetsStackName = process.env["TEST_ASSETS_STACK"]; + + if (hubStackName == null) { + throw new Error(`Missing required environment variable: HUB_STACK`); + } + + if (testAssetsStackName == null) { + throw new Error(`Missing required environment variable: TEST_ASSETS_STACK`); + } + + console.log(`HUB STACK: ${hubStackName}`); + console.log(`TEST ASSETS STACK: ${testAssetsStackName}`); + + copyOutputsToEnv(await describeCfnStackOutputs(hubStackName)); + copyOutputsToEnv(await describeCfnStackOutputs(testAssetsStackName)); +} + +const cfnClient = new cfn.CloudFormationClient(); +async function describeCfnStackOutputs(stackName: string) { + const stackDescription = await cfnClient.send( + new cfn.DescribeStacksCommand({ + StackName: stackName, + }), + ); + + const stackOutputs = stackDescription.Stacks?.[0].Outputs; + + if (stackOutputs == null) { + throw new Error(`unable to describe stack outputs for stack ${stackName}`); + } + + return stackOutputs; +} + +function copyOutputsToEnv(outputs: cfn.Output[]) { + for (const output of outputs) { + const key = output.OutputKey; + const value = output.OutputValue; + if (key && value) { + 
process.env[key] = value; + } + } +} diff --git a/tsconfig.json b/tsconfig.json index d7506b9e..f63f677f 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -14,7 +14,7 @@ "dom" ], "module": "CommonJS", - "noEmitOnError": true, + "noEmitOnError": false, "noFallthroughCasesInSwitch": true, "noImplicitAny": true, "noImplicitReturns": true, @@ -29,18 +29,15 @@ "target": "ES2022", "forceConsistentCasingInFileNames": true, "noPropertyAccessFromIndexSignature": false, - "noUncheckedIndexedAccess": false, - "allowJs": false + "noUncheckedIndexedAccess": false }, "include": [ "source/**/*.ts", - "source/pipeline/**/*.ts", "deployment/cdk-solution-helper/**/*.ts", ".projenrc.ts", "projenrc/**/*.ts" ], "exclude": [ - "node_modules", "build/cdk.out" ], "files": [