diff --git a/CHANGELOG.md b/CHANGELOG.md index e22e038ce1c..2e13e8f229b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,53 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## Unreleased +## [v10.1.1] 2022-03-04 + +### Migration steps + +- Due to a bug in the PUT `/rules/` endpoint, the rule records in PostgreSQL may be +out of sync with records in DynamoDB. In order to bring the records into sync, re-run the +[previously deployed `data-migration1` Lambda](https://nasa.github.io/cumulus/docs/upgrade-notes/upgrade-rds#3-deploy-and-run-data-migration1) with a payload of +`{"forceRulesMigration": true}`: + +```shell +aws lambda invoke --function-name $PREFIX-data-migration1 \ + --payload $(echo '{"forceRulesMigration": true}' | base64) $OUTFILE +``` + +### Added + +- **CUMULUS-2846** + - Added `@cumulus/db/translate/rule.translateApiRuleToPostgresRuleRaw` to translate API rule to PostgreSQL rules and + **keep undefined fields** + +### Changed + +- **CUMULUS-NONE** + - Adds logging to ecs/async-operation Docker container that launches async + tasks on ECS. Sets default `async_operation_image_version` to 39. +- **CUMULUS-2845** + - Updated rules model to decouple `createRuleTrigger` from `create`. + - Updated rules POST endpoint to call `rulesModel.createRuleTrigger` directly to create rule trigger. + - Updated rules PUT endpoints to call `rulesModel.createRuleTrigger` if update fails and reversion needs to occur. 
+- **CUMULUS-2846** + - Updated version of `localstack/localstack` used in local unit testing to `0.11.5` + +### Fixed + +- Upgraded lodash to version 4.17.21 to fix vulnerability +- **CUMULUS-2845** + - Fixed bug in POST `/rules` endpoint causing rule records to be created + inconsistently in DynamoDB and PostgreSQL +- **CUMULUS-2846** + - Fixed logic for `PUT /rules/` endpoint causing rules to be saved + inconsistently between DynamoDB and PostgreSQL +- **CUMULUS-2854** + - Fixed queue granules behavior where the task was not accounting for granules that + *already* had createdAt set. Workflows downstream in this scenario should no longer + fail to write their granules due to order-of-db-writes constraints in the database + update logic. + ## [v10.1.0] 2022-02-23 ### Added @@ -42,8 +89,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Fixed -- Fixed IAM permissions issue with `-postgres-migration-async-operation` Lambda -which prevented it from running a Fargate task for data migration. - **CUMULUS-2853** - Move OAUTH_PROVIDER to lambda env variables to address regression in CUMULUS-2781 - Add logging output to api app router @@ -230,6 +275,69 @@ instances according to the [policy configuration](https://github.com/nasa/cumulu - **CUMULUS-2835** - Updated `hyrax-metadata-updates` task to support reading the DatasetId from ECHO10 XML, and the EntryTitle from UMM-G JSON; these are both valid alternatives to the shortname and version ID. +## [v9.9.3] 2022-02-17 [BACKPORT] + +**Please note** changes in 9.9.3 may not yet be released in future versions, as +this is a backport and patch release on the 9.9.x series of releases. Updates that +are included in the future will have a corresponding CHANGELOG entry in future +releases. 
+ + +- **CUMULUS-2853** + - Move OAUTH_PROVIDER to lambda env variables to address regression in 9.9.2/CUMULUS-2781 + - Add logging output to api app router + +## [v9.9.2] 2022-02-10 [BACKPORT] + +**Please note** changes in 9.9.2 may not yet be released in future versions, as +this is a backport and patch release on the 9.9.x series of releases. Updates that +are included in the future will have a corresponding CHANGELOG entry in future +releases.### Added + +- **CUMULUS-2775** + - Added a configurable parameter group for the RDS serverless database cluster deployed by `tf-modules/rds-cluster-tf`. The allowed parameters for the parameter group can be found in the AWS documentation of [allowed parameters for an Aurora PostgreSQL cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Reference.ParameterGroups.html). By default, the following parameters are specified: + - `shared_preload_libraries`: `pg_stat_statements,auto_explain` + - `log_min_duration_statement`: `250` + - `auto_explain.log_min_duration`: `250` +- **CUMULUS-2840** + - Added an index on `granule_cumulus_id` to the RDS files table. + +### Changed + +- **CUMULUS-2847** + - Move DynamoDB table name into API keystore and initialize only on lambda cold start +- **CUMULUS-2781** + - Add api_config secret to hold API/Private API lambda configuration values +- **CUMULUS-2775** + - Changed the `timeout_action` to `ForceApplyCapacityChange` by default for the RDS serverless database cluster `tf-modules/rds-cluster-tf` + +## [v9.9.1] 2022-02-10 [BACKPORT] + +**Please note** changes in 9.9.1 may not yet be released in future versions, as +this is a backport and patch release on the 9.9.x series of releases. Updates that +are included in the future will have a corresponding CHANGELOG entry in future +releases. 
+ +### Fixed + +- **CUMULUS-2775** + - Updated `@cumulus/api-client` to not log an error for 201 response from `updateGranule` + +### Changed + +- Updated version of `@cumulus/cumulus-message-adapter-js` from `2.0.3` to `2.0.4` for +all Cumulus workflow tasks +- **CUMULUS-2775** + - Changed `@cumulus/api-client/invokeApi()` to accept a single accepted status code or an array + of accepted status codes via `expectedStatusCodes` +- **CUMULUS-2837** + - Update process-s3-dead-letter-archive to unpack SQS events in addition to + Cumulus Messages + - Update process-s3-dead-letter-archive to look up execution status using + getCumulusMessageFromExecutionEvent (common method with sfEventSqsToDbRecords) + - Move methods in api/lib/cwSfExecutionEventUtils to + @cumulus/message/StepFunctions + ## [v9.9.0] 2021-11-03 ### Added @@ -305,6 +413,15 @@ upgrades to `knex` package and to address security vulnerabilities. `s3://internal-bucket/workflows` directory into `s3://internal-bucket/buckets`. + +## [v9.7.1] 2021-12-08 [BACKPORT] + +Please note changes in 9.7.1 may not yet be released in future versions, as this is a backport and patch release on the 9.7.x series of releases. Updates that are included in the future will have a corresponding CHANGELOG entry in future releases. +### Fixed + +- **CUMULUS-2751** + - Update all tasks to update to use cumulus-message-adapter-js version 2.0.4 + ## [v9.7.0] 2021-10-01 ### Notable Changes @@ -454,6 +571,27 @@ when output of the operation is `undefined` undefined`. This function has also been updated to throw descriptive errors if an incorrectly formatted collectionId is input. +## [v9.4.1] 2022-02-14 [BACKPORT] + +**Please note** changes in 9.4.1 may not yet be released in future versions, as +this is a backport and patch release on the 9.4.x series of releases. Updates that +are included in the future will have a corresponding CHANGELOG entry in future +releases. 
+ +- **CUMULUS-2847** + - Update dynamo configuration to read from S3 instead of System Manager + Parameter Store + - Move api configuration initialization outside the lambda handler to + eliminate unneeded S3 calls/require config on cold-start only + - Moved `ssh2` package from `@cumulus/common` to `@cumulus/sftp-client` and + upgraded package from `^0.8.7` to `^1.0.0` to address security vulnerability + issue in previous version. + - Fixed hyrax task package.json dev dependency + - Update CNM lambda dependencies for Core tasks + - cumulus-cnm-response-task: 1.4.4 + - cumulus-cnm-to-granule: 1.5.4 + - Whitelist ssh2 re: https://github.com/advisories/GHSA-652h-xwhf-q4h6 + ## [v9.4.0] 2021-08-16 ### Notable changes @@ -5204,15 +5342,21 @@ Note: There was an issue publishing 1.12.0. Upgrade to 1.12.1. ## [v1.0.0] - 2018-02-23 -[unreleased]: https://github.com/nasa/cumulus/compare/v10.1.0...HEAD +[unreleased]: https://github.com/nasa/cumulus/compare/v10.1.1...HEAD +[v10.1.1]: https://github.com/nasa/cumulus/compare/v10.1.0...v10.1.1 [v10.1.0]: https://github.com/nasa/cumulus/compare/v10.0.1...v10.1.0 [v10.0.1]: https://github.com/nasa/cumulus/compare/v10.0.0...v10.0.1 [v10.0.0]: https://github.com/nasa/cumulus/compare/v9.9.0...v10.0.0 +[v9.9.3]: https://github.com/nasa/cumulus/compare/v9.9.2...v9.9.3 +[v9.9.2]: https://github.com/nasa/cumulus/compare/v9.9.1...v9.9.2 +[v9.9.1]: https://github.com/nasa/cumulus/compare/v9.9.0...v9.9.1 [v9.9.0]: https://github.com/nasa/cumulus/compare/v9.8.0...v9.9.0 [v9.8.0]: https://github.com/nasa/cumulus/compare/v9.7.0...v9.8.0 +[v9.7.1]: https://github.com/nasa/cumulus/compare/v9.7.0...v9.7.1 [v9.7.0]: https://github.com/nasa/cumulus/compare/v9.6.0...v9.7.0 [v9.6.0]: https://github.com/nasa/cumulus/compare/v9.5.0...v9.6.0 [v9.5.0]: https://github.com/nasa/cumulus/compare/v9.4.0...v9.5.0 +[v9.4.1]: https://github.com/nasa/cumulus/compare/v9.4.0...v9.4.1 [v9.4.0]: https://github.com/nasa/cumulus/compare/v9.3.0...v9.4.0 [v9.3.0]: 
https://github.com/nasa/cumulus/compare/v9.2.2...v9.3.0 [v9.2.2]: https://github.com/nasa/cumulus/compare/v9.2.1...v9.2.2 diff --git a/bamboo/bootstrap-unit-tests.sh b/bamboo/bootstrap-unit-tests.sh index 056edb3bff6..3d38f638f96 100755 --- a/bamboo/bootstrap-unit-tests.sh +++ b/bamboo/bootstrap-unit-tests.sh @@ -89,7 +89,7 @@ echo 'Elasticsearch status is green' $docker_command "curl -XPUT 'http://127.0.0.1:9200/_cluster/settings' -d \@/$UNIT_TEST_BUILD_DIR/bamboo/elasticsearch.config" # Lambda seems to be the last service that's started up by Localstack -while ! $docker_command 'nc -z 127.0.0.1 4574'; do +while ! $docker_command 'nc -z 127.0.0.1 4566'; do echo 'Waiting for Localstack Lambda service to start' docker ps -a sleep 2 diff --git a/bamboo/docker-compose-local.yml b/bamboo/docker-compose-local.yml index 1cafcc6a1a7..b63fde39470 100644 --- a/bamboo/docker-compose-local.yml +++ b/bamboo/docker-compose-local.yml @@ -23,7 +23,7 @@ services: - 8080:8080 - 9200:9200 localstack: - image: localstack/localstack:0.10.7 + image: localstack/localstack:0.11.5 elasticsearch: image: elasticsearch:5.3 sftp: diff --git a/bamboo/docker-compose.yml b/bamboo/docker-compose.yml index 49363bfd69a..3b0845f1208 100644 --- a/bamboo/docker-compose.yml +++ b/bamboo/docker-compose.yml @@ -41,10 +41,10 @@ services: environment: ES_JAVA_OPTS: "-Xms750m -Xmx750m" localstack: - image: maven.earthdata.nasa.gov/localstack/localstack:0.10.7 + image: maven.earthdata.nasa.gov/localstack/localstack:0.11.5 network_mode: "service:build_env" environment: - SERVICES: "cloudformation,cloudwatch,cloudwatchlogs,dynamodb,kinesis,kms,lambda,s3,secretsmanager,sns,sqs,stepfunctions,ssm" + SERVICES: "cloudformation,cloudwatch,cloudwatchlogs,dynamodb,iam,kinesis,kms,lambda,s3,secretsmanager,sns,sqs,stepfunctions,ssm,logs" build_env: image: maven.earthdata.nasa.gov/cumulus:latest volumes: diff --git a/bamboo/select-stack.js b/bamboo/select-stack.js index 04b80f9c106..f979955a39f 100755 --- 
a/bamboo/select-stack.js +++ b/bamboo/select-stack.js @@ -30,6 +30,7 @@ function determineIntegrationTestStackName(cb) { 'Nate Pauzenga': 'np-ci', 'Danielle Peters': 'dop-ci', 'Anthony Ortega': 'jao-ci', + vpnguye2: 'vkn-ci', }; return git('.').log({ '--max-count': '1' }, (e, r) => { diff --git a/example/config.yml b/example/config.yml index 39cfaf888ef..92d1485721a 100644 --- a/example/config.yml +++ b/example/config.yml @@ -53,5 +53,11 @@ jtran-int-tf: jtran-tf: bucket: jtran-internal +vkn-ci-tf: + bucket: vkn-ci-tf-internal + +vkn-tf: + bucket: vkn-tf-internal + np: bucket: npauzenga-internal diff --git a/example/cumulus-tf/passthrough_workflow.asl.json b/example/cumulus-tf/passthrough_workflow.asl.json new file mode 100644 index 00000000000..dd520474907 --- /dev/null +++ b/example/cumulus-tf/passthrough_workflow.asl.json @@ -0,0 +1,9 @@ +{ + "Comment": "State machine that creates a step function success event", + "StartAt": "SuccessState", + "States": { + "SuccessState": { + "Type": "Succeed" + } + } +} diff --git a/example/cumulus-tf/passthrough_workflow.tf b/example/cumulus-tf/passthrough_workflow.tf new file mode 100644 index 00000000000..f8d30fe91f7 --- /dev/null +++ b/example/cumulus-tf/passthrough_workflow.tf @@ -0,0 +1,16 @@ +module "passthrough_workflow" { + source = "../../tf-modules/workflow" + + prefix = var.prefix + name = "Passthrough" + workflow_config = module.cumulus.workflow_config + system_bucket = var.system_bucket + tags = local.tags + + state_machine_definition = templatefile( + "${path.module}/passthrough_workflow.asl.json", + { + hello_world_task_arn: module.cumulus.hello_world_task.task_arn + } + ) +} diff --git a/example/cumulus-tf/queue_granules_passthrough_workflow.asl.json b/example/cumulus-tf/queue_granules_passthrough_workflow.asl.json new file mode 100644 index 00000000000..0912dd2f74e --- /dev/null +++ b/example/cumulus-tf/queue_granules_passthrough_workflow.asl.json @@ -0,0 +1,65 @@ +{ + "Comment": "Queue Granules", + "StartAt": 
"QueueGranules", + "States": { + "QueueGranules": { + "Parameters": { + "cma": { + "event.$": "$", + "task_config": { + "queueUrl": "${start_sf_queue_url}", + "provider": "{$.meta.provider}", + "internalBucket": "{$.meta.buckets.internal.name}", + "stackName": "{$.meta.stack}", + "granuleIngestWorkflow": "${ingest_granule_workflow_name}", + "cumulus_message": { + "input": "{$.payload}", + "outputs": [ + { + "source": "{$.granules}", + "destination": "{$.meta.input_granules}" + }, + { + "source": "{$}", + "destination": "{$.payload}" + }, + { + "source": "{$.process}", + "destination": "{$.meta.process}" + } + ] + } + } + } + }, + "Type": "Task", + "Resource": "${queue_granules_task_arn}", + "Retry": [ + { + "ErrorEquals": [ + "Lambda.ServiceException", + "Lambda.AWSLambdaException", + "Lambda.SdkClientException" + ], + "IntervalSeconds": 2, + "MaxAttempts": 6, + "BackoffRate": 2 + } + ], + "Catch": [ + { + "ErrorEquals": [ + "States.ALL" + ], + "ResultPath": "$.exception", + "Next": "WorkflowFailed" + } + ], + "End": true + }, + "WorkflowFailed": { + "Type": "Fail", + "Cause": "Workflow failed" + } + } +} diff --git a/example/cumulus-tf/queue_granules_passthrough_workflow.tf b/example/cumulus-tf/queue_granules_passthrough_workflow.tf new file mode 100644 index 00000000000..4254cf57e4e --- /dev/null +++ b/example/cumulus-tf/queue_granules_passthrough_workflow.tf @@ -0,0 +1,18 @@ +module "queue_granules_passthrough_workflow" { + source = "../../tf-modules/workflow" + + prefix = var.prefix + name = "QueueGranulesPassthrough" + workflow_config = module.cumulus.workflow_config + system_bucket = var.system_bucket + tags = local.tags + + state_machine_definition = templatefile( + "${path.module}/queue_granules_passthrough_workflow.asl.json", + { + ingest_granule_workflow_name: module.passthrough_workflow.name, + queue_granules_task_arn: module.cumulus.queue_granules_task.task_arn, + start_sf_queue_url: module.cumulus.start_sf_queue_url + } + ) +} diff --git 
a/example/cumulus-tf/variables.tf b/example/cumulus-tf/variables.tf index 6158b388a59..7cdc932209a 100644 --- a/example/cumulus-tf/variables.tf +++ b/example/cumulus-tf/variables.tf @@ -338,7 +338,7 @@ variable "rds_admin_access_secret_arn" { variable "async_operation_image_version" { description = "docker image version to use for Cumulus async operations tasks" type = string - default = "36" + default = "39" } variable "cumulus_process_activity_version" { diff --git a/example/deployments/cumulus/vkn-ci-tf.tfvars b/example/deployments/cumulus/vkn-ci-tf.tfvars new file mode 100644 index 00000000000..fa5d6466415 --- /dev/null +++ b/example/deployments/cumulus/vkn-ci-tf.tfvars @@ -0,0 +1,34 @@ +prefix = "vkn-ci-tf" +key_name = "vanhk-cumulus-sandbox" +archive_api_port = 4343 + +cmr_oauth_provider = "launchpad" + +system_bucket = "vkn-ci-tf-internal" +buckets = { + glacier = { + name = "cumulus-test-sandbox-orca-glacier" + type = "orca" + }, + internal = { + name = "vkn-ci-tf-internal" + type = "internal" + } + private = { + name = "vkn-ci-tf-private" + type = "private" + } + protected = { + name = "vkn-ci-tf-protected" + type = "protected" + } + protected-2 = { + name = "vkn-ci-tf-protected-2" + type = "protected" + } + public = { + name = "vkn-ci-tf-public" + type = "public" + } +} +orca_default_bucket = "cumulus-test-sandbox-orca-glacier" diff --git a/example/deployments/data-persistence/vkn-ci-tf.tfvars b/example/deployments/data-persistence/vkn-ci-tf.tfvars new file mode 100644 index 00000000000..23784355f6f --- /dev/null +++ b/example/deployments/data-persistence/vkn-ci-tf.tfvars @@ -0,0 +1,8 @@ +prefix = "vkn-ci-tf" +elasticsearch_config = { + domain_name = "es" + instance_count = 2 + instance_type = "t2.small.elasticsearch" + version = "5.3" + volume_size = 10 +} diff --git a/example/deployments/db-migration/vkn-ci-tf.tfvars b/example/deployments/db-migration/vkn-ci-tf.tfvars new file mode 100644 index 00000000000..b9c98072ec3 --- /dev/null +++ 
b/example/deployments/db-migration/vkn-ci-tf.tfvars @@ -0,0 +1 @@ +prefix = "vkn-ci-tf" diff --git a/example/lambdas/asyncOperations/package.json b/example/lambdas/asyncOperations/package.json index 59a518d0949..fef2632d8ca 100644 --- a/example/lambdas/asyncOperations/package.json +++ b/example/lambdas/asyncOperations/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/test-async-operations", - "version": "10.1.0", + "version": "10.1.1", "description": "AsyncOperations Test Lambda", "main": "index.js", "private": true, diff --git a/example/lambdas/python-processing/package.json b/example/lambdas/python-processing/package.json index 7dfbca18773..ec398dd649a 100644 --- a/example/lambdas/python-processing/package.json +++ b/example/lambdas/python-processing/package.json @@ -1,7 +1,7 @@ { "name": "@cumulus/python-process-activity", "private": true, - "version": "10.1.0", + "version": "10.1.1", "description": "Python reference activity", "homepage": "https://github.com/nasa/cumulus/tree/master/example/lambdas/python-reference-activity", "repository": { diff --git a/example/lambdas/python-reference-activity/package.json b/example/lambdas/python-reference-activity/package.json index f275fd07936..996f67a1dcd 100644 --- a/example/lambdas/python-reference-activity/package.json +++ b/example/lambdas/python-reference-activity/package.json @@ -1,7 +1,7 @@ { "name": "@cumulus/python-reference-activity", "private": true, - "version": "10.1.0", + "version": "10.1.1", "description": "Python reference activity", "homepage": "https://github.com/nasa/cumulus/tree/master/example/lambdas/python-reference-activity", "repository": { diff --git a/example/lambdas/python-reference-task/package.json b/example/lambdas/python-reference-task/package.json index 970a4e4b2a4..6750d3300a8 100644 --- a/example/lambdas/python-reference-task/package.json +++ b/example/lambdas/python-reference-task/package.json @@ -1,7 +1,7 @@ { "name": "@cumulus/python-reference-task", "private": true, - "version": 
"10.1.0", + "version": "10.1.1", "description": "Python reference task", "main": "index.js", "homepage": "https://github.com/nasa/cumulus/tree/master/example/lambdas/python-reference-task", diff --git a/example/lambdas/s3AccessTest/package.json b/example/lambdas/s3AccessTest/package.json index a269959a7fb..d52095055d4 100644 --- a/example/lambdas/s3AccessTest/package.json +++ b/example/lambdas/s3AccessTest/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/test-s3-access", - "version": "10.1.0", + "version": "10.1.1", "description": "S3 Access Test Lambda", "main": "index.js", "private": true, diff --git a/example/lambdas/snsS3Test/package.json b/example/lambdas/snsS3Test/package.json index 0b3f0ae91be..e8e70081ef0 100644 --- a/example/lambdas/snsS3Test/package.json +++ b/example/lambdas/snsS3Test/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/test-sns-s3", - "version": "10.1.0", + "version": "10.1.1", "description": "SNS to S3 Test Lambda", "main": "index.js", "private": true, diff --git a/example/lambdas/versionUpTest/package.json b/example/lambdas/versionUpTest/package.json index 7d4d9a415f2..729fa0fc70a 100644 --- a/example/lambdas/versionUpTest/package.json +++ b/example/lambdas/versionUpTest/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/test-version-up", - "version": "10.1.0", + "version": "10.1.1", "description": "Version Up Test Lambda", "main": "index.js", "private": true, diff --git a/example/package.json b/example/package.json index b70da1ce63e..ccfa1604a43 100644 --- a/example/package.json +++ b/example/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/cumulus-integration-tests", - "version": "10.1.0", + "version": "10.1.1", "description": "Cumulus Integration Test Deployment", "private": true, "main": "index.js", @@ -45,32 +45,32 @@ ] }, "dependencies": { - "@cumulus/api": "10.1.0", - "@cumulus/api-client": "10.1.0", - "@cumulus/async-operations": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/checksum": "10.1.0", - "@cumulus/cmr-client": 
"10.1.0", - "@cumulus/cmrjs": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/discover-granules": "10.1.0", - "@cumulus/discover-pdrs": "10.1.0", - "@cumulus/files-to-granules": "10.1.0", - "@cumulus/hello-world": "10.1.0", - "@cumulus/ingest": "10.1.0", - "@cumulus/integration-tests": "10.1.0", - "@cumulus/message": "10.1.0", - "@cumulus/move-granules": "10.1.0", - "@cumulus/parse-pdr": "10.1.0", - "@cumulus/pdr-status-check": "10.1.0", - "@cumulus/post-to-cmr": "10.1.0", - "@cumulus/queue-granules": "10.1.0", - "@cumulus/queue-pdrs": "10.1.0", - "@cumulus/sf-sqs-report": "10.1.0", - "@cumulus/sync-granule": "10.1.0", - "@cumulus/test-processing": "10.1.0" + "@cumulus/api": "10.1.1", + "@cumulus/api-client": "10.1.1", + "@cumulus/async-operations": "10.1.1", + "@cumulus/aws-client": "10.1.1", + "@cumulus/checksum": "10.1.1", + "@cumulus/cmr-client": "10.1.1", + "@cumulus/cmrjs": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/discover-granules": "10.1.1", + "@cumulus/discover-pdrs": "10.1.1", + "@cumulus/files-to-granules": "10.1.1", + "@cumulus/hello-world": "10.1.1", + "@cumulus/ingest": "10.1.1", + "@cumulus/integration-tests": "10.1.1", + "@cumulus/message": "10.1.1", + "@cumulus/move-granules": "10.1.1", + "@cumulus/parse-pdr": "10.1.1", + "@cumulus/pdr-status-check": "10.1.1", + "@cumulus/post-to-cmr": "10.1.1", + "@cumulus/queue-granules": "10.1.1", + "@cumulus/queue-pdrs": "10.1.1", + "@cumulus/sf-sqs-report": "10.1.1", + "@cumulus/sync-granule": "10.1.1", + "@cumulus/test-processing": "10.1.1" }, "devDependencies": { - "@cumulus/test-data": "10.1.0" + "@cumulus/test-data": "10.1.1" } } diff --git a/example/scripts/generate_ingest/package.json b/example/scripts/generate_ingest/package.json index dc83167a5bd..1ca1504efa4 100644 --- a/example/scripts/generate_ingest/package.json +++ b/example/scripts/generate_ingest/package.json @@ -1,7 +1,7 @@ { "name": "@cumulus/generate_ingest", "private": true, - "version": "10.1.0", + "version": "10.1.1", 
"description": "Script to generate test data for scaled ingest", "keywords": [ "GIBS", @@ -22,8 +22,8 @@ "directory": "packages/types" }, "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0" + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1" }, "author": "Cumulus Authors", "license": "Apache-2.0" diff --git a/example/spec/parallel/kinesisTests/LambdaEventSourceSpec.js b/example/spec/parallel/kinesisTests/LambdaEventSourceSpec.js index 0382f24331a..6c7a96acefb 100644 --- a/example/spec/parallel/kinesisTests/LambdaEventSourceSpec.js +++ b/example/spec/parallel/kinesisTests/LambdaEventSourceSpec.js @@ -1,13 +1,12 @@ 'use strict'; const replace = require('lodash/replace'); + +const { deleteExecution } = require('@cumulus/api-client/executions'); const { getJsonS3Object } = require('@cumulus/aws-client/S3'); -const { LambdaStep } = require('@cumulus/integration-tests/sfnStep'); +const { updateRule } = require('@cumulus/api-client/rules'); +const { randomString } = require('@cumulus/common/test-utils'); const { getWorkflowFileKey } = require('@cumulus/common/workflows'); -const { Rule } = require('@cumulus/api/models'); - -jasmine.DEFAULT_TIMEOUT_INTERVAL = 9 * 60 * 1000; - const { addRulesWithPostfix, addProviders, @@ -18,9 +17,9 @@ const { deleteRules, setProcessEnvironment, } = require('@cumulus/integration-tests'); -const { randomString } = require('@cumulus/common/test-utils'); +const { LambdaStep } = require('@cumulus/integration-tests/sfnStep'); -const { deleteExecution } = require('@cumulus/api-client/executions'); +jasmine.DEFAULT_TIMEOUT_INTERVAL = 9 * 60 * 1000; const { loadConfig, @@ -126,8 +125,15 @@ describe('When adding multiple rules that share a kinesis event stream', () => { await tryCatchExit(cleanUp, async () => { // Disable rule console.log(`Disabling rule ${rules[1].name}`); - const r = new Rule(); - await r.update(rules[1], { state: 'DISABLED' }); + + await updateRule({ + prefix: testConfig.stackName, + 
ruleName: rules[1].name, + updateParams: { + ...rules[1], + state: 'DISABLED', + }, + }); const record = { provider: `SWOT_PODAAC${testSuffix}`, @@ -149,7 +155,7 @@ describe('When adding multiple rules that share a kinesis event stream', () => { recordIdentifier, workflowArn, maxWaitForSFExistSecs, - 2 + 1 ); }); }); diff --git a/example/spec/parallel/queueGranules/QueueGranulesSpecPostProcessing.input.payload.json b/example/spec/parallel/queueGranules/QueueGranulesSpecPostProcessing.input.payload.json new file mode 100644 index 00000000000..b6a6dcc73ef --- /dev/null +++ b/example/spec/parallel/queueGranules/QueueGranulesSpecPostProcessing.input.payload.json @@ -0,0 +1,25 @@ +{ + "granules": [ + { + "granuleId": "MOD09GQ.A2016358.h13v04.006.2016360104606", + "dataType": "MOD09GQ", + "version": "006", + "files": [ + { + "bucket": "cumulus-test-sandbox-internal", + "fileName": "MOD09GQ.A2016358.h13v04.006.2016360104606.hdf", + "key": "garbage_path/cumulus-test-data/pdrs/MOD09GQ.A2016358.h13v04.006.2016360104606.hdf", + "size": 1098034, + "type": "data" + }, + { + "bucket": "cumulus-test-sandbox-internal", + "fileName": "MOD09GQ.A2016358.h13v04.006.2016360104606.hdf", + "key": "garbage_path/cumulus-test-data/pdrs/MOD09GQ.A2016358.h13v04.006.2016360104606.hdf", + "size": 21708, + "type": "data" + } + ] + } + ] +} diff --git a/example/spec/parallel/queueGranules/queueGranulesPostProcessingSpec.js b/example/spec/parallel/queueGranules/queueGranulesPostProcessingSpec.js new file mode 100644 index 00000000000..da117c3302d --- /dev/null +++ b/example/spec/parallel/queueGranules/queueGranulesPostProcessingSpec.js @@ -0,0 +1,219 @@ +const fs = require('fs'); +const flow = require('lodash/flow'); +const replace = require('lodash/fp/replace'); + +const { + addCollections, + addProviders, + cleanupCollections, + cleanupProviders, +} = require('@cumulus/integration-tests'); +const { randomStringFromRegex } = require('@cumulus/common/test-utils'); +const { updateCollection } = 
require('@cumulus/integration-tests/api/api'); +const { Execution, Granule } = require('@cumulus/api/models'); +const { deleteExecution } = require('@cumulus/api-client/executions'); +const { LambdaStep } = require('@cumulus/integration-tests/sfnStep'); + +const { buildAndExecuteWorkflow } = require('../../helpers/workflowUtils'); +const { + loadConfig, + createTimestampedTestId, + createTestDataPath, + createTestSuffix, + deleteFolder, +} = require('../../helpers/testUtils'); +const { + waitForGranuleAndDelete, +} = require('../../helpers/granuleUtils'); +const { waitForModelStatus } = require('../../helpers/apiUtils'); + +const workflowName = 'QueueGranulesPassthrough'; +const providersDir = './data/providers/s3/'; +const collectionsDir = './data/collections/s3_MOD09GQ_006'; + +describe('The Queue Granules workflow triggered with a database-schema-compliant (post-sync-granules) granule in the payload that has the createdAt key-value defined', () => { + let beforeAllFailed; + let collection; + let config; + let executionModel; + let granuleModel; + let inputPayload; + let lambdaStep; + let provider; + let queuedLambdaOutput; + let queueGranulesExecutionArn; + let testDataFolder; + let testSuffix; + let workflowExecution; + + beforeAll(async () => { + try { + config = await loadConfig(); + lambdaStep = new LambdaStep(); + + process.env.GranulesTable = `${config.stackName}-GranulesTable`; + granuleModel = new Granule(); + + const granuleRegex = '^MOD09GQ\\.A[\\d]{7}\\.[\\w]{6}\\.006\\.[\\d]{13}$'; + + const testId = createTimestampedTestId(config.stackName, 'QueueGranules'); + testSuffix = createTestSuffix(testId); + testDataFolder = createTestDataPath(testId); + + const inputPayloadFilename = + './spec/parallel/queueGranules/QueueGranulesSpecPostProcessing.input.payload.json'; + + collection = { name: `MOD09GQ${testSuffix}`, version: '006' }; + provider = { id: `s3_provider${testSuffix}` }; + + process.env.ExecutionsTable = `${config.stackName}-ExecutionsTable`; + 
executionModel = new Execution(); + process.env.CollectionsTable = `${config.stackName}-CollectionsTable`; + + // populate collections, providers and test data + await Promise.all([ + addCollections( + config.stackName, + config.bucket, + collectionsDir, + testSuffix + ), + addProviders( + config.stackName, + config.bucket, + providersDir, + config.bucket, + testSuffix + ), + ]); + await updateCollection({ + prefix: config.stackName, + collection, + updateParams: { duplicateHandling: 'replace' }, + }); + const inputPayloadJson = JSON.parse(fs.readFileSync(inputPayloadFilename, 'utf8')); + inputPayloadJson.granules[0].files = inputPayloadJson.granules[0].files.map( + (file) => ({ ...file, bucket: config.bucket }) + ); + const oldGranuleId = inputPayloadJson.granules[0].granuleId; + + // update test data filepaths + const newGranuleId = randomStringFromRegex(granuleRegex); + inputPayload = flow([ + JSON.stringify, + replace(new RegExp(oldGranuleId, 'g'), newGranuleId), + replace(new RegExp('"MOD09GQ"', 'g'), `"MOD09GQ${testSuffix}"`), + JSON.parse, + ])(inputPayloadJson); + // Add Date.now to test queueGranules behavior + inputPayload.granules[0].createdAt = Date.now(); + + workflowExecution = await buildAndExecuteWorkflow( + config.stackName, + config.bucket, + workflowName, + collection, + provider, + inputPayload + ); + + queueGranulesExecutionArn = workflowExecution.executionArn; + } catch (error) { + beforeAllFailed = true; + throw error; + } + }); + + afterAll(async () => { + // Wait to prevent out-of-order writes fouling up cleanup due to + // no-task step function. 
AWS doesn't promise event timing + // so we're really just defending against the majority of observed + // cases + await new Promise((resolve) => setTimeout(resolve, 7500)); + // clean up stack state added by test + await Promise.all( + inputPayload.granules.map(async (granule) => { + await waitForGranuleAndDelete( + config.stackName, + granule.granuleId, + 'completed' + ); + }) + ); + + await deleteExecution({ + prefix: config.stackName, + executionArn: queuedLambdaOutput.payload.running[0], + }); + + await deleteExecution({ + prefix: config.stackName, + executionArn: queueGranulesExecutionArn, + }); + + await Promise.all([ + deleteFolder(config.bucket, testDataFolder), + cleanupCollections( + config.stackName, + config.bucket, + collectionsDir, + testSuffix + ), + cleanupProviders( + config.stackName, + config.bucket, + providersDir, + testSuffix + ), + ]); + }); + + it('completes execution with success status', () => { + if (beforeAllFailed) fail('beforeAll() failed'); + expect(workflowExecution.status).toEqual('completed'); + }); + + describe('the QueueGranules Lambda function', () => { + it('has expected arns output', async () => { + if (beforeAllFailed) fail('beforeAll() failed'); + queuedLambdaOutput = await lambdaStep.getStepOutput( + workflowExecution.executionArn, + 'QueueGranules' + ); + expect(queuedLambdaOutput.payload.running.length).toEqual(1); + }); + }); + + describe('the reporting lambda has received the CloudWatch step function event and', () => { + it('the execution records are added to the database', async () => { + if (beforeAllFailed) fail('beforeAll() failed'); + const queuedRecord = await waitForModelStatus( + executionModel, + { arn: workflowExecution.executionArn }, + 'completed' + ); + const childWorkflowRecord = await waitForModelStatus( + executionModel, + { arn: queuedLambdaOutput.payload.running[0] }, + 'completed' + ); + expect(queuedRecord.status).toEqual('completed'); + expect(childWorkflowRecord.status).toEqual('completed'); + 
}); + }); + + it('the granule is added to the database by the child workflow', async () => { + if (beforeAllFailed) fail('beforeAll() failed'); + await Promise.all( + inputPayload.granules.map(async (granule) => { + const record = await waitForModelStatus( + granuleModel, + { granuleId: granule.granuleId }, + 'completed' + ); + expect(record.status).toEqual('completed'); + expect(record.execution.replace(/https.*details\//, '')).toEqual(queuedLambdaOutput.payload.running[0]); + }) + ); + }); +}); diff --git a/example/spec/parallel/testAPI/snsRuleSpec.js b/example/spec/parallel/testAPI/snsRuleSpec.js index 31bf602575d..5f5b0ce9a92 100644 --- a/example/spec/parallel/testAPI/snsRuleSpec.js +++ b/example/spec/parallel/testAPI/snsRuleSpec.js @@ -12,6 +12,7 @@ const { waitForTestExecutionStart, } = require('@cumulus/integration-tests'); +const { getSnsTriggerPermissionId } = require('@cumulus/api/lib/snsRuleHelpers'); const { deleteExecution } = require('@cumulus/api-client/executions'); const { sns, lambda } = require('@cumulus/aws-client/services'); const { LambdaStep } = require('@cumulus/integration-tests/sfnStep'); @@ -145,13 +146,14 @@ describe('The SNS-type rule', () => { it('creates a policy when it is created in an enabled state', async () => { if (beforeAllFailed) fail(beforeAllFailed); - const { Policy } = await lambda().getPolicy({ + const response = await lambda().getPolicy({ FunctionName: consumerName, }).promise(); + const { Policy } = response; const statementSids = JSON.parse(Policy).Statement.map((s) => s.Sid); - expect(statementSids).toContain(`${ruleName}Permission`); + expect(statementSids).toContain(getSnsTriggerPermissionId(postRule.record)); }); }); @@ -289,7 +291,8 @@ describe('The SNS-type rule', () => { if (beforeAllFailed) fail(beforeAllFailed); const { Policy } = await lambda().getPolicy({ FunctionName: consumerName }).promise(); const { Statement } = JSON.parse(Policy); - expect(Statement.some((s) => s.Sid === 
expectedStatementId)).toBeTrue(); + expect(await getNumberOfTopicSubscriptions(newTopicArn)).toBeGreaterThan(0); + expect(Statement.some((s) => s.Sid === getSnsTriggerPermissionId(putRule))).toBeTrue(); }); }); diff --git a/lambdas/data-migration1/package.json b/lambdas/data-migration1/package.json index 86bb18b9a3a..3982a3ace6a 100644 --- a/lambdas/data-migration1/package.json +++ b/lambdas/data-migration1/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/data-migration1", - "version": "10.1.0", + "version": "10.1.1", "description": "A Lambda function used for doing data migrations", "license": "Apache-2.0", "engines": { @@ -25,18 +25,18 @@ "timeout": "15m" }, "dependencies": { - "@cumulus/api": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/db": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/types": "10.1.0", + "@cumulus/api": "10.1.1", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/db": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/types": "10.1.1", "knex": "0.95.15", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "pg": "^8.3.0" }, "devDependencies": { - "@cumulus/test-data": "10.1.0" + "@cumulus/test-data": "10.1.1" } } diff --git a/lambdas/data-migration1/src/index.ts b/lambdas/data-migration1/src/index.ts index fbedd90231b..e53d656f09b 100644 --- a/lambdas/data-migration1/src/index.ts +++ b/lambdas/data-migration1/src/index.ts @@ -9,7 +9,8 @@ import { migrateRules } from './rules'; const logger = new Logger({ sender: '@cumulus/data-migration1' }); export interface HandlerEvent { - env?: NodeJS.ProcessEnv + env?: NodeJS.ProcessEnv, + forceRulesMigration?: boolean, } export const handler = async (event: HandlerEvent): Promise => { @@ -20,7 +21,7 @@ export const handler = async (event: HandlerEvent): Promise => const collectionsMigrationSummary = await migrateCollections(env, knex); const providersMigrationSummary = 
await migrateProviders(env, knex); const asyncOpsMigrationSummary = await migrateAsyncOperations(env, knex); - const rulesMigrationSummary = await migrateRules(env, knex); + const rulesMigrationSummary = await migrateRules(env, knex, event.forceRulesMigration); const result: MigrationSummary = { MigrationSummary: { diff --git a/lambdas/data-migration1/src/rules.ts b/lambdas/data-migration1/src/rules.ts index 8ef796beb71..9767f47306d 100644 --- a/lambdas/data-migration1/src/rules.ts +++ b/lambdas/data-migration1/src/rules.ts @@ -3,7 +3,7 @@ import { Knex } from 'knex'; import DynamoDbSearchQueue from '@cumulus/aws-client/DynamoDbSearchQueue'; import { RulePgModel, - translateApiRuleToPostgresRule, + translateApiRuleToPostgresRuleRaw, } from '@cumulus/db'; import { envUtils } from '@cumulus/common'; import Logger from '@cumulus/logger'; @@ -20,12 +20,15 @@ const logger = new Logger({ sender: '@cumulus/data-migration/rules' }); * @param {AWS.DynamoDB.DocumentClient.AttributeMap} dynamoRecord * Record from DynamoDB * @param {Knex} knex - Knex client for writing to RDS database + * @param {boolean} forceRulesMigration + * If true, force migrating rules from DynamoDB to RDS regardless of timestamps. * @returns {Promise} - Cumulus ID for record * @throws {RecordAlreadyMigrated} if record was already migrated */ export const migrateRuleRecord = async ( dynamoRecord: AWS.DynamoDB.DocumentClient.AttributeMap, - knex: Knex + knex: Knex, + forceRulesMigration?: boolean ): Promise => { const rulePgModel = new RulePgModel(); @@ -42,19 +45,22 @@ export const migrateRuleRecord = async ( } // Throw error if it was already migrated. - if (existingRecord && existingRecord.updated_at >= new Date(dynamoRecord.updatedAt)) { + if (!forceRulesMigration + && existingRecord + && existingRecord.updated_at >= new Date(dynamoRecord.updatedAt)) { throw new RecordAlreadyMigrated(`Rule name ${dynamoRecord.name} was already migrated, skipping`); } // Map old record to new schema. 
- const updatedRecord = await translateApiRuleToPostgresRule(dynamoRecord, knex); + const updatedRecord = await translateApiRuleToPostgresRuleRaw(dynamoRecord, knex); await rulePgModel.upsert(knex, updatedRecord); }; export const migrateRules = async ( env: NodeJS.ProcessEnv, - knex: Knex + knex: Knex, + forceRulesMigration?: boolean ): Promise => { const rulesTable = envUtils.getRequiredEnvVar('RulesTable', env); @@ -75,7 +81,7 @@ export const migrateRules = async ( migrationSummary.dynamoRecords += 1; try { - await migrateRuleRecord(record, knex); + await migrateRuleRecord(record, knex, forceRulesMigration); migrationSummary.success += 1; } catch (error) { if (error instanceof RecordAlreadyMigrated) { diff --git a/lambdas/data-migration1/tests/test-index.js b/lambdas/data-migration1/tests/test-index.js index 8876b6a775b..dd2bcbbb66d 100644 --- a/lambdas/data-migration1/tests/test-index.js +++ b/lambdas/data-migration1/tests/test-index.js @@ -19,17 +19,17 @@ const { destroyLocalTestDb, localStackConnectionEnv, migrationDir, + RulePgModel, } = require('@cumulus/db'); const { handler } = require('../dist/lambda'); -const testDbName = `data_migration_1_${cryptoRandomString({ length: 10 })}`; + const workflow = cryptoRandomString({ length: 10 }); test.before(async (t) => { process.env = { ...process.env, ...localStackConnectionEnv, - PG_DATABASE: testDbName, stackName: cryptoRandomString({ length: 10 }), system_bucket: cryptoRandomString({ length: 10 }), AsyncOperationsTable: cryptoRandomString({ length: 10 }), @@ -54,13 +54,6 @@ test.before(async (t) => { t.context.providersModel = new Provider(); t.context.rulesModel = new Rule(); - await Promise.all([ - t.context.asyncOperationsModel.createTable(), - t.context.collectionsModel.createTable(), - t.context.providersModel.createTable(), - t.context.rulesModel.createTable(), - ]); - await Promise.all([ putJsonS3Object( process.env.system_bucket, @@ -73,27 +66,41 @@ test.before(async (t) => { { testworkflow: 
'workflow-config' } ), ]); - const { knex, knexAdmin } = await generateLocalTestDb(testDbName, migrationDir); +}); + +test.beforeEach(async (t) => { + await Promise.all([ + t.context.asyncOperationsModel.createTable(), + t.context.collectionsModel.createTable(), + t.context.providersModel.createTable(), + t.context.rulesModel.createTable(), + ]); + + t.context.testDbName = `data_migration_1_${cryptoRandomString({ length: 10 })}`; + const { knex, knexAdmin } = await generateLocalTestDb(t.context.testDbName, migrationDir); t.context.knex = knex; t.context.knexAdmin = knexAdmin; + t.context.rulePgModel = new RulePgModel(); + + process.env = { + ...process.env, + PG_DATABASE: t.context.testDbName, + }; }); -test.after.always(async (t) => { +test.afterEach.always(async (t) => { await t.context.rulesModel.deleteTable(); await t.context.providersModel.deleteTable(); await t.context.collectionsModel.deleteTable(); await t.context.asyncOperationsModel.deleteTable(); + await destroyLocalTestDb(t.context); +}); +test.after.always(async () => { await recursivelyDeleteS3Bucket(process.env.system_bucket); - - await destroyLocalTestDb({ - knex: t.context.knex, - knexAdmin: t.context.knexAdmin, - testDbName, - }); }); -test('handler migrates async operations, collections, providers, rules', async (t) => { +test.serial('handler migrates async operations, collections, providers, rules', async (t) => { const { asyncOperationsModel, collectionsModel, @@ -155,21 +162,17 @@ test('handler migrates async operations, collections, providers, rules', async ( name: fakeCollection.name, version: fakeCollection.version, }, - rule: { type: 'onetime', value: cryptoRandomString({ length: 10 }), arn: cryptoRandomString({ length: 10 }), logEventArn: cryptoRandomString({ length: 10 }) }, - executionNamePrefix: cryptoRandomString({ length: 10 }), - meta: { key: 'value' }, - queueUrl: cryptoRandomString({ length: 10 }), - payload: { result: { key: 'value' } }, - tags: ['tag1', 'tag2'], + rule: { type: 
'onetime' }, createdAt: Date.now(), updatedAt: Date.now(), }; + const ruleWithTrigger = await rulesModel.createRuleTrigger(fakeRule); await Promise.all([ collectionsModel.create(fakeCollection), asyncOperationsModel.create(fakeAsyncOperation), providersModel.create(fakeProvider), - rulesModel.create(fakeRule), + rulesModel.create(ruleWithTrigger), ]); t.teardown(() => Promise.all([ @@ -209,3 +212,103 @@ test('handler migrates async operations, collections, providers, rules', async ( }; t.deepEqual(call, expected); }); + +test.serial('handler passes along forceRulesMigration parameter correctly', async (t) => { + const { + collectionsModel, + providersModel, + rulesModel, + knex, + rulePgModel, + } = t.context; + + const fakeCollection = { + name: `${cryptoRandomString({ length: 10 })}collection`, + version: '0.0.0', + duplicateHandling: 'replace', + granuleId: '^MOD09GQ\\.A[\\d]{7}\.[\\S]{6}\\.006\\.[\\d]{13}$', + granuleIdExtraction: '(MOD09GQ\\.(.*))\\.hdf', + sampleFileName: 'MOD09GQ.A2017025.h21v00.006.2017034065104.hdf', + files: [{ regex: '^.*\\.txt$', sampleFileName: 'file.txt', bucket: 'bucket' }], + }; + + const fakeProvider = { + id: cryptoRandomString({ length: 10 }), + protocol: 's3', + host: `${cryptoRandomString({ length: 10 })}host`, + }; + + const fakeRule = { + name: cryptoRandomString({ length: 10 }), + workflow: workflow, + provider: fakeProvider.name, + state: 'DISABLED', + collection: { + name: fakeCollection.name, + version: fakeCollection.version, + }, + rule: { type: 'onetime' }, + createdAt: Date.now(), + updatedAt: Date.now(), + }; + + await Promise.all([ + collectionsModel.create(fakeCollection), + providersModel.create(fakeProvider), + rulesModel.createRuleTrigger(fakeRule) + .then((ruleWithTrigger) => rulesModel.create(ruleWithTrigger)), + ]); + + t.teardown(() => Promise.all([ + rulesModel.delete(fakeRule), + providersModel.delete(fakeProvider), + ]).then(() => collectionsModel.delete(fakeCollection))); + + // migrate records for the 
first time + await handler({}); + + const records = await rulePgModel.search( + knex, + {} + ); + t.is(records.length, 1); + + // re-migrate and force rules migration + const call = await handler({ + forceRulesMigration: true, + }); + const expected = { + MigrationSummary: { + async_operations: { + failed: 0, + migrated: 0, + skipped: 0, + total_dynamo_db_records: 0, + }, + collections: { + failed: 0, + migrated: 0, + skipped: 1, + total_dynamo_db_records: 1, + }, + providers: { + failed: 0, + migrated: 0, + skipped: 1, + total_dynamo_db_records: 1, + }, + rules: { + failed: 0, + migrated: 1, + skipped: 0, + total_dynamo_db_records: 1, + }, + }, + }; + t.deepEqual(call, expected); + const migratedRecords = await rulePgModel.search( + knex, + {} + ); + t.is(migratedRecords.length, 1); +}); diff --git a/lambdas/data-migration1/tests/test-rules.js b/lambdas/data-migration1/tests/test-rules.js index 57701380280..0d79fd66a50 100644 --- a/lambdas/data-migration1/tests/test-rules.js +++ b/lambdas/data-migration1/tests/test-rules.js @@ -291,7 +291,8 @@ test.serial('migrateRules skips already migrated record', async (t) => { fakeRule.queueUrl = queueUrls.queueUrl; // This always sets updatedAt to Date.now() - await rulesModel.create(fakeRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(fakeRule); + await rulesModel.create(ruleWithTrigger); // We need to make the updateAt of the record we're about to migrate later // than the record in the dynamo table. 
@@ -317,6 +318,46 @@ test.serial('migrateRules skips already migrated record', async (t) => { t.is(records.length, 1); }); +test.serial('migrateRules re-migrates already migrated record if forceRulesMigration is specified', async (t) => { + const { knex, fakeCollection, fakeProvider, rulePgModel } = t.context; + const fakeRule = generateFakeRule({ + collection: { + name: fakeCollection.name, + version: fakeCollection.version, + }, + provider: fakeProvider.id, + }); + const queueUrls = randomString(); + fakeRule.queueUrl = queueUrls.queueUrl; + + // This always sets updatedAt to Date.now() + const ruleWithTrigger = await rulesModel.createRuleTrigger(fakeRule); + await rulesModel.create(ruleWithTrigger); + + // We need to make the updateAt of the record we're about to migrate later + // than the record in the dynamo table. + fakeRule.updatedAt = Date.now(); + + await migrateFakeCollectionRecord(fakeCollection, knex); + await migrateFakeProviderRecord(fakeProvider, knex); + await migrateRuleRecord(fakeRule, knex); + + t.teardown(() => rulesModel.delete(fakeRule)); + const migrationSummary = await migrateRules(process.env, knex, true); + t.deepEqual(migrationSummary, { + dynamoRecords: 1, + + skipped: 0, + failed: 0, + success: 1, + }); + const records = await rulePgModel.search( + knex, + {} + ); + t.is(records.length, 1); +}); + test.serial('migrateRules processes multiple rules', async (t) => { const { knex, fakeCollection, fakeProvider, rulePgModel } = t.context; const anotherFakeCollection = fakeCollectionFactory(); @@ -355,9 +396,11 @@ test.serial('migrateRules processes multiple rules', async (t) => { fakeRule1.queueUrl = queueUrls1.queueUrl; fakeRule2.queueUrl = queueUrls2.queueUrl; + const ruleWithTrigger1 = await rulesModel.createRuleTrigger(fakeRule1); + const ruleWithTrigger2 = await rulesModel.createRuleTrigger(fakeRule2); await Promise.all([ - rulesModel.create(fakeRule1), - rulesModel.create(fakeRule2), + rulesModel.create(ruleWithTrigger1), + 
rulesModel.create(ruleWithTrigger2), ]); t.teardown(() => Promise.all([ rulesModel.delete(fakeRule1), @@ -437,3 +480,33 @@ test.serial('migrateRules processes all non-failing records', async (t) => { ); t.is(records.length, 1); }); + +test('migrateRuleRecord with forceRulesMigration: true overwrites existing migrated record and unsets values correctly', async (t) => { + const { knex, fakeCollection, fakeProvider, rulePgModel } = t.context; + const fakeRule = generateFakeRule({ + collection: { + name: fakeCollection.name, + version: fakeCollection.version, + }, + provider: fakeProvider.id, + updatedAt: Date.now(), + queueUrl: 'queue-url', + }); + + await migrateFakeCollectionRecord(fakeCollection, knex); + await migrateFakeProviderRecord(fakeProvider, knex); + await migrateRuleRecord(fakeRule, knex); + + const migratedRule = await rulePgModel.get(knex, { name: fakeRule.name }); + t.is(migratedRule.queue_url, 'queue-url'); + + const updatedFakeRule = { + ...fakeRule, + queueUrl: undefined, + }; + + await migrateRuleRecord(updatedFakeRule, knex, true); + + const updatedRule = await rulePgModel.get(knex, { name: fakeRule.name }); + t.is(updatedRule.queue_url, null); +}); diff --git a/lambdas/data-migration2/package.json b/lambdas/data-migration2/package.json index 60124faa5d0..2d1e7608182 100644 --- a/lambdas/data-migration2/package.json +++ b/lambdas/data-migration2/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/data-migration2", - "version": "10.1.0", + "version": "10.1.1", "description": "A Lambda function used for doing data migrations intended to be executed after data-migration1.", "license": "Apache-2.0", "engines": { @@ -25,21 +25,21 @@ "timeout": "15m" }, "dependencies": { - "@cumulus/api": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/db": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/message": "10.1.0", - "@cumulus/types": "10.1.0", + "@cumulus/api": "10.1.1", + 
"@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/db": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/message": "10.1.1", + "@cumulus/types": "10.1.1", "JSONStream": "1.3.5", "knex": "0.95.15", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "p-map": "^4.0.0", "pg": "^8.3.0" }, "devDependencies": { - "@cumulus/test-data": "10.1.0" + "@cumulus/test-data": "10.1.1" } } diff --git a/lambdas/db-migration/package.json b/lambdas/db-migration/package.json index 04096bde682..6127f24c57b 100644 --- a/lambdas/db-migration/package.json +++ b/lambdas/db-migration/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/db-migration-lambda", - "version": "10.1.0", + "version": "10.1.1", "description": "A Lambda function used for deploying DB migrations", "license": "Apache-2.0", "engines": { @@ -20,7 +20,7 @@ "tsc:listEmittedFiles": "../../node_modules/.bin/tsc --listEmittedFiles" }, "dependencies": { - "@cumulus/db": "10.1.0", + "@cumulus/db": "10.1.1", "knex": "0.95.15", "pg": "^8.3.0" } diff --git a/lambdas/db-provision-user-database/package.json b/lambdas/db-provision-user-database/package.json index c51a24481b7..151c84df907 100644 --- a/lambdas/db-provision-user-database/package.json +++ b/lambdas/db-provision-user-database/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/db-provision-user-database-lambda", - "version": "10.1.0", + "version": "10.1.1", "description": "A Lambda function used for provisioning user databases", "engines": { "node": ">=12.18.0" @@ -24,8 +24,8 @@ "timeout": "2m" }, "dependencies": { - "@cumulus/common": "10.1.0", - "@cumulus/db": "10.1.0", + "@cumulus/common": "10.1.1", + "@cumulus/db": "10.1.1", "knex": "0.95.15", "pg": "^8.3.0" }, diff --git a/lambdas/postgres-migration-async-operation/package.json b/lambdas/postgres-migration-async-operation/package.json index 30debb3a153..a8b66a7e36e 100644 --- a/lambdas/postgres-migration-async-operation/package.json +++ 
b/lambdas/postgres-migration-async-operation/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/postgres-migration-async-operation", - "version": "10.1.0", + "version": "10.1.1", "description": "A Lambda function used to start an ECS task to run data-migrations2 lambda", "license": "Apache-2.0", "engines": { @@ -28,12 +28,12 @@ "timeout": "15m" }, "dependencies": { - "@cumulus/api": "10.1.0", - "@cumulus/async-operations": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/types": "10.1.0" + "@cumulus/api": "10.1.1", + "@cumulus/async-operations": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/types": "10.1.1" }, "devDependencies": { - "@cumulus/test-data": "10.1.0" + "@cumulus/test-data": "10.1.1" } } diff --git a/lambdas/postgres-migration-count-tool/package.json b/lambdas/postgres-migration-count-tool/package.json index 8df9a0f626e..9134ea3526d 100644 --- a/lambdas/postgres-migration-count-tool/package.json +++ b/lambdas/postgres-migration-count-tool/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/postgres-reconcile", - "version": "10.1.0", + "version": "10.1.1", "description": "A Lambda function used for generating counts between Dynamo/ES and Postgres", "license": "Apache-2.0", "engines": { @@ -21,12 +21,12 @@ "tsc:listEmittedFiles": "../../node_modules/.bin/tsc --listEmittedFiles" }, "dependencies": { - "@cumulus/api": "10.1.0", - "@cumulus/api-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/db": "10.1.0", - "@cumulus/message": "10.1.0", - "@cumulus/types": "10.1.0", + "@cumulus/api": "10.1.1", + "@cumulus/api-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/db": "10.1.1", + "@cumulus/message": "10.1.1", + "@cumulus/types": "10.1.1", "knex": "0.95.15", "p-map": "^4.0.0", "pg": "^8.3.0" diff --git a/lambdas/sqs-message-remover/package.json b/lambdas/sqs-message-remover/package.json index 75edff4af3e..0fdd3c942ef 100644 --- a/lambdas/sqs-message-remover/package.json +++ b/lambdas/sqs-message-remover/package.json @@ -1,6 +1,6 @@ 
{ "name": "@cumulus/sqs-message-remover-lambda", - "version": "10.1.0", + "version": "10.1.1", "description": "Remove processed messages from SQS queues", "main": "src/index.js", "private": true, @@ -36,10 +36,10 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/ingest": "10.1.0", - "@cumulus/logger": "10.1.0", - "lodash": "^4.17.20" + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/ingest": "10.1.1", + "@cumulus/logger": "10.1.1", + "lodash": "^4.17.21" } } diff --git a/lerna.json b/lerna.json index 658183609ab..e58a981dbf4 100644 --- a/lerna.json +++ b/lerna.json @@ -1,6 +1,6 @@ { "lerna": "3.20.2", - "version": "10.1.0", + "version": "10.1.1", "packages": [ "example", "example/lambdas/*", diff --git a/package.json b/package.json index 019f9eaab2c..57161128a29 100644 --- a/package.json +++ b/package.json @@ -123,7 +123,7 @@ "jsdoc-to-markdown": "7.1.1", "latest-version": "^4.0.0", "lerna": "4.0.0", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "markdownlint-cli": "^0.19.0", "md5": "^2.2.1", "mime-types": "^2.1.22", diff --git a/packages/api-client/package.json b/packages/api-client/package.json index d1bcbec70a8..dbf4adb8cc1 100644 --- a/packages/api-client/package.json +++ b/packages/api-client/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/api-client", - "version": "10.1.0", + "version": "10.1.1", "description": "API client for working with the Cumulus archive API", "keywords": [ "GIBS", @@ -38,11 +38,11 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/logger": "10.1.1", "p-retry": "^2.0.0" }, "devDependencies": { - "@cumulus/types": "10.1.0" + "@cumulus/types": "10.1.1" } } diff --git a/packages/api/bin/serveUtils.js b/packages/api/bin/serveUtils.js index 37e4970d1e6..2cefe2a3a45 100644 --- 
a/packages/api/bin/serveUtils.js +++ b/packages/api/bin/serveUtils.js @@ -129,7 +129,8 @@ async function addRules(rules) { const rulePgModel = new RulePgModel(); return await Promise.all( rules.map(async (r) => { - const dynamoRecord = await ruleModel.create(r); + const ruleWithTrigger = await ruleModel.createRuleTrigger(r); + const dynamoRecord = await ruleModel.create(ruleWithTrigger); await indexer.indexRule(es.client, dynamoRecord, es.index); const dbRecord = await translateApiRuleToPostgresRule(dynamoRecord, knex); await rulePgModel.create(knex, dbRecord); diff --git a/packages/api/ecs/async-operation/index.js b/packages/api/ecs/async-operation/index.js index 948499344f6..db8c3ac9e2d 100644 --- a/packages/api/ecs/async-operation/index.js +++ b/packages/api/ecs/async-operation/index.js @@ -22,18 +22,20 @@ const { dynamodb } = require('@cumulus/aws-client/services'); const logger = new Logger({ sender: 'ecs/async-operation' }); +const requiredEnvironmentalVariables = [ + 'asyncOperationId', + 'asyncOperationsTable', + 'lambdaName', + 'payloadUrl', +]; + /** * Return a list of environment variables that should be set but aren't * * @returns {Array} a list of missing environment variables */ function missingEnvironmentVariables() { - return [ - 'asyncOperationId', - 'asyncOperationsTable', - 'lambdaName', - 'payloadUrl', - ].filter((key) => process.env[key] === undefined); + return requiredEnvironmentalVariables.filter((key) => process.env[key] === undefined); } /** @@ -93,6 +95,7 @@ async function fetchAndDeletePayload(payloadUrl) { * moduleFunctionName */ async function getLambdaInfo(FunctionName) { + logger.debug(`Retrieving lambda info for ${FunctionName}.`); const lambda = new AWS.Lambda(); const getFunctionResponse = await lambda.getFunction({ @@ -120,6 +123,7 @@ async function fetchLambdaFunction(codeUrl) { // Fetching the lambda zip file from S3 was failing intermittently because // of connection timeouts. 
If the download fails, this will retry it up to // 10 times with an exponential backoff. + logger.debug(`Fetching lambda code from ${codeUrl}.`); await pRetry( () => promisify(pipeline)( got.stream(codeUrl), @@ -135,7 +139,7 @@ async function fetchLambdaFunction(codeUrl) { }, } ); - + logger.debug('Lambda downloaded, unzipping.'); return exec('unzip -o /home/task/fn.zip -d /home/task/lambda-function'); } @@ -251,6 +255,7 @@ async function runTask() { try { // Fetch the event that will be passed to the lambda function from S3 + logger.debug(`Fetching payload from ${process.env.payloadUrl}.`); payload = await fetchAndDeletePayload(process.env.payloadUrl); } catch (error) { logger.error('Failed to fetch payload:', error); @@ -259,16 +264,17 @@ async function runTask() { } else { await updateAsyncOperation('RUNNER_FAILED', error); } - return; } let result; try { + logger.debug(`Loading lambda function ${process.env.lambdaName}`); // Load the lambda function const task = require(`/home/task/lambda-function/${lambdaInfo.moduleFileName}`); //eslint-disable-line global-require, import/no-dynamic-require // Run the lambda function + logger.debug(`Invoking task lambda function: ${process.env.lambdaName}`); result = await task[lambdaInfo.moduleFunctionName](payload); } catch (error) { logger.error('Failed to execute the lambda function:', error); @@ -283,11 +289,17 @@ async function runTask() { logger.error('Failed to update record', error); throw error; } + logger.debug('exiting async-operation runTask()'); } const missingVars = missingEnvironmentVariables(); - -if (missingVars.length === 0) runTask(); -else logger.error('Missing environment variables:', missingVars.join(', ')); +if (missingVars.length === 0) { + logger.debug( + `initiating runTask() with environment: ${requiredEnvironmentalVariables.map((v) => `${v}[${process.env[v]}];`).join(' ')}` + ); + runTask(); +} else { + logger.error('Missing environment variables:', missingVars.join(', ')); +} module.exports = { 
updateAsyncOperation }; diff --git a/packages/api/ecs/async-operation/package.json b/packages/api/ecs/async-operation/package.json index e2df1017018..e7f1ed91b67 100644 --- a/packages/api/ecs/async-operation/package.json +++ b/packages/api/ecs/async-operation/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/ecs-async-operation", - "version": "10.1.0", + "version": "10.1.1", "description": "The docker image for running async operations", "keywords": [ "NASA", @@ -20,13 +20,13 @@ "test": "../../../../node_modules/.bin/ava" }, "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/db": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/db": "10.1.1", + "@cumulus/logger": "10.1.1", "aws-sdk": "^2.585.0", "crypto-random-string": "^3.2.0", "got": "^11.7.0", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "p-retry": "^2.0.0" }, "engines": { diff --git a/packages/api/endpoints/rules.js b/packages/api/endpoints/rules.js index 4382d9d10f2..7e5af228857 100644 --- a/packages/api/endpoints/rules.js +++ b/packages/api/endpoints/rules.js @@ -2,14 +2,15 @@ const router = require('express-promise-router')(); const { inTestMode } = require('@cumulus/common/test-utils'); + const { RecordDoesNotExist } = require('@cumulus/errors'); const Logger = require('@cumulus/logger'); - const { getKnexClient, RulePgModel, TableNames, translateApiRuleToPostgresRule, + translateApiRuleToPostgresRuleRaw, } = require('@cumulus/db'); const { Search } = require('@cumulus/es-client/search'); const { addToLocalES, indexRule } = require('@cumulus/es-client/indexer'); @@ -68,27 +69,30 @@ async function get(req, res) { */ async function post(req, res) { const { - model = new models.Rule(), + ruleModel = new models.Rule(), dbClient = await getKnexClient(), + rulePgModel = new RulePgModel(), } = req.testContext || {}; let record; const apiRule = req.body || {}; const name = apiRule.name; - const rulePgModel = new RulePgModel(); - if (await model.exists(name)) { 
+ if (await ruleModel.exists(name)) { return res.boom.conflict(`A record already exists for ${name}`); } try { apiRule.createdAt = Date.now(); apiRule.updatedAt = Date.now(); - const postgresRule = await translateApiRuleToPostgresRule(apiRule, dbClient); + + // Create rule trigger + const ruleWithTrigger = await ruleModel.createRuleTrigger(apiRule); + const postgresRule = await translateApiRuleToPostgresRule(ruleWithTrigger, dbClient); await dbClient.transaction(async (trx) => { await rulePgModel.create(trx, postgresRule); - record = await model.create(apiRule); + record = await ruleModel.create(ruleWithTrigger); }); if (inTestMode()) await addToLocalES(record, indexRule); return res.send({ message: 'Record saved', record }); @@ -113,8 +117,14 @@ async function post(req, res) { * name request parameter, or a Not Found (404) if there is no existing rule * with the specified name */ -async function put({ params: { name }, body }, res) { - const model = new models.Rule(); +async function put(req, res) { + const { + ruleModel = new models.Rule(), + dbClient = await getKnexClient(), + rulePgModel = new RulePgModel(), + } = req.testContext || {}; + const { params: { name }, body } = req; + const apiRule = { ...body }; let newRule; @@ -124,14 +134,12 @@ async function put({ params: { name }, body }, res) { } try { - const oldRule = await model.get({ name }); - const dbClient = await getKnexClient(); - const rulePgModel = new RulePgModel(); + const oldRule = await ruleModel.get({ name }); apiRule.updatedAt = Date.now(); apiRule.createdAt = oldRule.createdAt; - // If rule type is onetime no change is allowed unless it is a rerun + // If rule type is onetime no change is allowed unless it is a rerun if (apiRule.action === 'rerun') { return models.Rule.invoke(oldRule).then(() => res.send(oldRule)); } @@ -139,16 +147,30 @@ async function put({ params: { name }, body }, res) { const fieldsToDelete = Object.keys(oldRule).filter( (key) => !(key in apiRule) && key !== 'createdAt' 
); - const postgresRule = await translateApiRuleToPostgresRule(apiRule, dbClient); - - await dbClient.transaction(async (trx) => { - await rulePgModel.upsert(trx, postgresRule); - newRule = await model.update(oldRule, apiRule, fieldsToDelete); - }); + const ruleWithUpdatedTrigger = await ruleModel.updateRuleTrigger(oldRule, apiRule); + + try { + await dbClient.transaction(async (trx) => { + // stores updated record in dynamo + newRule = await ruleModel.update(ruleWithUpdatedTrigger, fieldsToDelete); + // make sure we include undefined values so fields will be correctly unset in PG + const postgresRule = await translateApiRuleToPostgresRuleRaw(newRule, dbClient); + await rulePgModel.upsert(trx, postgresRule); + }); + // wait to delete original event sources until all update operations were successful + await ruleModel.deleteOldEventSourceMappings(oldRule); + } catch (innerError) { + if (newRule) { + const ruleWithRevertedTrigger = await ruleModel.updateRuleTrigger(apiRule, oldRule); + await ruleModel.update(ruleWithRevertedTrigger); + } + throw innerError; + } if (inTestMode()) await addToLocalES(newRule, indexRule); return res.send(newRule); } catch (error) { + log.error('Unexpected error when updating rule:', error); if (error instanceof RecordDoesNotExist) { return res.boom.notFound(`Rule '${name}' not found`); } @@ -204,4 +226,5 @@ router.delete('/:name', del); module.exports = { router, post, + put, }; diff --git a/packages/api/lib/snsRuleHelpers.js b/packages/api/lib/snsRuleHelpers.js new file mode 100644 index 00000000000..3eebc4493e4 --- /dev/null +++ b/packages/api/lib/snsRuleHelpers.js @@ -0,0 +1,15 @@ +const getSnsPermissionIdMaxLength = () => 64; +const getSnsPermissionIdSuffix = () => 'Permission'; + +function getSnsTriggerPermissionId(rule) { + return `${rule.rule.value.split(':').pop()}${getSnsPermissionIdSuffix()}`.substring( + 0, + getSnsPermissionIdMaxLength() + ); +} + +module.exports = { + getSnsPermissionIdMaxLength, + getSnsPermissionIdSuffix, 
+ getSnsTriggerPermissionId, +}; diff --git a/packages/api/lib/testUtils.js b/packages/api/lib/testUtils.js index 43cdac66ac3..73757b6f196 100644 --- a/packages/api/lib/testUtils.js +++ b/packages/api/lib/testUtils.js @@ -8,6 +8,7 @@ const merge = require('lodash/merge'); const { randomId } = require('@cumulus/common/test-utils'); const { sqs } = require('@cumulus/aws-client/services'); const { putJsonS3Object } = require('@cumulus/aws-client/S3'); +const { translateApiRuleToPostgresRule } = require('@cumulus/db'); const { constructCollectionId } = require('@cumulus/message/Collections'); const { createJwtToken } = require('./token'); @@ -395,6 +396,28 @@ async function getSqsQueueMessageCounts(queueUrl) { }; } +const createRuleTestRecords = async (context, ruleParams) => { + const { + testKnex, + ruleModel, + rulePgModel, + } = context; + const originalRule = fakeRuleFactoryV2(ruleParams); + + const ruleWithTrigger = await ruleModel.createRuleTrigger(originalRule); + const originalDynamoRule = await ruleModel.create(ruleWithTrigger); + const insertPgRecord = await translateApiRuleToPostgresRule(originalDynamoRule, testKnex); + + const [ruleCumulusId] = await rulePgModel.create(testKnex, insertPgRecord); + const originalPgRecord = await rulePgModel.get( + testKnex, { cumulus_id: ruleCumulusId } + ); + return { + originalDynamoRule, + originalPgRecord, + }; +}; + module.exports = { createFakeJwtAuthToken, createSqsQueues, @@ -418,4 +441,5 @@ module.exports = { isLocalApi, testEndpoint, setAuthorizedOAuthUsers, + createRuleTestRecords, }; diff --git a/packages/api/models/rules.js b/packages/api/models/rules.js index 7311cff1ace..23645dffcda 100644 --- a/packages/api/models/rules.js +++ b/packages/api/models/rules.js @@ -11,15 +11,21 @@ const log = require('@cumulus/common/log'); const s3Utils = require('@cumulus/aws-client/S3'); const workflows = require('@cumulus/common/workflows'); const { invoke } = require('@cumulus/aws-client/Lambda'); -const { sqsQueueExists } 
= require('@cumulus/aws-client/SQS'); +const SQS = require('@cumulus/aws-client/SQS'); const { ValidationError } = require('@cumulus/errors'); const Manager = require('./base'); const { rule: ruleSchema } = require('./schemas'); const { isResourceNotFoundException, ResourceNotFoundError } = require('../lib/errors'); +const { getSnsTriggerPermissionId } = require('../lib/snsRuleHelpers'); class Rule extends Manager { - constructor() { + constructor({ + SqsUtils = SQS, + SqsClient = awsServices.sqs(), + SnsClient = awsServices.sns(), + LambdaClient = awsServices.lambda(), + } = {}) { super({ tableName: process.env.RulesTable, tableHash: { name: 'name', type: 'S' }, @@ -36,10 +42,15 @@ class Rule extends Manager { this.kinesisSourceEvents = [{ name: process.env.messageConsumer, eventType: 'arn' }, { name: process.env.KinesisInboundEventLogger, eventType: 'logEventArn' }]; this.targetId = 'lambdaTarget'; + + this.SqsUtils = SqsUtils; + this.SqsClient = SqsClient; + this.SnsClient = SnsClient; + this.LambdaClient = LambdaClient; } async addRule(item, payload) { - const name = `${process.env.stackName}-custom-${item.name}`; + const name = this.buildCloudWatchRuleName(item); const r = await CloudwatchEvents.putEvent( name, item.rule.value, @@ -62,10 +73,14 @@ class Rule extends Manager { return await Promise.all(deletePromises); } + buildCloudWatchRuleName(item) { + return `${process.env.stackName}-custom-${item.name}`; + } + async delete(item) { switch (item.rule.type) { case 'scheduled': { - const name = `${process.env.stackName}-custom-${item.name}`; + const name = this.buildCloudWatchRuleName(item); await CloudwatchEvents.deleteTarget(this.targetId, name); await CloudwatchEvents.deleteEvent(name); break; @@ -139,34 +154,26 @@ class Rule extends Manager { /** * Updates a rule item. 
* - * @param {Object} original - the original rule - * @param {Object} updates - key/value fields for update; might not be a - * complete rule item + * @param {Object} updatedRuleItem - the updated rule item * @param {Array} [fieldsToDelete] - names of fields to delete from * rule * @returns {Promise} the response from database updates */ - async update(original, updates, fieldsToDelete = []) { - // Make a copy of the existing rule to preserve existing values - let updatedRuleItem = cloneDeep(original); + update(updatedRuleItem, fieldsToDelete = []) { + return super.update({ name: updatedRuleItem.name }, updatedRuleItem, fieldsToDelete); + } + + async updateRuleTrigger(ruleItem, updates) { + let updatedRuleItem = cloneDeep(ruleItem); + + const stateChanged = updates.state && updates.state !== ruleItem.state; + const valueUpdated = updates.rule && updates.rule.value !== ruleItem.rule.value; - // Apply updates to updated rule item to be saved merge(updatedRuleItem, updates); // Validate rule before kicking off workflows or adding event source mappings await this.constructor.recordIsValid(updatedRuleItem, this.schema, this.removeAdditional); - const stateChanged = updates.state && updates.state !== original.state; - const valueUpdated = updates.rule && updates.rule.value !== original.rule.value; - - updatedRuleItem = await this.updateRuleTrigger(updatedRuleItem, stateChanged, valueUpdated); - - return super.update({ name: original.name }, updatedRuleItem, fieldsToDelete); - } - - async updateRuleTrigger(ruleItem, stateChanged, valueUpdated) { - let updatedRuleItem = cloneDeep(ruleItem); - switch (updatedRuleItem.rule.type) { case 'scheduled': { const payload = await Rule.buildPayload(updatedRuleItem); @@ -175,7 +182,6 @@ class Rule extends Manager { } case 'kinesis': if (valueUpdated) { - await this.deleteKinesisEventSources(updatedRuleItem); const updatedRuleItemArns = await this.addKinesisEventSources(updatedRuleItem); updatedRuleItem = 
this.updateKinesisRuleArns(updatedRuleItem, updatedRuleItemArns); @@ -187,9 +193,6 @@ class Rule extends Manager { throw new Error('Including rule.arn is not allowed when enabling a disabled rule'); } let snsSubscriptionArn; - if (updatedRuleItem.rule.arn) { - await this.deleteSnsTrigger(updatedRuleItem); - } if (updatedRuleItem.state === 'ENABLED') { snsSubscriptionArn = await this.addSnsTrigger(updatedRuleItem); } @@ -250,12 +253,7 @@ class Rule extends Manager { } // Initialize new rule object - let newRuleItem = cloneDeep(item); - - // the default state is 'ENABLED' - if (!item.state) { - newRuleItem.state = 'ENABLED'; - } + const newRuleItem = cloneDeep(item); newRuleItem.createdAt = item.createdAt || Date.now(); newRuleItem.updatedAt = item.updatedAt || Date.now(); @@ -263,8 +261,6 @@ class Rule extends Manager { // Validate rule before kicking off workflows or adding event source mappings await this.constructor.recordIsValid(newRuleItem, this.schema, this.removeAdditional); - newRuleItem = await this.createRuleTrigger(newRuleItem); - // save return super.create(newRuleItem); } @@ -272,6 +268,14 @@ class Rule extends Manager { async createRuleTrigger(ruleItem) { let newRuleItem = cloneDeep(ruleItem); + // the default state is 'ENABLED' + if (!ruleItem.state) { + newRuleItem.state = 'ENABLED'; + } + + // Validate rule before kicking off workflows or adding event source mappings + await this.constructor.recordIsValid(newRuleItem, this.schema, this.removeAdditional); + const payload = await Rule.buildPayload(newRuleItem); switch (newRuleItem.rule.type) { case 'onetime': { @@ -426,14 +430,13 @@ class Rule extends Manager { return (rules.Count && rules.Count > 0); } - async addSnsTrigger(item) { - // check for existing subscription + async checkForSnsSubscriptions(item) { let token; let subExists = false; let subscriptionArn; /* eslint-disable no-await-in-loop */ do { - const subsResponse = await awsServices.sns().listSubscriptionsByTopic({ + const subsResponse = 
await this.SnsClient.listSubscriptionsByTopic({ TopicArn: item.rule.value, NextToken: token, }).promise(); @@ -451,6 +454,19 @@ class Rule extends Manager { if (subExists) break; } while (token); + return { + subExists, + existingSubscriptionArn: subscriptionArn, + }; + } + + async addSnsTrigger(item) { + // check for existing subscription + const { + subExists, + existingSubscriptionArn, + } = await this.checkForSnsSubscriptions(item); + let subscriptionArn = existingSubscriptionArn; /* eslint-enable no-await-in-loop */ if (!subExists) { // create sns subscription @@ -460,34 +476,34 @@ class Rule extends Manager { Endpoint: process.env.messageConsumer, ReturnSubscriptionArn: true, }; - const r = await awsServices.sns().subscribe(subscriptionParams).promise(); + const r = await this.SnsClient.subscribe(subscriptionParams).promise(); subscriptionArn = r.SubscriptionArn; + // create permission to invoke lambda + const permissionParams = { + Action: 'lambda:InvokeFunction', + FunctionName: process.env.messageConsumer, + Principal: 'sns.amazonaws.com', + SourceArn: item.rule.value, + StatementId: getSnsTriggerPermissionId(item), + }; + await this.LambdaClient.addPermission(permissionParams).promise(); } - // create permission to invoke lambda - const permissionParams = { - Action: 'lambda:InvokeFunction', - FunctionName: process.env.messageConsumer, - Principal: 'sns.amazonaws.com', - SourceArn: item.rule.value, - StatementId: `${item.name}Permission`, - }; - await awsServices.lambda().addPermission(permissionParams).promise(); return subscriptionArn; } async deleteSnsTrigger(item) { // If event source mapping is shared by other rules, don't delete it if (await this.isEventSourceMappingShared(item, 'arn')) { - log.info(`Event source mapping ${item} with type 'arn' is shared by multiple rules, so it will not be deleted.`); + log.info(`Event source mapping ${JSON.stringify(item)} with type 'arn' is shared by multiple rules, so it will not be deleted.`); return 
Promise.resolve(); } // delete permission statement const permissionParams = { FunctionName: process.env.messageConsumer, - StatementId: `${item.name}Permission`, + StatementId: getSnsTriggerPermissionId(item), }; try { - await awsServices.lambda().removePermission(permissionParams).promise(); + await this.LambdaClient.removePermission(permissionParams).promise(); } catch (error) { if (isResourceNotFoundException(error)) { throw new ResourceNotFoundError(error); @@ -498,7 +514,23 @@ class Rule extends Manager { const subscriptionParams = { SubscriptionArn: item.rule.arn, }; - return awsServices.sns().unsubscribe(subscriptionParams).promise(); + return this.SnsClient.unsubscribe(subscriptionParams).promise(); + } + + async deleteOldEventSourceMappings(item) { + switch (item.rule.type) { + case 'kinesis': + await this.deleteKinesisEventSources(item); + break; + case 'sns': { + if (item.rule.arn) { + await this.deleteSnsTrigger(item); + } + break; + } + default: + break; + } } /** @@ -509,7 +541,7 @@ class Rule extends Manager { */ async validateAndUpdateSqsRule(rule) { const queueUrl = rule.rule.value; - if (!(await sqsQueueExists(queueUrl))) { + if (!(await this.SqsUtils.sqsQueueExists(queueUrl))) { throw new Error(`SQS queue ${queueUrl} does not exist or your account does not have permissions to access it`); } @@ -517,7 +549,7 @@ class Rule extends Manager { QueueUrl: queueUrl, AttributeNames: ['All'], }; - const attributes = await awsServices.sqs().getQueueAttributes(qAttrParams).promise(); + const attributes = await this.SqsClient.getQueueAttributes(qAttrParams).promise(); if (!attributes.Attributes.RedrivePolicy) { throw new Error(`SQS queue ${queueUrl} does not have a dead-letter queue configured`); } diff --git a/packages/api/package.json b/packages/api/package.json index dd182b7f2d0..581ec8fbb16 100644 --- a/packages/api/package.json +++ b/packages/api/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/api", - "version": "10.1.0", + "version": "10.1.1", 
"description": "Lambda functions for handling all daac's API operations", "main": "index.js", "engines": { @@ -51,26 +51,26 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/api-client": "10.1.0", - "@cumulus/async-operations": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/cmr-client": "10.1.0", - "@cumulus/cmrjs": "10.1.0", - "@cumulus/collection-config-store": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/db": "10.1.0", - "@cumulus/distribution-utils": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/es-client": "10.1.0", - "@cumulus/ingest": "10.1.0", - "@cumulus/launchpad-auth": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/message": "10.1.0", - "@cumulus/oauth-client": "10.1.0", - "@cumulus/object-store": "10.1.0", - "@cumulus/pvl": "10.1.0", - "@cumulus/sftp-client": "10.1.0", - "@cumulus/types": "10.1.0", + "@cumulus/api-client": "10.1.1", + "@cumulus/async-operations": "10.1.1", + "@cumulus/aws-client": "10.1.1", + "@cumulus/cmr-client": "10.1.1", + "@cumulus/cmrjs": "10.1.1", + "@cumulus/collection-config-store": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/db": "10.1.1", + "@cumulus/distribution-utils": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/es-client": "10.1.1", + "@cumulus/ingest": "10.1.1", + "@cumulus/launchpad-auth": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/message": "10.1.1", + "@cumulus/oauth-client": "10.1.1", + "@cumulus/object-store": "10.1.1", + "@cumulus/pvl": "10.1.1", + "@cumulus/sftp-client": "10.1.1", + "@cumulus/types": "10.1.1", "@mapbox/dyno": "^1.4.2", "aggregate-error": "^3.1.0", "ajv": "^6.12.3", @@ -94,7 +94,7 @@ "jsonpath-plus": "^1.1.0", "jsonwebtoken": "^8.4.0", "knex": "0.95.15", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "moment": "2.24.0", "morgan": "^1.9.1", "nodeify": "^1.0.1", @@ -114,6 +114,6 @@ "xml2js": "^0.4.22" }, "devDependencies": { - "@cumulus/test-data": "10.1.0" + "@cumulus/test-data": "10.1.1" } } diff --git 
a/packages/api/tests/endpoints/collections/active-collections.js b/packages/api/tests/endpoints/collections/active-collections.js index fbd2f96ee97..6529b10d548 100644 --- a/packages/api/tests/endpoints/collections/active-collections.js +++ b/packages/api/tests/endpoints/collections/active-collections.js @@ -26,7 +26,7 @@ process.env.AccessTokensTable = randomId('accessTokensTable'); process.env.CollectionsTable = randomId('collectionsTable'); process.env.GranulesTable = randomId('granulesTable'); process.env.stackName = randomId('stackName'); -process.env.system_bucket = randomId('systemBucket'); +process.env.system_bucket = randomId('bucket'); process.env.TOKEN_SECRET = randomId('tokenSecret'); // import the express app after setting the env variables diff --git a/packages/api/tests/endpoints/collections/delete-collection.js b/packages/api/tests/endpoints/collections/delete-collection.js index 5b9bab0aa0b..382e13bd7b3 100644 --- a/packages/api/tests/endpoints/collections/delete-collection.js +++ b/packages/api/tests/endpoints/collections/delete-collection.js @@ -202,7 +202,8 @@ test('Attempting to delete a collection with an associated rule returns a 409 re Body: JSON.stringify({}), }).promise(); - await ruleModel.create(rule); + const ruleWithTrigger = await ruleModel.createRuleTrigger(rule); + await ruleModel.create(ruleWithTrigger); const response = await request(app) .delete(`/collections/${collection.name}/${collection.version}`) @@ -235,7 +236,8 @@ test('Attempting to delete a collection with an associated rule does not delete Body: JSON.stringify({}), }).promise(); - await ruleModel.create(rule); + const ruleWithTrigger = await ruleModel.createRuleTrigger(rule); + await ruleModel.create(ruleWithTrigger); await request(app) .delete(`/collections/${collection.name}/${collection.version}`) diff --git a/packages/api/tests/endpoints/granules/test-bulk-reingest.js b/packages/api/tests/endpoints/granules/test-bulk-reingest.js index 1dd6b87c22f..f4337c89d03 100644 
--- a/packages/api/tests/endpoints/granules/test-bulk-reingest.js +++ b/packages/api/tests/endpoints/granules/test-bulk-reingest.js @@ -26,7 +26,7 @@ process.env = { GranulesTable: randomId('GranulesTable'), TOKEN_SECRET: randomId('tokenSecret'), stackName: randomId('stackName'), - system_bucket: randomId('systemBucket'), + system_bucket: randomId('bucket'), AsyncOperationsTable: randomId('AsyncOperationsTable'), AsyncOperationTaskDefinition: randomId('taskDefinition'), EcsCluster: randomId('EcsCluster'), diff --git a/packages/api/tests/endpoints/providers/delete-provider.js b/packages/api/tests/endpoints/providers/delete-provider.js index 1a928b9c2e8..f31c7045e0d 100644 --- a/packages/api/tests/endpoints/providers/delete-provider.js +++ b/packages/api/tests/endpoints/providers/delete-provider.js @@ -246,7 +246,8 @@ test('Attempting to delete a provider with an associated rule returns a 409 resp Body: JSON.stringify({}), }).promise(); - await ruleModel.create(rule); + const ruleWithTrigger = await ruleModel.createRuleTrigger(rule); + await ruleModel.create(ruleWithTrigger); const response = await request(app) .delete(`/providers/${testProvider.id}`) @@ -275,7 +276,8 @@ test('Attempting to delete a provider with an associated rule does not delete th Body: JSON.stringify({}), }).promise(); - await ruleModel.create(rule); + const ruleWithTrigger = await ruleModel.createRuleTrigger(rule); + await ruleModel.create(ruleWithTrigger); await request(app) .delete(`/providers/${testProvider.id}`) diff --git a/packages/api/tests/endpoints/stats.js b/packages/api/tests/endpoints/stats.js index 58eb0887e0a..51a93e483dd 100644 --- a/packages/api/tests/endpoints/stats.js +++ b/packages/api/tests/endpoints/stats.js @@ -29,7 +29,7 @@ process.env.CollectionsTable = randomId('collectionsTable'); process.env.GranulesTable = randomId('granulesTable'); process.env.AccessTokensTable = randomId('accessTokenTable'); -process.env.system_bucket = randomId('systemBucket'); 
+process.env.system_bucket = randomId('bucket'); process.env.stackName = randomId('stackName'); const esIndex = randomId('esindex'); diff --git a/packages/api/tests/endpoints/test-dead-letter-archive.js b/packages/api/tests/endpoints/test-dead-letter-archive.js index 10b0d6c9ddc..eabe452b818 100644 --- a/packages/api/tests/endpoints/test-dead-letter-archive.js +++ b/packages/api/tests/endpoints/test-dead-letter-archive.js @@ -22,7 +22,7 @@ process.env = { ...process.env, AccessTokensTable: randomId('AccessTokensTable'), AsyncOperationsTable: randomId('asyncOperationsTable'), - system_bucket: randomId('systemBucket'), + system_bucket: randomId('system'), stackName: randomId('stackName'), TOKEN_SECRET: randomId('tokenSecret'), }; diff --git a/packages/api/tests/endpoints/test-executions.js b/packages/api/tests/endpoints/test-executions.js index 1e2e07e62f5..847533e01b4 100644 --- a/packages/api/tests/endpoints/test-executions.js +++ b/packages/api/tests/endpoints/test-executions.js @@ -64,7 +64,7 @@ process.env.CollectionsTable = randomId('collection'); process.env.ExecutionsTable = randomId('executions'); process.env.GranulesTable = randomId('granules'); process.env.stackName = randomId('stackname'); -process.env.system_bucket = randomId('systembucket'); +process.env.system_bucket = randomId('bucket'); process.env.TOKEN_SECRET = randomId('secret'); const testDbName = randomId('execution_test'); diff --git a/packages/api/tests/endpoints/test-granule-csv.js b/packages/api/tests/endpoints/test-granule-csv.js index 3422c5587a6..7777a016347 100644 --- a/packages/api/tests/endpoints/test-granule-csv.js +++ b/packages/api/tests/endpoints/test-granule-csv.js @@ -27,7 +27,7 @@ const { app } = require('../../app'); process.env.AccessTokensTable = randomId('token'); process.env.GranulesTable = randomId('granules'); process.env.stackName = randomId('stackname'); -process.env.system_bucket = randomId('systembucket'); +process.env.system_bucket = randomId('bucket'); 
process.env.TOKEN_SECRET = randomId('secret'); const createBucket = (Bucket) => awsServices.s3().createBucket({ Bucket }).promise(); diff --git a/packages/api/tests/endpoints/test-granules.js b/packages/api/tests/endpoints/test-granules.js index 823faf7cc0b..a5cbd82dd5f 100644 --- a/packages/api/tests/endpoints/test-granules.js +++ b/packages/api/tests/endpoints/test-granules.js @@ -101,7 +101,7 @@ process.env.ExecutionsTable = randomId('executions'); process.env.CollectionsTable = randomId('collection'); process.env.GranulesTable = randomId('granules'); process.env.stackName = randomId('stackname'); -process.env.system_bucket = randomId('systembucket'); +process.env.system_bucket = randomId('system-bucket'); process.env.TOKEN_SECRET = randomId('secret'); process.env.backgroundQueueUrl = randomId('backgroundQueueUrl'); @@ -1057,7 +1057,7 @@ test.serial('When a move granule request fails to move a file correctly, it reco const bucket = process.env.system_bucket; const secondBucket = randomId('second'); const thirdBucket = randomId('third'); - const fakeBucket = 'TotallyNotARealBucket'; + const fakeBucket = 'not-a-real-bucket'; await runTestUsingBuckets( [secondBucket, thirdBucket], diff --git a/packages/api/tests/endpoints/test-launchpadSaml.js b/packages/api/tests/endpoints/test-launchpadSaml.js index 25329deb1d4..dbd4589bbb4 100644 --- a/packages/api/tests/endpoints/test-launchpadSaml.js +++ b/packages/api/tests/endpoints/test-launchpadSaml.js @@ -30,7 +30,7 @@ process.env.OAUTH_PROVIDER = 'launchpad'; process.env.AccessTokensTable = randomId('tokenTable'); process.env.stackName = randomId('stackname'); process.env.TOKEN_SECRET = randomId('token_secret'); -process.env.system_bucket = randomId('systembucket'); +process.env.system_bucket = randomId('bucket'); process.env.LAUNCHPAD_METADATA_URL = 'http://example.com/launchpad.idp.xml'; const { app } = require('../../app'); @@ -205,9 +205,9 @@ test.serial( 'launchpadPublicCertificate throws error with missing 
bucket.', async (t) => { const stub = sinon.stub(got, 'get').callsFake(() => gotLaunchpadMetadataResponse); - await t.throwsAsync(launchpadPublicCertificate('s3://badBucket/location'), { + await t.throwsAsync(launchpadPublicCertificate('s3://bad-bucket/location'), { instanceOf: Error, - message: 'Cumulus could not find Launchpad public xml metadata at s3://badBucket/location', + message: 'Cumulus could not find Launchpad public xml metadata at s3://bad-bucket/location', }); stub.restore(); } diff --git a/packages/api/tests/endpoints/test-migration-counts.js b/packages/api/tests/endpoints/test-migration-counts.js index 1f45c5abfda..8b22416698b 100644 --- a/packages/api/tests/endpoints/test-migration-counts.js +++ b/packages/api/tests/endpoints/test-migration-counts.js @@ -22,7 +22,7 @@ process.env = { ...process.env, AccessTokensTable: randomId('AccessTokensTable'), AsyncOperationsTable: randomId('asyncOperationsTable'), - system_bucket: randomId('systemBucket'), + system_bucket: randomId('bucket'), stackName: randomId('stackName'), TOKEN_SECRET: randomId('tokenSecret'), }; diff --git a/packages/api/tests/endpoints/test-reconciliation-reports.js b/packages/api/tests/endpoints/test-reconciliation-reports.js index 8f1f41a620c..d6914cb9ac5 100644 --- a/packages/api/tests/endpoints/test-reconciliation-reports.js +++ b/packages/api/tests/endpoints/test-reconciliation-reports.js @@ -42,7 +42,7 @@ process.env.AsyncOperationsTable = randomId('asyncOperationsTable'); process.env.ReconciliationReportsTable = randomId('recReportsTable'); process.env.TOKEN_SECRET = randomId('tokenSecret'); process.env.stackName = randomId('stackname'); -process.env.system_bucket = randomId('systemBucket'); +process.env.system_bucket = randomId('bucket'); process.env.invokeReconcileLambda = randomId('invokeReconcileLambda'); process.env.AsyncOperationTaskDefinition = randomId('asyncOpTaskDefinition'); process.env.EcsCluster = randomId('ecsCluster'); diff --git 
a/packages/api/tests/endpoints/test-rules.js b/packages/api/tests/endpoints/test-rules.js index 302f8f4dc61..f16d62d011e 100644 --- a/packages/api/tests/endpoints/test-rules.js +++ b/packages/api/tests/endpoints/test-rules.js @@ -1,5 +1,6 @@ 'use strict'; +const fs = require('fs-extra'); const omit = require('lodash/omit'); const test = require('ava'); const sinon = require('sinon'); @@ -17,11 +18,15 @@ const { translateApiProviderToPostgresProvider, translateApiRuleToPostgresRule, migrationDir, + fakeCollectionRecordFactory, + fakeProviderRecordFactory, } = require('@cumulus/db'); const S3 = require('@cumulus/aws-client/S3'); const { bootstrapElasticSearch } = require('@cumulus/es-client/bootstrap'); +const awsServices = require('@cumulus/aws-client/services'); const { Search } = require('@cumulus/es-client/search'); const indexer = require('@cumulus/es-client/indexer'); +const { constructCollectionId } = require('@cumulus/message/Collections'); const { buildFakeExpressResponse } = require('./utils'); const { @@ -30,8 +35,9 @@ const { fakeRuleFactoryV2, createFakeJwtAuthToken, setAuthorizedOAuthUsers, + createRuleTestRecords, } = require('../../lib/testUtils'); -const { post } = require('../../endpoints/rules'); +const { post, put } = require('../../endpoints/rules'); const AccessToken = require('../../models/access-tokens'); const Rule = require('../../models/rules'); const assertions = require('../../lib/assertions'); @@ -44,6 +50,7 @@ const assertions = require('../../lib/assertions'); 'stackName', 'system_bucket', 'TOKEN_SECRET', + 'KinesisInboundEventLogger', // eslint-disable-next-line no-return-assign ].forEach((varName) => process.env[varName] = randomString()); @@ -83,6 +90,17 @@ test.before(async (t) => { PG_DATABASE: testDbName, }; + const messageConsumer = await awsServices.lambda().createFunction({ + Code: { + ZipFile: fs.readFileSync(require.resolve('@cumulus/test-data/fake-lambdas/hello.zip')), + }, + FunctionName: randomId('messageConsumer'), + 
Role: randomId('role'), + Handler: 'index.handler', + Runtime: 'nodejs12.x', + }).promise(); + process.env.messageConsumer = messageConsumer.FunctionName; + const { knex, knexAdmin } = await generateLocalTestDb(testDbName, migrationDir); t.context.testKnex = knex; t.context.testKnexAdmin = knexAdmin; @@ -100,10 +118,35 @@ test.before(async (t) => { t.context.collectionPgModel = new CollectionPgModel(); t.context.providerPgModel = new ProviderPgModel(); + // Create PG Provider + t.context.testPgProvider = fakeProviderRecordFactory(); + [t.context.pgProvider] = await t.context.providerPgModel.create( + t.context.testKnex, + t.context.testPgProvider, + '*' + ); + + // Create PG Collection + const collectionName = 'fakeCollection'; + const collectionVersion = 'v1'; + const testPgCollection = fakeCollectionRecordFactory({ + name: collectionName, + version: collectionVersion, + }); + t.context.collectionPgModel = new CollectionPgModel(); + [t.context.pgCollection] = await t.context.collectionPgModel.create( + t.context.testKnex, + testPgCollection, + '*' + ); + t.context.collectionId = constructCollectionId(collectionName, collectionVersion); + ruleModel = new Rule(); await ruleModel.createTable(); + t.context.ruleModel = ruleModel; - const ruleRecord = await ruleModel.create(testRule); + const ruleWithTrigger = await ruleModel.createRuleTrigger(testRule); + const ruleRecord = await ruleModel.create(ruleWithTrigger); await indexer.indexRule(esClient, ruleRecord, esAlias); const username = randomString(); @@ -386,6 +429,180 @@ test('POST creates a rule', async (t) => { ); }); +test.serial('post() creates SNS rule with same trigger information in Dynamo/PostgreSQL', async (t) => { + const { + pgProvider, + pgCollection, + } = t.context; + + const topic1 = await awsServices.sns().createTopic({ Name: randomId('topic') }).promise(); + + const rule = fakeRuleFactoryV2({ + state: 'ENABLED', + rule: { + type: 'sns', + value: topic1.TopicArn, + }, + collection: { + name: 
pgCollection.name, + version: pgCollection.version, + }, + provider: pgProvider.name, + }); + + const expressRequest = { + body: rule, + }; + + const response = buildFakeExpressResponse(); + + await post(expressRequest, response); + + const dynamoRule = await ruleModel.get({ name: rule.name }); + const pgRule = await t.context.rulePgModel + .get(t.context.testKnex, { name: rule.name }); + + t.truthy(dynamoRule.rule.arn); + t.like(dynamoRule, { + rule: { + type: 'sns', + value: topic1.TopicArn, + arn: dynamoRule.rule.arn, + }, + }); + t.like(pgRule, { + name: rule.name, + enabled: true, + type: 'sns', + arn: dynamoRule.rule.arn, + value: topic1.TopicArn, + }); +}); + +test.serial('post() creates the same Kinesis rule with trigger information in Dynamo/PostgreSQL', async (t) => { + const { + pgProvider, + pgCollection, + } = t.context; + + const kinesisArn1 = `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis')}`; + const rule = fakeRuleFactoryV2({ + state: 'ENABLED', + rule: { + type: 'kinesis', + value: kinesisArn1, + }, + collection: { + name: pgCollection.name, + version: pgCollection.version, + }, + provider: pgProvider.name, + }); + + const expressRequest = { + body: rule, + }; + + const response = buildFakeExpressResponse(); + + await post(expressRequest, response); + + const dynamoRule = await ruleModel.get({ name: rule.name }); + const pgRule = await t.context.rulePgModel + .get(t.context.testKnex, { name: rule.name }); + + t.truthy(dynamoRule.rule.arn); + t.truthy(dynamoRule.rule.logEventArn); + + t.like(dynamoRule, { + rule: { + type: 'kinesis', + value: kinesisArn1, + }, + }); + t.like(pgRule, { + name: rule.name, + enabled: true, + type: 'kinesis', + arn: dynamoRule.rule.arn, + value: kinesisArn1, + log_event_arn: dynamoRule.rule.logEventArn, + }); +}); + +test.serial('post() creates the SQS rule with trigger information in Dynamo/PostgreSQL', async (t) => { + const { + pgProvider, + pgCollection, + } = t.context; + + const queue1 = 
randomId('queue'); + + const stubbedRulesModel = new Rule({ + SqsUtils: { + sqsQueueExists: () => Promise.resolve(true), + }, + SqsClient: { + getQueueAttributes: () => ({ + promise: () => Promise.resolve({ + Attributes: { + RedrivePolicy: 'policy', + VisibilityTimeout: 10, + }, + }), + }), + }, + }); + + const rule = fakeRuleFactoryV2({ + state: 'ENABLED', + rule: { + type: 'sqs', + value: queue1, + }, + collection: { + name: pgCollection.name, + version: pgCollection.version, + }, + provider: pgProvider.name, + }); + + const expectedMeta = { + visibilityTimeout: 10, + retries: 3, + }; + + const expressRequest = { + body: rule, + testContext: { + ruleModel: stubbedRulesModel, + }, + }; + + const response = buildFakeExpressResponse(); + + await post(expressRequest, response); + + const dynamoRule = await ruleModel.get({ name: rule.name }); + const pgRule = await t.context.rulePgModel + .get(t.context.testKnex, { name: rule.name }); + + t.like(dynamoRule, { + rule: { + type: 'sqs', + value: queue1, + }, + meta: expectedMeta, + }); + t.like(pgRule, { + name: rule.name, + enabled: true, + type: 'sqs', + value: queue1, + meta: expectedMeta, + }); +}); + test('POST creates a rule in Dynamo and PG with correct timestamps', async (t) => { const { newRule } = t.context; @@ -455,7 +672,8 @@ test('POST creates a rule that is enabled by default', async (t) => { test('POST returns a 409 response if record already exists', async (t) => { const { newRule } = t.context; - await ruleModel.create(newRule); + const ruleWithTrigger = await ruleModel.createRuleTrigger(newRule); + await ruleModel.create(ruleWithTrigger); const response = await request(app) .post('/rules') @@ -598,6 +816,7 @@ test.serial('POST does not write to DynamoDB or RDS if writing to DynamoDB fails const failingRulesModel = { exists: () => false, + createRuleTrigger: () => Promise.resolve(newRule), create: () => { throw new Error('Rule error'); }, @@ -607,7 +826,7 @@ test.serial('POST does not write to DynamoDB 
or RDS if writing to DynamoDB fails body: newRule, testContext: { dbClient: testKnex, - model: failingRulesModel, + ruleModel: failingRulesModel, }, }; @@ -634,7 +853,8 @@ test('PUT replaces a rule', async (t) => { await t.context.testKnex.transaction(async (trx) => { await t.context.rulePgModel.create(trx, postgresRule); - await ruleModel.create(putTestRule, putTestRule.createdAt); + const ruleWithTrigger = await ruleModel.createRuleTrigger(putTestRule); + await ruleModel.create(ruleWithTrigger, putTestRule.createdAt); }); const updateRule = { @@ -675,8 +895,8 @@ test('PUT replaces a rule', async (t) => { t.is(actualPostgresRule.updated_at.getTime(), actualRule.updatedAt); t.like(actualPostgresRule, { - ...postgresExpectedRule, - created_at: postgresRule.created_at, + queue_url: null, + enabled: true, updated_at: actualPostgresRule.updated_at, }); t.deepEqual(actualRule, { @@ -715,6 +935,454 @@ test('PUT returns 400 for name mismatch between params and payload', t.falsy(record); }); +test.serial('put() creates the same updated SNS rule in Dynamo/PostgreSQL', async (t) => { + const { + pgProvider, + pgCollection, + } = t.context; + + const topic1 = await awsServices.sns().createTopic({ Name: randomId('topic1_') }).promise(); + const topic2 = await awsServices.sns().createTopic({ Name: randomId('topic2_') }).promise(); + + const { + originalDynamoRule, + originalPgRecord, + } = await createRuleTestRecords( + t.context, + { + queueUrl: 'fake-queue-url', + state: 'ENABLED', + rule: { + type: 'sns', + value: topic1.TopicArn, + }, + collection: { + name: pgCollection.name, + version: pgCollection.version, + }, + provider: pgProvider.name, + } + ); + + t.truthy(originalDynamoRule.rule.arn); + t.truthy(originalPgRecord.arn); + + const updateRule = { + ...originalDynamoRule, + rule: { + type: 'sns', + value: topic2.TopicArn, + }, + }; + + const expressRequest = { + params: { + name: originalDynamoRule.name, + }, + body: updateRule, + }; + + const response = 
buildFakeExpressResponse(); + + await put(expressRequest, response); + + const updatedRule = await ruleModel.get({ name: updateRule.name }); + const updatedPgRule = await t.context.rulePgModel + .get(t.context.testKnex, { name: updateRule.name }); + + t.truthy(updatedRule.rule.arn); + t.truthy(updatedPgRule.arn); + + t.not(updatedRule.rule.arn, originalDynamoRule.rule.arn); + t.not(updatedPgRule.arn, originalPgRecord.arn); + + t.deepEqual(updatedRule, { + ...originalDynamoRule, + updatedAt: updatedRule.updatedAt, + rule: { + type: 'sns', + value: topic2.TopicArn, + arn: updatedRule.rule.arn, + }, + }); + t.deepEqual(updatedPgRule, { + ...originalPgRecord, + updated_at: updatedPgRule.updated_at, + type: 'sns', + arn: updatedPgRule.arn, + value: topic2.TopicArn, + }); +}); + +test.serial('put() creates the same updated Kinesis rule in Dynamo/PostgreSQL', async (t) => { + const { + pgProvider, + pgCollection, + } = t.context; + + const kinesisArn1 = `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis1_')}`; + const kinesisArn2 = `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis2_')}`; + + const { + originalDynamoRule, + originalPgRecord, + } = await createRuleTestRecords( + t.context, + { + state: 'ENABLED', + rule: { + type: 'kinesis', + value: kinesisArn1, + }, + collection: { + name: pgCollection.name, + version: pgCollection.version, + }, + provider: pgProvider.name, + } + ); + + t.truthy(originalDynamoRule.rule.arn); + t.truthy(originalDynamoRule.rule.logEventArn); + t.truthy(originalPgRecord.arn); + t.truthy(originalPgRecord.log_event_arn); + + const updateRule = { + ...originalDynamoRule, + rule: { + type: 'kinesis', + value: kinesisArn2, + }, + }; + + const expressRequest = { + params: { + name: originalDynamoRule.name, + }, + body: updateRule, + }; + + const response = buildFakeExpressResponse(); + + await put(expressRequest, response); + + const updatedRule = await ruleModel.get({ name: updateRule.name }); + const updatedPgRule = await 
t.context.rulePgModel + .get(t.context.testKnex, { name: updateRule.name }); + + t.truthy(updatedRule.rule.arn); + t.truthy(updatedRule.rule.logEventArn); + t.truthy(updatedPgRule.arn); + t.truthy(updatedPgRule.log_event_arn); + + t.not(originalDynamoRule.rule.arn, updatedRule.rule.arn); + t.not(originalDynamoRule.rule.logEventArn, updatedRule.rule.logEventArn); + t.not(originalPgRecord.arn, updatedPgRule.arn); + t.not(originalPgRecord.log_event_arn, updatedPgRule.log_event_arn); + + t.deepEqual(updatedRule, { + ...originalDynamoRule, + updatedAt: updatedRule.updatedAt, + rule: { + arn: updatedRule.rule.arn, + logEventArn: updatedRule.rule.logEventArn, + type: 'kinesis', + value: kinesisArn2, + }, + }); + t.deepEqual(updatedPgRule, { + ...originalPgRecord, + updated_at: updatedPgRule.updated_at, + type: 'kinesis', + value: kinesisArn2, + arn: updatedPgRule.arn, + log_event_arn: updatedPgRule.log_event_arn, + }); +}); + +test.serial('put() creates the same SQS rule in Dynamo/PostgreSQL', async (t) => { + const { + pgProvider, + pgCollection, + } = t.context; + + const queue1 = randomId('queue'); + const queue2 = randomId('queue'); + + const stubbedRulesModel = new Rule({ + SqsUtils: { + sqsQueueExists: () => Promise.resolve(true), + }, + SqsClient: { + getQueueAttributes: () => ({ + promise: () => Promise.resolve({ + Attributes: { + RedrivePolicy: 'policy', + VisibilityTimeout: 10, + }, + }), + }), + }, + }); + + const { + originalDynamoRule, + originalPgRecord, + } = await createRuleTestRecords( + { + ...t.context, + ruleModel: stubbedRulesModel, + }, + { + name: randomId('rule'), + state: 'ENABLED', + rule: { + type: 'sqs', + value: queue1, + }, + collection: { + name: pgCollection.name, + version: pgCollection.version, + }, + provider: pgProvider.name, + } + ); + + const expectedMeta = { + visibilityTimeout: 10, + retries: 3, + }; + t.deepEqual(originalDynamoRule.meta, expectedMeta); + t.deepEqual(originalPgRecord.meta, expectedMeta); + + const updateRule = { + 
...originalDynamoRule, + rule: { + type: 'sqs', + value: queue2, + }, + }; + const expressRequest = { + params: { + name: originalDynamoRule.name, + }, + body: updateRule, + testContext: { + ruleModel: stubbedRulesModel, + }, + }; + const response = buildFakeExpressResponse(); + await put(expressRequest, response); + + const updatedRule = await ruleModel.get({ name: updateRule.name }); + const updatedPgRule = await t.context.rulePgModel + .get(t.context.testKnex, { name: updateRule.name }); + + t.deepEqual(updatedRule, { + ...originalDynamoRule, + updatedAt: updatedRule.updatedAt, + rule: { + type: 'sqs', + value: queue2, + }, + }); + t.deepEqual(updatedPgRule, { + ...originalPgRecord, + updated_at: updatedPgRule.updated_at, + type: 'sqs', + value: queue2, + }); +}); + +test.serial('put() keeps initial trigger information if writing to Dynamo fails', async (t) => { + const { + pgProvider, + pgCollection, + } = t.context; + + const topic1 = await awsServices.sns().createTopic({ Name: randomId('topic1_') }).promise(); + const topic2 = await awsServices.sns().createTopic({ Name: randomId('topic2_') }).promise(); + + const deleteOldEventSourceMappingsSpy = sinon.spy(Rule.prototype, 'deleteOldEventSourceMappings'); + const updateStub = sinon.stub(Rule.prototype, 'update').throws(new Error('Dynamo fail')); + t.teardown(() => { + updateStub.restore(); + deleteOldEventSourceMappingsSpy.restore(); + }); + + const stubbedRulesModel = new Rule(); + + const { + originalDynamoRule, + originalPgRecord, + } = await createRuleTestRecords( + { + ...t.context, + ruleModel: stubbedRulesModel, + }, + { + state: 'ENABLED', + rule: { + type: 'sns', + value: topic1.TopicArn, + }, + collection: { + name: pgCollection.name, + version: pgCollection.version, + }, + provider: pgProvider.name, + } + ); + + t.truthy(originalDynamoRule.rule.arn); + t.truthy(originalPgRecord.arn); + + const updateRule = { + ...originalDynamoRule, + rule: { + type: 'sns', + value: topic2.TopicArn, + }, + }; + + 
const expressRequest = { + params: { + name: originalDynamoRule.name, + }, + body: updateRule, + testContext: { + ruleModel: stubbedRulesModel, + }, + }; + + const response = buildFakeExpressResponse(); + + await t.throwsAsync( + put(expressRequest, response), + { message: 'Dynamo fail' } + ); + + t.false(deleteOldEventSourceMappingsSpy.called); + + const updatedRule = await ruleModel.get({ name: updateRule.name }); + const updatedPgRule = await t.context.rulePgModel + .get(t.context.testKnex, { name: updateRule.name }); + + t.is(updatedRule.rule.arn, originalDynamoRule.rule.arn); + t.is(updatedPgRule.arn, originalPgRecord.arn); + + t.like(updatedRule, { + ...originalDynamoRule, + updatedAt: updatedRule.updatedAt, + rule: { + type: 'sns', + value: topic1.TopicArn, + }, + }); + t.like(updatedPgRule, { + ...originalPgRecord, + updated_at: updatedPgRule.updated_at, + type: 'sns', + value: topic1.TopicArn, + }); +}); + +test.serial('put() keeps initial trigger information if writing to PostgreSQL fails', async (t) => { + const { + pgProvider, + pgCollection, + } = t.context; + + const topic1 = await awsServices.sns().createTopic({ Name: randomId('topic1_') }).promise(); + const topic2 = await awsServices.sns().createTopic({ Name: randomId('topic2_') }).promise(); + + const deleteOldEventSourceMappingsSpy = sinon.spy(Rule.prototype, 'deleteOldEventSourceMappings'); + t.teardown(() => { + deleteOldEventSourceMappingsSpy.restore(); + }); + + const stubbedRulesModel = new Rule(); + + const { + originalDynamoRule, + originalPgRecord, + } = await createRuleTestRecords( + { + ...t.context, + ruleModel: stubbedRulesModel, + }, + { + state: 'ENABLED', + rule: { + type: 'sns', + value: topic1.TopicArn, + }, + collection: { + name: pgCollection.name, + version: pgCollection.version, + }, + provider: pgProvider.name, + } + ); + + t.truthy(originalDynamoRule.rule.arn); + t.truthy(originalPgRecord.arn); + + const updateRule = { + ...originalDynamoRule, + rule: { + type: 'sns', + 
value: topic2.TopicArn, + }, + }; + + const expressRequest = { + params: { + name: originalDynamoRule.name, + }, + body: updateRule, + testContext: { + rulePgModel: { + get: () => Promise.resolve(originalPgRecord), + upsert: () => { + throw new Error('PG fail'); + }, + }, + }, + }; + + const response = buildFakeExpressResponse(); + + await t.throwsAsync( + put(expressRequest, response), + { message: 'PG fail' } + ); + + t.false(deleteOldEventSourceMappingsSpy.called); + + const updatedRule = await ruleModel.get({ name: updateRule.name }); + const updatedPgRule = await t.context.rulePgModel + .get(t.context.testKnex, { name: updateRule.name }); + + t.is(updatedRule.rule.arn, originalDynamoRule.rule.arn); + t.is(updatedPgRule.arn, originalPgRecord.arn); + + t.like(updatedRule, { + ...originalDynamoRule, + updatedAt: updatedRule.updatedAt, + rule: { + type: 'sns', + value: topic1.TopicArn, + }, + }); + t.like(updatedPgRule, { + ...originalPgRecord, + updated_at: updatedPgRule.updated_at, + type: 'sns', + value: topic1.TopicArn, + }); +}); + test('DELETE deletes a rule', async (t) => { const { newRule } = t.context; diff --git a/packages/api/tests/lambdas/test-kinesis-consumer.js b/packages/api/tests/lambdas/test-kinesis-consumer.js index f8379e7270a..2bf14700ead 100644 --- a/packages/api/tests/lambdas/test-kinesis-consumer.js +++ b/packages/api/tests/lambdas/test-kinesis-consumer.js @@ -5,7 +5,7 @@ const sinon = require('sinon'); const test = require('ava'); const proxyquire = require('proxyquire'); -const { randomString } = require('@cumulus/common/test-utils'); +const { randomString, randomId } = require('@cumulus/common/test-utils'); const { s3, sns } = require('@cumulus/aws-client/services'); const { recursivelyDeleteS3Bucket } = require('@cumulus/aws-client/S3'); @@ -54,7 +54,7 @@ const kinesisRule = { state: 'ENABLED', rule: { type: 'kinesis', - value: 'test-kinesisarn', + value: `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis')}`, }, }; diff --git 
a/packages/api/tests/lambdas/test-publish-executions.js b/packages/api/tests/lambdas/test-publish-executions.js index e6de11fb268..1240ff37d5e 100644 --- a/packages/api/tests/lambdas/test-publish-executions.js +++ b/packages/api/tests/lambdas/test-publish-executions.js @@ -59,6 +59,11 @@ test.serial('The publish-executions Lambda function takes a DynamoDB stream even await handler(event); const { Messages } = await sqs().receiveMessage({ QueueUrl, WaitTimeSeconds: 10 }).promise(); + t.teardown( + async () => await Promise.all(Messages.map( + ({ ReceiptHandle }) => sqs().deleteMessage({ QueueUrl, ReceiptHandle }).promise() + )) + ); t.is(Messages.length, 1); @@ -100,6 +105,11 @@ test.serial('The publish-executions Lambda function takes a DynamoDB stream even MaxNumberOfMessages: 2, WaitTimeSeconds: 10, }).promise(); + t.teardown( + async () => await Promise.all(Messages.map( + ({ ReceiptHandle }) => sqs().deleteMessage({ QueueUrl, ReceiptHandle }).promise() + )) + ); t.is(Messages.length, 2); }); diff --git a/packages/api/tests/lambdas/test-publish-granules.js b/packages/api/tests/lambdas/test-publish-granules.js index a61c2fc270e..f36a2adaabb 100644 --- a/packages/api/tests/lambdas/test-publish-granules.js +++ b/packages/api/tests/lambdas/test-publish-granules.js @@ -11,7 +11,9 @@ test.before(async (t) => { process.env.granule_sns_topic_arn = TopicArn; const QueueName = randomString(); - const { QueueUrl } = await sqs().createQueue({ QueueName }).promise(); + const { QueueUrl } = await sqs().createQueue({ + QueueName, + }).promise(); const getQueueAttributesResponse = await sqs().getQueueAttributes({ QueueUrl, AttributeNames: ['QueueArn'], @@ -60,6 +62,11 @@ test.serial('The publish-granules Lambda function takes a DynamoDB stream event await handler(event); const { Messages } = await sqs().receiveMessage({ QueueUrl, WaitTimeSeconds: 10 }).promise(); + t.teardown( + async () => await Promise.all(Messages.map( + ({ ReceiptHandle }) => sqs().deleteMessage({ QueueUrl, 
ReceiptHandle }).promise() + )) + ); t.is(Messages.length, 1); @@ -75,6 +82,7 @@ test.serial('The publish-granules Lambda function takes a DynamoDB stream event test.serial('The publish-granules Lambda function takes a DynamoDB stream event with a multiple records and publishes their granules to SNS', async (t) => { const { QueueUrl } = t.context; const firstGranuleId = randomString(); + const secondGranuleId = randomString(); const event = { Records: [ @@ -90,7 +98,7 @@ test.serial('The publish-granules Lambda function takes a DynamoDB stream event { dynamodb: { NewImage: { - granuleId: { S: randomString() }, + granuleId: { S: secondGranuleId }, status: { S: 'running' }, }, }, @@ -103,9 +111,14 @@ test.serial('The publish-granules Lambda function takes a DynamoDB stream event const { Messages } = await sqs().receiveMessage({ QueueUrl, - MaxNumberOfMessages: 2, + MaxNumberOfMessages: 10, WaitTimeSeconds: 10, }).promise(); + t.teardown( + async () => await Promise.all(Messages.map( + ({ ReceiptHandle }) => sqs().deleteMessage({ QueueUrl, ReceiptHandle }).promise() + )) + ); t.is(Messages.length, 2); const firstMessage = JSON.parse(JSON.parse(Messages[0].Body).Message); @@ -141,6 +154,11 @@ test.serial('The publish-granules Lambda function takes a DynamoDB stream event await handler(event); const { Messages } = await sqs().receiveMessage({ QueueUrl, WaitTimeSeconds: 10 }).promise(); + t.teardown( + async () => await Promise.all(Messages.map( + ({ ReceiptHandle }) => sqs().deleteMessage({ QueueUrl, ReceiptHandle }).promise() + )) + ); t.is(Messages.length, 1); diff --git a/packages/api/tests/lib/test-granule-delete.js b/packages/api/tests/lib/test-granule-delete.js index 95a627b293f..c4adf4193dd 100644 --- a/packages/api/tests/lib/test-granule-delete.js +++ b/packages/api/tests/lib/test-granule-delete.js @@ -50,7 +50,7 @@ let granulePgModel; process.env.CollectionsTable = randomId('collection'); process.env.GranulesTable = randomId('granules'); process.env.stackName = 
randomId('stackname'); -process.env.system_bucket = randomId('systembucket'); +process.env.system_bucket = randomId('bucket'); process.env.TOKEN_SECRET = randomId('secret'); test.before(async (t) => { diff --git a/packages/api/tests/lib/test-snsRuleHelpers.js b/packages/api/tests/lib/test-snsRuleHelpers.js new file mode 100644 index 00000000000..421c36be50a --- /dev/null +++ b/packages/api/tests/lib/test-snsRuleHelpers.js @@ -0,0 +1,34 @@ +const test = require('ava'); + +const { randomId } = require('@cumulus/common/test-utils'); + +const { + getSnsPermissionIdMaxLength, + getSnsPermissionIdSuffix, + getSnsTriggerPermissionId, +} = require('../../lib/snsRuleHelpers'); +const { fakeRuleFactoryV2 } = require('../../lib/testUtils'); + +test('getSnsTriggerPermissionId() returns correct permission ID based on rule input', (t) => { + const topicName = randomId('sns'); + const topicArn = `arn:aws:sns:us-east-1:000000000000:${topicName}`; + const rule = fakeRuleFactoryV2({ + rule: { + value: topicArn, + }, + }); + t.is(getSnsTriggerPermissionId(rule), `${topicName}Permission`); +}); + +test('getSnsTriggerPermissionId() correct limits ID length to 64 characters', (t) => { + const permissionIdSuffix = getSnsPermissionIdSuffix(); + const topicName = new Array((getSnsPermissionIdMaxLength() + 2) - permissionIdSuffix.length).join('a'); + const topicArn = `arn:aws:sns:us-east-1:000000000000:${topicName}`; + const rule = fakeRuleFactoryV2({ + rule: { + value: topicArn, + }, + }); + // last character of suffix should have been trimmed by substring + t.is(getSnsTriggerPermissionId(rule), `${topicName}${permissionIdSuffix.substring(0, permissionIdSuffix.length - 1)}`); +}); diff --git a/packages/api/tests/models/rules/test-query-rules.js b/packages/api/tests/models/rules/test-query-rules.js index 63734826691..148ed200388 100644 --- a/packages/api/tests/models/rules/test-query-rules.js +++ b/packages/api/tests/models/rules/test-query-rules.js @@ -1,5 +1,6 @@ 'use strict'; +const fs = 
require('fs-extra'); const test = require('ava'); const sinon = require('sinon'); @@ -26,12 +27,14 @@ const commonRuleParams = { collection, provider: provider.id, workflow: randomId('workflow'), + createdAt: Date.now(), + updatedAt: Date.now(), }; const kinesisRuleParams = { rule: { type: 'kinesis', - value: 'test-kinesisarn', + value: `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis')}`, }, }; @@ -66,7 +69,7 @@ const kinesisRule4 = { state: 'ENABLED', rule: { type: 'kinesis', - value: 'kinesisarn-4', + value: `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis4_')}`, }, }; @@ -80,7 +83,7 @@ const kinesisRule5 = { state: 'ENABLED', rule: { type: 'kinesis', - value: 'kinesisarn-5', + value: `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis5_')}`, }, }; @@ -94,12 +97,36 @@ const disabledKinesisRule = { test.before(async () => { process.env.RulesTable = randomId('rules'); process.env.stackName = randomId('stack'); - process.env.messageConsumer = randomId('message'); process.env.KinesisInboundEventLogger = randomId('kinesis'); process.env.system_bucket = randomId('bucket'); + const lambda = await awsServices.lambda().createFunction({ + Code: { + ZipFile: fs.readFileSync(require.resolve('@cumulus/test-data/fake-lambdas/hello.zip')), + }, + FunctionName: randomId('messageConsumer'), + Role: randomId('role'), + Handler: 'index.handler', + Runtime: 'nodejs12.x', + }).promise(); + process.env.messageConsumer = lambda.FunctionName; + // create Rules table - rulesModel = new models.Rule(); + rulesModel = new models.Rule({ + SqsUtils: { + sqsQueueExists: () => Promise.resolve(true), + }, + SqsClient: { + getQueueAttributes: () => ({ + promise: () => Promise.resolve({ + Attributes: { + RedrivePolicy: 'fake-policy', + VisibilityTimeout: '10', + }, + }), + }), + }, + }); await rulesModel.createTable(); await S3.createBucket(process.env.system_bucket); @@ -114,20 +141,6 @@ test.before(async () => { sandbox = sinon.createSandbox(); - 
sandbox.stub(awsServices, 'sqs').returns({ - getQueueUrl: () => ({ - promise: () => Promise.resolve(true), - }), - getQueueAttributes: () => ({ - promise: () => Promise.resolve({ - Attributes: { - RedrivePolicy: 'fake-policy', - VisibilityTimeout: '10', - }, - }), - }), - }); - const stubWorkflowFileKey = randomId('key'); sandbox.stub(workflows, 'getWorkflowFileKey').returns(stubWorkflowFileKey); sandbox.stub(S3, 'fileExists') @@ -151,7 +164,13 @@ test.before(async () => { kinesisRule5, disabledKinesisRule, ]; - await Promise.all(kinesisRules.map((rule) => rulesModel.create(rule))); + + await Promise.all( + kinesisRules.map(async (rule) => { + const ruleWithTrigger = await rulesModel.createRuleTrigger(rule); + await rulesModel.create(ruleWithTrigger); + }) + ); }); test.after.always(async () => { @@ -182,7 +201,12 @@ test.serial('queryRules returns correct rules for given state and type', async ( state: 'DISABLED', }), ]; - await Promise.all(onetimeRules.map((rule) => rulesModel.create(rule))); + await Promise.all( + onetimeRules.map(async (rule) => { + const ruleWithTrigger = await rulesModel.createRuleTrigger(rule); + await rulesModel.create(ruleWithTrigger); + }) + ); const result = await rulesModel.queryRules({ status: 'ENABLED', @@ -213,7 +237,14 @@ test.serial('queryRules defaults to returning only ENABLED rules', async (t) => state: 'DISABLED', }), ]; - await Promise.all(rules.map((rule) => rulesModel.create(rule))); + + await Promise.all( + rules.map(async (rule) => { + const ruleWithTrigger = await rulesModel.createRuleTrigger(rule); + await rulesModel.create(ruleWithTrigger); + }) + ); + const results = await rulesModel.queryRules({ type: 'onetime', }); @@ -226,16 +257,6 @@ test.serial('queryRules defaults to returning only ENABLED rules', async (t) => }); test.serial('queryRules should look up sns-type rules which are associated with the topic, but not those that are disabled', async (t) => { - // See https://github.com/localstack/localstack/issues/2016 
- const stub = sinon.stub(awsServices, 'lambda').returns({ - addPermission: () => ({ - promise: () => Promise.resolve(true), - }), - removePermission: () => ({ - promise: () => Promise.resolve(true), - }), - }); - const { TopicArn } = await awsServices.sns().createTopic({ Name: randomId('topic'), }).promise(); @@ -256,7 +277,12 @@ test.serial('queryRules should look up sns-type rules which are associated with state: 'DISABLED', }), ]; - const createdRules = await Promise.all(rules.map((rule) => rulesModel.create(rule))); + const createdRules = await Promise.all( + rules.map(async (rule) => { + const ruleWithTrigger = await rulesModel.createRuleTrigger(rule); + return rulesModel.create(ruleWithTrigger); + }) + ); const result = await rulesModel.queryRules({ type: 'sns', @@ -269,21 +295,10 @@ test.serial('queryRules should look up sns-type rules which are associated with await awsServices.sns().deleteTopic({ TopicArn, }); - stub.restore(); }); }); test.serial('queryRules should look up sns-type rules which are associated with the collection', async (t) => { - // See https://github.com/localstack/localstack/issues/2016 - const stub = sinon.stub(awsServices, 'lambda').returns({ - addPermission: () => ({ - promise: () => Promise.resolve(true), - }), - removePermission: () => ({ - promise: () => Promise.resolve(true), - }), - }); - const { TopicArn } = await awsServices.sns().createTopic({ Name: randomId('topic'), }).promise(); @@ -305,7 +320,11 @@ test.serial('queryRules should look up sns-type rules which are associated with state: 'ENABLED', }), ]; - const createdRules = await Promise.all(rules.map((rule) => rulesModel.create(rule))); + + const ruleWithTrigger1 = await rulesModel.createRuleTrigger(rules[0]); + const rule1 = await rulesModel.create(ruleWithTrigger1); + const ruleWithTrigger2 = await rulesModel.createRuleTrigger(rules[1]); + const rule2 = await rulesModel.create(ruleWithTrigger2); const result = await rulesModel.queryRules({ type: 'sns', @@ -313,13 
+332,13 @@ test.serial('queryRules should look up sns-type rules which are associated with version: collection.version, }); t.is(result.length, 1); - t.deepEqual(result[0], createdRules[0]); + t.deepEqual(result[0], rule1); t.teardown(async () => { - await Promise.all(createdRules.map((rule) => rulesModel.delete(rule))); + await rulesModel.delete(rule1); + await rulesModel.delete(rule2); await awsServices.sns().deleteTopic({ TopicArn, }); - stub.restore(); }); }); @@ -342,7 +361,7 @@ test('queryRules should look up kinesis-type rules which are associated with the test('queryRules should look up kinesis-type rules which are associated with the source ARN', async (t) => { const result = await rulesModel.queryRules({ - sourceArn: 'kinesisarn-4', + sourceArn: kinesisRule4.rule.value, type: 'kinesis', }); t.is(result.length, 1); @@ -352,7 +371,7 @@ test('queryRules should look up kinesis-type rules which are associated with the const result = await rulesModel.queryRules({ name: testCollectionName, version: '2.0.0', - sourceArn: 'kinesisarn-5', + sourceArn: kinesisRule5.rule.value, type: 'kinesis', }); t.is(result.length, 1); diff --git a/packages/api/tests/models/rules/test-rules-model.js b/packages/api/tests/models/rules/test-rules-model.js index f816674dc6f..e5eecd496b3 100644 --- a/packages/api/tests/models/rules/test-rules-model.js +++ b/packages/api/tests/models/rules/test-rules-model.js @@ -1,7 +1,7 @@ 'use strict'; +const fs = require('fs-extra'); const test = require('ava'); -const sinon = require('sinon'); const cloneDeep = require('lodash/cloneDeep'); const get = require('lodash/get'); @@ -20,7 +20,6 @@ const { createSqsQueues, fakeRuleFactoryV2 } = require('../../../lib/testUtils') process.env.RulesTable = `RulesTable_${randomString()}`; process.env.stackName = randomString(); -process.env.messageConsumer = randomString(); process.env.KinesisInboundEventLogger = randomString(); process.env.system_bucket = randomString(); @@ -56,6 +55,18 @@ async function 
deleteKinesisEventSourceMappings() { let rulesModel; test.before(async () => { + const lambda = await awsServices.lambda().createFunction({ + Code: { + ZipFile: fs.readFileSync(require.resolve('@cumulus/test-data/fake-lambdas/hello.zip')), + }, + FunctionName: randomId('messageConsumer'), + Role: randomId('role'), + Handler: 'index.handler', + Runtime: 'nodejs12.x', + }).promise(); + process.env.messageConsumer = lambda.FunctionName; + process.env.messageConsumerArn = lambda.FunctionArn; + // create Rules table rulesModel = new models.Rule(); await rulesModel.createTable(); @@ -99,7 +110,7 @@ test.beforeEach((t) => { }, rule: { type: 'kinesis', - value: 'my-kinesis-arn', + value: `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis')}`, }, state: 'ENABLED', }); @@ -111,14 +122,14 @@ test.after.always(async () => { await recursivelyDeleteS3Bucket(process.env.system_bucket); }); -test('create defaults rule state to ENABLED', async (t) => { +test('createRuleTrigger() defaults rule state to ENABLED', async (t) => { const { onetimeRule } = t.context; // remove state from rule to be created delete onetimeRule.state; - // create rule - const rule = await rulesModel.create(onetimeRule); + // create rule trigger + const rule = await rulesModel.createRuleTrigger(onetimeRule); t.is(rule.state, 'ENABLED'); @@ -129,8 +140,9 @@ test('create defaults rule state to ENABLED', async (t) => { test('Creates and deletes a onetime rule', async (t) => { const { onetimeRule } = t.context; - // create rule - const rule = await rulesModel.create(onetimeRule); + // create rule trigger and rule + const ruleWithTrigger = await rulesModel.createRuleTrigger(onetimeRule); + const rule = await rulesModel.create(ruleWithTrigger); t.is(rule.name, onetimeRule.name); @@ -183,17 +195,10 @@ test.serial('Creating an invalid rule does not create workflow triggers', async ruleItem.rule.type = 'invalid'; - const createTriggerStub = sinon.stub(models.Rule.prototype, 
'createRuleTrigger').resolves(ruleItem); - - try { - await t.throwsAsync( - () => rulesModel.create(ruleItem), - { name: 'SchemaValidationError' } - ); - t.true(createTriggerStub.notCalled); - } finally { - createTriggerStub.restore(); - } + await t.throwsAsync( + () => rulesModel.createRuleTrigger(ruleItem), + { name: 'SchemaValidationError' } + ); }); test('Enabling a disabled rule updates the state', async (t) => { @@ -202,12 +207,14 @@ test('Enabling a disabled rule updates the state', async (t) => { const ruleItem = cloneDeep(onetimeRule); ruleItem.state = 'DISABLED'; - const rule = await rulesModel.create(ruleItem); + const ruleWithTrigger = await rulesModel.createRuleTrigger(ruleItem); + const rule = await rulesModel.create(ruleWithTrigger); t.is(rule.state, 'DISABLED'); const updates = { name: rule.name, state: 'ENABLED' }; - const updatedRule = await rulesModel.update(rule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.is(updatedRule.name, rule.name); t.is(updatedRule.type, rule.type); @@ -219,45 +226,37 @@ test('Enabling a disabled rule updates the state', async (t) => { test.serial('Updating a valid rule to have an invalid schema throws an error and does not update triggers', async (t) => { const { onetimeRule } = t.context; - const rule = await rulesModel.create(onetimeRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(onetimeRule); + const rule = await rulesModel.create(ruleWithTrigger); const updates = { name: rule.name, rule: null }; - const updateTriggerStub = sinon.stub(models.Rule.prototype, 'updateRuleTrigger').resolves(rule); - - try { - await t.throwsAsync( - () => rulesModel.update(rule, updates), - { name: 'SchemaValidationError' } - ); - - t.true(updateTriggerStub.notCalled); - } finally { - updateTriggerStub.restore(); - } + await t.throwsAsync( + () => rulesModel.updateRuleTrigger(rule, updates), + { 
name: 'SchemaValidationError' } + ); }); -test.serial('Creating a kinesis type rule adds event mappings, creates rule', async (t) => { +test.serial('createRuleTrigger() for a kinesis type rule adds event mappings', async (t) => { const { kinesisRule } = t.context; // create rule - const createdRule = await rulesModel.create(kinesisRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); const kinesisEventMappings = await getKinesisEventMappings(); const consumerEventMappings = kinesisEventMappings[0].EventSourceMappings; const logEventMappings = kinesisEventMappings[1].EventSourceMappings; t.is(consumerEventMappings.length, 1); t.is(logEventMappings.length, 1); - t.is(consumerEventMappings[0].UUID, createdRule.rule.arn); - t.is(logEventMappings[0].UUID, createdRule.rule.logEventArn); + t.is(consumerEventMappings[0].UUID, ruleWithTrigger.rule.arn); + t.is(logEventMappings[0].UUID, ruleWithTrigger.rule.logEventArn); - t.is(createdRule.name, kinesisRule.name); - t.is(createdRule.rule.value, kinesisRule.rule.value); - t.false(createdRule.rule.arn === undefined); - t.false(createdRule.rule.logEventArn === undefined); + t.is(ruleWithTrigger.name, kinesisRule.name); + t.is(ruleWithTrigger.rule.value, kinesisRule.rule.value); + t.false(ruleWithTrigger.rule.arn === undefined); + t.false(ruleWithTrigger.rule.logEventArn === undefined); // clean up - await rulesModel.delete(createdRule); await deleteKinesisEventSourceMappings(); }); @@ -265,7 +264,8 @@ test.serial('Deleting a kinesis style rule removes event mappings', async (t) => const { kinesisRule } = t.context; // create and delete rule - const createdRule = await rulesModel.create(kinesisRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); + const createdRule = await rulesModel.create(ruleWithTrigger); t.true(await rulesModel.exists(createdRule.name)); await rulesModel.delete(createdRule); @@ -282,12 +282,14 @@ test.serial('Updating a kinesis type rule state does not 
change event source map const { kinesisRule } = t.context; // create rule - await rulesModel.create(kinesisRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); + await rulesModel.create(ruleWithTrigger); const rule = await rulesModel.get({ name: kinesisRule.name }); // update rule state const updates = { name: rule.name, state: 'ENABLED' }; - const updatedRule = await rulesModel.update(rule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.is(updatedRule.state, 'ENABLED'); @@ -300,20 +302,22 @@ test.serial('Updating a kinesis type rule state does not change event source map await deleteKinesisEventSourceMappings(); }); -test.serial('Updaing a kinesis type rule value results in new event source mappings', async (t) => { +test.serial('Updating a kinesis type rule value results in new event source mappings', async (t) => { const { kinesisRule } = t.context; // create rule - await rulesModel.create(kinesisRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); + await rulesModel.create(ruleWithTrigger); const rule = await rulesModel.get({ name: kinesisRule.name }); // update rule value const updates = { name: rule.name, - rule: { type: rule.rule.type, value: 'my-new-kinesis-arn' }, + rule: { type: rule.rule.type, value: `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis')}` }, }; - const updatedRule = await rulesModel.update(rule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.is(updatedRule.name, rule.name); t.not(updatedRule.rule.value, rule.rule.value); @@ -328,11 +332,151 @@ test.serial('Updaing a kinesis type rule value results in new event source mappi await deleteKinesisEventSourceMappings(); }); +test.serial('Calling updateRuleTrigger() with a kinesis type rule 
value does not delete existing source mappings', async (t) => { + const { kinesisRule } = t.context; + + // create rule trigger and rule + const kinesisArn1 = `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis1')}`; + kinesisRule.rule.value = kinesisArn1; + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); + await rulesModel.create(ruleWithTrigger); + + const rule = await rulesModel.get({ name: kinesisRule.name }); + t.teardown(async () => { + await rulesModel.delete(rule); + await deleteKinesisEventSourceMappings(); + }); + + // update rule value + const updates = { + name: rule.name, + rule: { ...rule.rule, value: `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis2')}` }, + }; + + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); + + t.is(updatedRule.name, rule.name); + t.not(updatedRule.rule.value, rule.rule.value); + + // Event source mappings exist and have been updated + t.truthy(updatedRule.rule.arn); + t.not(updatedRule.rule.arn, rule.rule.arn); + t.truthy(updatedRule.rule.logEventArn); + t.not(updatedRule.rule.logEventArn, rule.rule.logEventArn); + + const kinesisEventMappings = await getKinesisEventMappings(); + const consumerEventMappings = kinesisEventMappings[0].EventSourceMappings; + const logEventMappings = kinesisEventMappings[1].EventSourceMappings; + + t.is(consumerEventMappings.length, 2); + t.is(consumerEventMappings.filter((mapping) => mapping.EventSourceArn === kinesisArn1).length, 1); + t.is(logEventMappings.length, 2); + t.is(logEventMappings.filter((mapping) => mapping.EventSourceArn === kinesisArn1).length, 1); +}); + +test.serial('Calling updateRuleTrigger() with an SNS type rule value does not delete existing source mappings', async (t) => { + const topic1 = await awsServices.sns().createTopic({ Name: randomId('topic1_') }).promise(); + const topic2 = await awsServices.sns().createTopic({ Name: 
randomId('topic2_') }).promise(); + + // create rule trigger and rule + const snsRule = fakeRuleFactoryV2({ + workflow, + rule: { + type: 'sns', + value: topic1.TopicArn, + }, + state: 'ENABLED', + }); + + const snsRuleWithTrigger = await rulesModel.createRuleTrigger(snsRule); + await rulesModel.create(snsRuleWithTrigger); + + const rule = await rulesModel.get({ name: snsRule.name }); + t.teardown(async () => { + await rulesModel.delete(rule); + await awsServices.sns().deleteTopic({ TopicArn: topic1.TopicArn }).promise(); + await awsServices.sns().deleteTopic({ TopicArn: topic2.TopicArn }).promise(); + }); + + // update rule value + const updates = { + name: rule.name, + rule: { ...rule.rule, value: topic2.TopicArn }, + }; + + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); + + t.is(updatedRule.name, rule.name); + t.not(updatedRule.rule.value, rule.rule.value); + + // Event source mappings exist and have been updated + t.truthy(updatedRule.rule.arn); + t.not(updatedRule.rule.arn, rule.rule.arn); +}); + +test.serial('deleteOldEventSourceMappings() removes kinesis source mappings', async (t) => { + const { kinesisRule } = t.context; + + // create rule trigger and rule + kinesisRule.rule.value = `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis1')}`; + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); + await rulesModel.create(ruleWithTrigger); + + const rule = await rulesModel.get({ name: kinesisRule.name }); + t.teardown(() => rulesModel.delete(rule)); + + const [ + consumerEventMappingsBefore, + logEventMappingsBefore, + ] = await getKinesisEventMappings(); + t.is(consumerEventMappingsBefore.EventSourceMappings.length, 1); + t.is(logEventMappingsBefore.EventSourceMappings.length, 1); + + await rulesModel.deleteOldEventSourceMappings(rule); + + const [ + consumerEventMappingsAfter, + logEventMappingsAfter, + ] = await 
getKinesisEventMappings(); + t.is(consumerEventMappingsAfter.EventSourceMappings.length, 0); + t.is(logEventMappingsAfter.EventSourceMappings.length, 0); +}); + +test.serial('deleteOldEventSourceMappings() removes SNS source mappings', async (t) => { + const topic1 = await awsServices.sns().createTopic({ Name: randomId('topic1_') }).promise(); + + // create rule trigger and rule + const snsRule = fakeRuleFactoryV2({ + workflow, + rule: { + type: 'sns', + value: topic1.TopicArn, + }, + state: 'ENABLED', + }); + + const ruleWithTrigger = await rulesModel.createRuleTrigger(snsRule); + await rulesModel.create(ruleWithTrigger); + + const rule = await rulesModel.get({ name: snsRule.name }); + + const { subExists } = await rulesModel.checkForSnsSubscriptions(rule); + t.true(subExists); + + await rulesModel.deleteOldEventSourceMappings(rule); + + const { subExists: subExists2 } = await rulesModel.checkForSnsSubscriptions(rule); + t.false(subExists2); +}); + test.serial('Updating a kinesis type rule workflow does not affect value or event source mappings', async (t) => { const { kinesisRule } = t.context; // create rule - await rulesModel.create(kinesisRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); + await rulesModel.create(ruleWithTrigger); const rule = await rulesModel.get({ name: kinesisRule.name }); // update rule value @@ -341,7 +485,8 @@ test.serial('Updating a kinesis type rule workflow does not affect value or even workflow: 'new-workflow', }; - const updatedRule = await rulesModel.update(rule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.is(updatedRule.name, rule.name); t.is(updatedRule.rule.value, rule.rule.value); @@ -363,10 +508,12 @@ test.serial('Creating a kinesis type rule using existing event source mappings d const newKinesisRule = cloneDeep(kinesisRule); newKinesisRule.name = `${kinesisRule.name}_new`; 
- await rulesModel.create(kinesisRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); + await rulesModel.create(ruleWithTrigger); const rule = await rulesModel.get({ name: kinesisRule.name }); - await rulesModel.create(newKinesisRule); + const newRuleWithTrigger = await rulesModel.createRuleTrigger(newKinesisRule); + await rulesModel.create(newRuleWithTrigger); const newRule = await rulesModel.get({ name: newKinesisRule.name }); t.not(newRule.name, rule.name); @@ -393,9 +540,11 @@ test.serial('It does not delete event source mappings if they exist for other ru kinesisRuleThree.name = `${kinesisRule.name}_three`; // create two rules with same value - await rulesModel.create(kinesisRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(kinesisRule); + await rulesModel.create(ruleWithTrigger); const rule = await rulesModel.get({ name: kinesisRule.name }); - await rulesModel.create(kinesisRuleTwo); + const ruleWithTrigger2 = await rulesModel.createRuleTrigger(kinesisRuleTwo); + await rulesModel.create(ruleWithTrigger2); const ruleTwo = await rulesModel.get({ name: kinesisRuleTwo.name }); // same event source mapping @@ -406,7 +555,8 @@ test.serial('It does not delete event source mappings if they exist for other ru await rulesModel.delete(ruleTwo); // create third rule, it should use the existing event source mapping - await rulesModel.create(kinesisRuleThree); + const ruleWithTrigger3 = await rulesModel.createRuleTrigger(kinesisRuleThree); + await rulesModel.create(ruleWithTrigger3); const ruleThree = await rulesModel.get({ name: kinesisRuleThree.name }); t.is(ruleThree.rule.arn, rule.rule.arn); t.is(ruleThree.rule.logEventArn, rule.rule.logEventArn); @@ -417,50 +567,47 @@ test.serial('It does not delete event source mappings if they exist for other ru await deleteKinesisEventSourceMappings(); }); -test.serial('Creating a kinesis rule where an event source mapping already exists, but is not enabled, succeeds', async (t) => { - 
process.env.messageConsumer = randomString(); - - const item = fakeRuleFactoryV2({ +test.serial('Creating triggers for a kinesis rule where an event source mapping already exists, but is not enabled, succeeds', async (t) => { + const kinesisArn = `arn:aws:kinesis:us-east-1:000000000000:${randomId('kinesis')}`; + const rule = fakeRuleFactoryV2({ workflow, rule: { type: 'kinesis', - value: randomString(), + value: kinesisArn, }, state: 'ENABLED', }); - const lambdaStub = sinon.stub(awsServices, 'lambda') - .returns({ - createEventSourceMapping: () => ({ - promise: () => Promise.resolve({ UUID: randomString() }), - }), - deleteEventSourceMapping: () => ({ - promise: () => Promise.resolve(), - }), - updateEventSourceMapping: () => ({ - promise: () => Promise.resolve({ UUID: randomString() }), - }), - listEventSourceMappings: () => ({ - promise: () => Promise.resolve({ - EventSourceMappings: [ - { - UUID: randomString(), - EventSourceArn: item.rule.value, - FunctionArn: `arn:aws:lambda:us-west-2:000000000000:function:${process.env.messageConsumer}`, - State: 'Disabled', - }, - ], - }), - }), - }); + const params = { + EventSourceArn: rule.rule.value, + FunctionName: process.env.messageConsumer, + StartingPosition: 'TRIM_HORIZON', + Enabled: false, + }; + await awsServices.lambda().createEventSourceMapping(params).promise(); + t.teardown(() => deleteKinesisEventSourceMappings()); + + const mappings = await getKinesisEventMappings(); + const messageConsumerSource = mappings.find( + (mapping) => mapping.EventSourceMappings.find( + (eventSourceMapping) => + eventSourceMapping.FunctionArn === process.env.messageConsumerArn + && eventSourceMapping.EventSourceArn === kinesisArn + ) + ); + t.is( + messageConsumerSource.EventSourceMappings.length, + 1 + ); + const [messageConsumerSourceMapping] = messageConsumerSource.EventSourceMappings; + t.is(messageConsumerSourceMapping.State, 'Disabled'); try { - await rulesModel.create(item); + const ruleWithTrigger = await 
rulesModel.createRuleTrigger(rule); + await rulesModel.create(ruleWithTrigger); t.pass(); } catch (error) { t.fail(error); - } finally { - lambdaStub.restore(); } }); @@ -471,7 +618,7 @@ test('Creating an invalid kinesis type rule does not add event mappings', async delete newKinesisRule.name; // attempt to create rule - await t.throwsAsync(rulesModel.create(newKinesisRule), { name: 'SchemaValidationError' }); + await t.throwsAsync(rulesModel.createRuleTrigger(newKinesisRule), { name: 'SchemaValidationError' }); const kinesisEventMappings = await getKinesisEventMappings(); const consumerEventMappings = kinesisEventMappings[0].EventSourceMappings; @@ -489,7 +636,8 @@ test('Creating a rule with a queueUrl parameter succeeds', async (t) => { const ruleItem = cloneDeep(onetimeRule); ruleItem.queueUrl = 'testQueue'; - const response = await rulesModel.create(ruleItem); + const ruleWithTrigger = await rulesModel.createRuleTrigger(ruleItem); + const response = await rulesModel.create(ruleWithTrigger); const payload = await models.Rule.buildPayload(ruleItem); @@ -507,13 +655,15 @@ test('Updates rule meta object', async (t) => { triggerRule, }; - const rule = await rulesModel.create(ruleItem); + const ruleWithTrigger = await rulesModel.createRuleTrigger(ruleItem); + const rule = await rulesModel.create(ruleWithTrigger); t.is(rule.meta.triggerRule, triggerRule); const newTriggerRule = randomId('triggerRule'); const updates = { name: rule.name, meta: { triggerRule: newTriggerRule } }; - const updatedRule = await rulesModel.update(rule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.is(updatedRule.meta.triggerRule, newTriggerRule); }); @@ -529,7 +679,8 @@ test('Updates a deeply nested key', async (t) => { testObject, }; - const rule = await rulesModel.create(ruleItem); + const ruleWithTrigger = await rulesModel.createRuleTrigger(ruleItem); + const rule = 
await rulesModel.create(ruleWithTrigger); t.deepEqual(rule.meta.testObject, testObject); @@ -540,7 +691,8 @@ test('Updates a deeply nested key', async (t) => { testObject: newTestObject, }, }; - const updatedRule = await rulesModel.update(rule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.deepEqual(updatedRule.meta.testObject, newTestObject); }); @@ -557,7 +709,8 @@ test('Update preserves nested keys', async (t) => { testObject, }; - const rule = await rulesModel.create(ruleItem); + const ruleWithTrigger = await rulesModel.createRuleTrigger(ruleItem); + const rule = await rulesModel.create(ruleWithTrigger); t.is(rule.meta.foo, 'bar'); t.deepEqual(rule.meta.testObject, testObject); @@ -569,8 +722,8 @@ test('Update preserves nested keys', async (t) => { testObject: newTestObject, }, }; - const updatedRule = await rulesModel.update(rule, updates); - + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.is(updatedRule.meta.foo, 'bar'); t.deepEqual(updatedRule.meta.testObject, newTestObject); }); @@ -588,7 +741,8 @@ test('Creating, updating, and deleting SQS type rule succeeds', async (t) => { state: 'ENABLED', }); - const createdRule = await rulesModel.create(rule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(rule); + const createdRule = await rulesModel.create(ruleWithTrigger); t.deepEqual(createdRule.rule, rule.rule); t.is(get(createdRule, 'meta.visibilityTimeout', 300), 300); @@ -609,7 +763,8 @@ test('Creating, updating, and deleting SQS type rule succeeds', async (t) => { }, }; - const updatedRule = await rulesModel.update(createdRule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(createdRule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); 
t.deepEqual(updatedRule.meta.testObject, testObject); t.is(updatedRule.rule.value, newQueues.queueUrl); @@ -641,7 +796,7 @@ test('Creating SQS rule fails if queue does not exist', async (t) => { state: 'ENABLED', }); await t.throwsAsync( - rulesModel.create(rule), + rulesModel.createRuleTrigger(rule), { message: /SQS queue non-existent-queue does not exist/ } ); }); @@ -657,7 +812,7 @@ test('Creating SQS rule fails if there is no redrive policy on the queue', async state: 'ENABLED', }); await t.throwsAsync( - rulesModel.create(rule), + rulesModel.createRuleTrigger(rule), { message: `SQS queue ${queueUrl} does not have a dead-letter queue configured` } ); }); @@ -665,7 +820,8 @@ test('Creating SQS rule fails if there is no redrive policy on the queue', async test.serial('Rule.exists() returns true when a record exists', async (t) => { const { onetimeRule } = t.context; - await rulesModel.create(onetimeRule); + const ruleWithTrigger = await rulesModel.createRuleTrigger(onetimeRule); + await rulesModel.create(ruleWithTrigger); t.true(await rulesModel.exists(onetimeRule.name)); }); diff --git a/packages/api/tests/models/rules/test-sns-rules.js b/packages/api/tests/models/rules/test-sns-rules.js index 6ce2f2cdeda..6f9df65fc9a 100644 --- a/packages/api/tests/models/rules/test-sns-rules.js +++ b/packages/api/tests/models/rules/test-sns-rules.js @@ -1,3 +1,4 @@ +const fs = require('fs-extra'); const test = require('ava'); const sinon = require('sinon'); @@ -15,15 +16,24 @@ const { ResourceNotFoundError, resourceNotFoundInfo } = require('../../../lib/er const workflow = randomString(); let rulesModel; -let sandbox; test.before(async () => { process.env.RulesTable = `RulesTable_${randomString()}`; process.env.stackName = randomString(); - process.env.messageConsumer = randomString(); process.env.KinesisInboundEventLogger = randomString(); process.env.system_bucket = randomString(); + const lambda = await awsServices.lambda().createFunction({ + Code: { + ZipFile: 
fs.readFileSync(require.resolve('@cumulus/test-data/fake-lambdas/hello.zip')), + }, + FunctionName: randomId('messageConsumer'), + Role: randomId('role'), + Handler: 'index.handler', + Runtime: 'nodejs12.x', + }).promise(); + process.env.messageConsumer = lambda.FunctionName; + rulesModel = new Rule(); await rulesModel.createTable(); @@ -43,28 +53,25 @@ test.before(async () => { {} ), ]); +}); - sandbox = sinon.createSandbox(); - sandbox.stub(awsServices, 'lambda') - .returns({ - addPermission: () => ({ - promise: () => Promise.resolve(), - }), - removePermission: () => ({ - promise: () => Promise.resolve(), - }), - }); +test.beforeEach(async (t) => { + const topic = await awsServices.sns().createTopic({ Name: randomId('sns') }).promise(); + t.context.snsTopicArn = topic.TopicArn; +}); + +test.afterEach.always(async (t) => { + await awsServices.sns().deleteTopic({ TopicArn: t.context.snsTopicArn }).promise(); }); test.after.always(async () => { // cleanup table - sandbox.restore(); await rulesModel.deleteTable(); await recursivelyDeleteS3Bucket(process.env.system_bucket); }); test('creating a disabled SNS rule creates no event source mapping', async (t) => { - const snsTopicArn = randomString(); + const { snsTopicArn } = t.context; const item = fakeRuleFactoryV2({ workflow, rule: { @@ -82,21 +89,7 @@ test('creating a disabled SNS rule creates no event source mapping', async (t) = }); test.serial('disabling an SNS rule removes the event source mapping', async (t) => { - const snsTopicArn = randomString(); - const snsStub = sinon.stub(awsServices, 'sns') - .returns({ - listSubscriptionsByTopic: () => ({ - promise: () => Promise.resolve({ - Subscriptions: [{ - Endpoint: process.env.messageConsumer, - SubscriptionArn: snsTopicArn, - }], - }), - }), - unsubscribe: () => ({ - promise: () => Promise.resolve(), - }), - }); + const { snsTopicArn } = t.context; const item = fakeRuleFactoryV2({ workflow, @@ -107,14 +100,16 @@ test.serial('disabling an SNS rule removes the 
event source mapping', async (t) state: 'ENABLED', }); - const rule = await rulesModel.create(item); + const ruleWithTrigger = await rulesModel.createRuleTrigger(item); + const rule = await rulesModel.create(ruleWithTrigger); t.is(rule.rule.value, snsTopicArn); t.truthy(rule.rule.arn); t.is(rule.state, 'ENABLED'); const updates = { name: rule.name, state: 'DISABLED' }; - const updatedRule = await rulesModel.update(rule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.is(updatedRule.name, rule.name); t.is(updatedRule.state, 'DISABLED'); @@ -122,26 +117,11 @@ test.serial('disabling an SNS rule removes the event source mapping', async (t) t.is(updatedRule.rule.value, rule.rule.value); t.falsy(updatedRule.rule.arn); - await rulesModel.delete(rule); - t.teardown(() => snsStub.restore()); + t.teardown(() => rulesModel.delete(rule)); }); test.serial('enabling a disabled SNS rule and passing rule.arn throws specific error', async (t) => { - const snsTopicArn = randomString(); - const snsStub = sinon.stub(awsServices, 'sns') - .returns({ - listSubscriptionsByTopic: () => ({ - promise: () => Promise.resolve({ - Subscriptions: [{ - Endpoint: process.env.messageConsumer, - SubscriptionArn: snsTopicArn, - }], - }), - }), - unsubscribe: () => ({ - promise: () => Promise.resolve(), - }), - }); + const { snsTopicArn } = t.context; const item = fakeRuleFactoryV2({ workflow, @@ -152,7 +132,8 @@ test.serial('enabling a disabled SNS rule and passing rule.arn throws specific e state: 'DISABLED', }); - const rule = await rulesModel.create(item); + const ruleWithTrigger = await rulesModel.createRuleTrigger(item); + const rule = await rulesModel.create(ruleWithTrigger); t.is(rule.rule.value, snsTopicArn); t.falsy(rule.rule.arn); @@ -169,32 +150,14 @@ test.serial('enabling a disabled SNS rule and passing rule.arn throws specific e // Should fail because a disabled rule 
should not have an ARN // when being updated - await t.throwsAsync(rulesModel.update(rule, updates), null, + await t.throwsAsync(rulesModel.updateRuleTrigger(rule, updates), null, 'Including rule.arn is not allowed when enabling a disabled rule'); - t.teardown(async () => { - await rulesModel.delete(rule); - snsStub.restore(); - }); + t.teardown(() => rulesModel.delete(rule)); }); test.serial('updating an SNS rule updates the event source mapping', async (t) => { - const snsTopicArn = randomString(); - const newSnsTopicArn = randomString(); - - const snsStub = sinon.stub(awsServices, 'sns') - .returns({ - listSubscriptionsByTopic: () => ({ - promise: () => Promise.resolve({ - Subscriptions: [{ - Endpoint: process.env.messageConsumer, - SubscriptionArn: randomString(), - }], - }), - }), - unsubscribe: () => ({ - promise: () => Promise.resolve(), - }), - }); + const { snsTopicArn } = t.context; + const { TopicArn: newSnsTopicArn } = await awsServices.sns().createTopic({ Name: randomId('sns') }).promise(); const item = fakeRuleFactoryV2({ workflow, @@ -205,39 +168,26 @@ test.serial('updating an SNS rule updates the event source mapping', async (t) = state: 'ENABLED', }); - const rule = await rulesModel.create(item); + const ruleWithTrigger = await rulesModel.createRuleTrigger(item); + const rule = await rulesModel.create(ruleWithTrigger); t.is(rule.rule.value, snsTopicArn); const updates = { name: rule.name, rule: { value: newSnsTopicArn } }; - const updatedRule = await rulesModel.update(rule, updates); + const ruleWithUpdatedTrigger = await rulesModel.updateRuleTrigger(rule, updates); + const updatedRule = await rulesModel.update(ruleWithUpdatedTrigger); t.is(updatedRule.name, rule.name); t.is(updatedRule.type, rule.type); t.is(updatedRule.rule.value, newSnsTopicArn); t.not(updatedRule.rule.arn, rule.rule.arn); - await rulesModel.delete(rule); - t.teardown(() => snsStub.restore()); + t.teardown(() => rulesModel.delete(rule)); }); test.serial('deleting an SNS rule 
updates the event source mapping', async (t) => { - const snsTopicArn = randomString(); - - const snsStub = sinon.stub(awsServices, 'sns') - .returns({ - listSubscriptionsByTopic: () => ({ - promise: () => Promise.resolve({ - Subscriptions: [{ - Endpoint: process.env.messageConsumer, - SubscriptionArn: randomString(), - }], - }), - }), - unsubscribe: () => ({ - promise: () => Promise.resolve(), - }), - }); + const { snsTopicArn } = t.context; + const unsubscribeSpy = sinon.spy(awsServices.sns(), 'unsubscribe'); const item = fakeRuleFactoryV2({ @@ -249,7 +199,8 @@ test.serial('deleting an SNS rule updates the event source mapping', async (t) = state: 'ENABLED', }); - const rule = await rulesModel.create(item); + const ruleWithTrigger = await rulesModel.createRuleTrigger(item); + const rule = await rulesModel.create(ruleWithTrigger); t.is(rule.rule.value, snsTopicArn); @@ -261,7 +212,6 @@ test.serial('deleting an SNS rule updates the event source mapping', async (t) = })); t.teardown(() => { - snsStub.restore(); unsubscribeSpy.restore(); }); }); @@ -272,7 +222,7 @@ test.serial('multiple rules using same SNS topic can be created and deleted', as Name: randomId('topic'), }).promise(); - const rule1 = await rulesModel.create(fakeRuleFactoryV2({ + const ruleWithTrigger = await rulesModel.createRuleTrigger(fakeRuleFactoryV2({ rule: { type: 'sns', value: TopicArn, @@ -280,7 +230,8 @@ test.serial('multiple rules using same SNS topic can be created and deleted', as workflow, state: 'ENABLED', })); - const rule2 = await rulesModel.create(fakeRuleFactoryV2({ + const rule1 = await rulesModel.create(ruleWithTrigger); + const ruleWithTrigger2 = await rulesModel.createRuleTrigger(fakeRuleFactoryV2({ rule: { type: 'sns', value: TopicArn, @@ -288,6 +239,7 @@ test.serial('multiple rules using same SNS topic can be created and deleted', as workflow, state: 'ENABLED', })); + const rule2 = await rulesModel.create(ruleWithTrigger2); // rules share the same subscription 
t.is(rule1.rule.arn, rule2.rule.arn); @@ -315,24 +267,10 @@ test.serial('deleteSnsTrigger throws more detailed ResourceNotFoundError', async const errorMessage = 'Resource is not found in resource policy.'; const error = new Error(errorMessage); error.code = 'ResourceNotFoundException'; - const snsTopicArn = randomString(); + const { snsTopicArn } = t.context; const lambdaStub = sinon.stub(awsServices.lambda(), 'removePermission').throws(error); - const snsStub = sinon.stub(awsServices, 'sns') - .returns({ - listSubscriptionsByTopic: () => ({ - promise: () => Promise.resolve({ - Subscriptions: [{ - Endpoint: process.env.messageConsumer, - SubscriptionArn: randomString(), - }], - }), - }), - unsubscribe: () => ({ - promise: () => Promise.resolve(), - }), - }); - const rule = await rulesModel.create(fakeRuleFactoryV2({ + const ruleWithTrigger = await rulesModel.createRuleTrigger(fakeRuleFactoryV2({ rule: { type: 'sns', value: snsTopicArn, @@ -340,6 +278,7 @@ test.serial('deleteSnsTrigger throws more detailed ResourceNotFoundError', async workflow, state: 'ENABLED', })); + const rule = await rulesModel.create(ruleWithTrigger); await t.throwsAsync( rulesModel.deleteSnsTrigger(rule), { @@ -350,7 +289,6 @@ test.serial('deleteSnsTrigger throws more detailed ResourceNotFoundError', async t.teardown(async () => { lambdaStub.restore(); - snsStub.restore(); await rulesModel.delete(rule); }); }); diff --git a/packages/api/tests/models/test-collections-model.js b/packages/api/tests/models/test-collections-model.js index 0ea5964093e..88638452ec9 100644 --- a/packages/api/tests/models/test-collections-model.js +++ b/packages/api/tests/models/test-collections-model.js @@ -239,7 +239,8 @@ test.serial('Collection.delete() throws an exception if the collection has assoc }).promise(), ]); - await ruleModel.create(rule); + const ruleWithTrigger = await ruleModel.createRuleTrigger(rule); + await ruleModel.create(ruleWithTrigger); try { await collectionsModel.delete({ name, version }); 
diff --git a/packages/api/tests/models/test-providers-model.js b/packages/api/tests/models/test-providers-model.js index a7889e0a891..c30efed73d9 100644 --- a/packages/api/tests/models/test-providers-model.js +++ b/packages/api/tests/models/test-providers-model.js @@ -89,7 +89,8 @@ test('Providers.delete() throws an exception if the provider has associated rule ), ]); - await ruleModel.create(rule); + const ruleWithTrigger = await ruleModel.createRuleTrigger(rule); + await ruleModel.create(ruleWithTrigger); try { await providersModel.delete({ id: providerId }); diff --git a/packages/async-operations/package.json b/packages/async-operations/package.json index 8d131e22c2c..22e8968a5f9 100644 --- a/packages/async-operations/package.json +++ b/packages/async-operations/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/async-operations", - "version": "10.1.0", + "version": "10.1.1", "description": "Cumulus Core internal async operations module", "main": "./dist/index.js", "types": "./dist/index.d.ts", @@ -28,14 +28,14 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/db": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/types": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/db": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/types": "10.1.1", "uuid": "8.3.2" }, "devDependencies": { - "@cumulus/common": "10.1.0", + "@cumulus/common": "10.1.1", "@types/aws-sdk": "2.7.0", "@types/uuid": "^8.0.0" } diff --git a/packages/aws-client/package.json b/packages/aws-client/package.json index 63cd31802bc..58d78f667ed 100644 --- a/packages/aws-client/package.json +++ b/packages/aws-client/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/aws-client", - "version": "10.1.0", + "version": "10.1.1", "description": "Utilities for working with AWS", "keywords": [ "GIBS", @@ -43,12 +43,12 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/checksum": "10.1.0", - 
"@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/checksum": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", "aws-sdk": "^2.814.0", "jsonpath-plus": "^1.1.0", - "lodash": "~4.17.20", + "lodash": "~4.17.21", "p-map": "^1.2.0", "p-retry": "^4.2.0", "p-timeout": "^4.1.0", diff --git a/packages/aws-client/src/test-utils.ts b/packages/aws-client/src/test-utils.ts index 58c849d1ff7..6fff869775a 100644 --- a/packages/aws-client/src/test-utils.ts +++ b/packages/aws-client/src/test-utils.ts @@ -5,28 +5,28 @@ export const inTestMode = () => process.env.NODE_ENV === 'test'; // From https://github.com/localstack/localstack/blob/master/README.md const localStackPorts = { - stepfunctions: 4585, - apigateway: 4567, - cloudformation: 4581, - cloudwatch: 4582, - cloudwatchevents: 4582, - cloudwatchlogs: 4586, - dynamodb: 4564, - es: 4571, - firehose: 4573, - iam: 4593, - kinesis: 4568, - kms: 4599, - lambda: 4574, - redshift: 4577, - route53: 4580, - s3: 4572, - secretsmanager: 4584, - ses: 4579, - sns: 4575, - sqs: 4576, - ssm: 4583, - sts: 4592, + stepfunctions: 4566, + apigateway: 4566, + cloudformation: 4566, + cloudwatch: 4566, + cloudwatchevents: 4566, + cloudwatchlogs: 4566, + dynamodb: 4566, + es: 4566, + firehose: 4566, + iam: 4566, + kinesis: 4566, + kms: 4566, + lambda: 4566, + redshift: 4566, + route53: 4566, + s3: 4566, + secretsmanager: 4566, + ses: 4566, + sns: 4566, + sqs: 4566, + ssm: 4566, + sts: 4566, }; /** diff --git a/packages/aws-client/tests/test-CloudFormation.js b/packages/aws-client/tests/test-CloudFormation.js index 41d0f9837a5..c8b23afaa8d 100644 --- a/packages/aws-client/tests/test-CloudFormation.js +++ b/packages/aws-client/tests/test-CloudFormation.js @@ -11,7 +11,11 @@ test('describeCfStack() returns the stack information', async (t) => { await cf().createStack({ StackName, TemplateBody: JSON.stringify({ - Resources: {}, + Resources: { + MyBucket: { + Type: 'AWS::S3::Bucket', + }, + }, }), }).promise(); 
@@ -60,7 +64,11 @@ test('getCfStackParameterValues() returns object excluding keys for missing para await cf().createStack({ StackName, TemplateBody: JSON.stringify({ - Resources: {}, + Resources: { + MyBucket: { + Type: 'AWS::S3::Bucket', + }, + }, }), }).promise(); @@ -81,7 +89,11 @@ test('getCfStackParameterValues() returns requested stack parameters', async (t) foo: { Type: 'String' }, key: { Type: 'String' }, }, - Resources: {}, + Resources: { + MyBucket: { + Type: 'AWS::S3::Bucket', + }, + }, }), Parameters: [ { ParameterKey: 'foo', ParameterValue: 'bar' }, diff --git a/packages/checksum/package.json b/packages/checksum/package.json index 2ebfb9d1329..e51b2b4193e 100644 --- a/packages/checksum/package.json +++ b/packages/checksum/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/checksum", - "version": "10.1.0", + "version": "10.1.1", "description": "Cumulus checksum utilities", "engines": { "node": ">=12.18.0" diff --git a/packages/cmr-client/package.json b/packages/cmr-client/package.json index 31d84c770f0..af2cc012ca3 100644 --- a/packages/cmr-client/package.json +++ b/packages/cmr-client/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/cmr-client", - "version": "10.1.0", + "version": "10.1.1", "description": "A Node.js client to NASA's Common Metadata Repository (CMR) API.", "engines": { "node": ">=12.18.0" @@ -34,11 +34,11 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", "got": "^11.7.0", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "public-ip": "^3.0.0", "xml2js": "^0.4.19" } diff --git a/packages/cmrjs/package.json b/packages/cmrjs/package.json index 5f5290a6d61..43000f19b44 100644 --- a/packages/cmrjs/package.json +++ b/packages/cmrjs/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/cmrjs", - "version": "10.1.0", + "version": 
"10.1.1", "description": "A node SDK for CMR", "engines": { "node": ">=12.18.0" @@ -33,16 +33,16 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/cmr-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/distribution-utils": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/launchpad-auth": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/cmr-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/distribution-utils": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/launchpad-auth": "10.1.1", + "@cumulus/logger": "10.1.1", "got": "^11.8.1", "js2xmlparser": "^4.0.0", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "public-ip": "^3.0.0", "url-join": "^1.0.0", "xml2js": "^0.4.19" diff --git a/packages/cmrjs/tests/cmr-utils/test-cmr-utils.js b/packages/cmrjs/tests/cmr-utils/test-cmr-utils.js index a6777e3a7d0..089c6901262 100644 --- a/packages/cmrjs/tests/cmr-utils/test-cmr-utils.js +++ b/packages/cmrjs/tests/cmr-utils/test-cmr-utils.js @@ -407,7 +407,7 @@ test('constructCmrConceptLink returns umm_json link', (t) => { test.serial('uploadEcho10CMRFile uploads CMR File to S3 correctly, preserving tags and setting ContentType', async (t) => { const cmrFile = { - bucket: 'Echo10FileBucket', + bucket: 'echo10filebucket', key: 'metadata.cmr.xml', }; await s3().createBucket({ Bucket: cmrFile.bucket }).promise(); @@ -436,7 +436,7 @@ test.serial('uploadEcho10CMRFile uploads CMR File to S3 correctly, preserving ta test.serial('uploadUMMGJSONCMRFile uploads CMR File to S3 correctly, preserving tags and setting ContentType', async (t) => { const cmrFile = { - bucket: 'UMMGJSONFileBucket', + bucket: 'ummg-file-bucket', key: 'metadata.cmr.json', }; await s3().createBucket({ Bucket: cmrFile.bucket }).promise(); diff --git a/packages/collection-config-store/package.json b/packages/collection-config-store/package.json index ac66e4bb241..289f85c1f14 100644 
--- a/packages/collection-config-store/package.json +++ b/packages/collection-config-store/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/collection-config-store", - "version": "10.1.0", + "version": "10.1.1", "description": "Utility for persisting collection configuration to S3 and retrieving it", "keywords": [ "CUMULUS", @@ -32,8 +32,8 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/message": "10.1.0" + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/message": "10.1.1" } } diff --git a/packages/common/package.json b/packages/common/package.json index 1375d4325ce..d608f23b8e1 100644 --- a/packages/common/package.json +++ b/packages/common/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/common", - "version": "10.1.0", + "version": "10.1.1", "description": "Common utilities used across tasks", "keywords": [ "GIBS", @@ -41,14 +41,14 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", "ajv": "^6.12.3", "aws-sdk": "^2.585.0", "follow-redirects": "^1.2.4", "fs-extra": "^5.0.0", "jsonpath-plus": "^3.0.0", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "node-forge": "^1.0.0", "p-limit": "^2.0.0", "p-map": "^1.2.0", diff --git a/packages/common/src/key-pair-provider.ts b/packages/common/src/key-pair-provider.ts index 7c45dc123a2..7a22d2b3b4d 100644 --- a/packages/common/src/key-pair-provider.ts +++ b/packages/common/src/key-pair-provider.ts @@ -39,7 +39,7 @@ const buildS3Client = () => { if (inTestMode()) { options.accessKeyId = 'my-access-key-id'; - options.endpoint = `http://${getLocalStackHost()}:4572`; + options.endpoint = `http://${getLocalStackHost()}:4566`; options.region = 'us-east-1'; options.s3ForcePathStyle = true; options.secretAccessKey = 'my-secret-access-key'; diff --git 
a/packages/db/package.json b/packages/db/package.json index 91c012d3799..b4e872251c9 100644 --- a/packages/db/package.json +++ b/packages/db/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/db", - "version": "10.1.0", + "version": "10.1.1", "description": "Utilities for working with the Cumulus DB", "license": "Apache-2.0", "main": "./dist/index.js", @@ -29,16 +29,16 @@ "node": ">=12.18.0" }, "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/message": "10.1.0", - "@cumulus/types": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/message": "10.1.1", + "@cumulus/types": "10.1.1", "crypto-random-string": "^3.2.0", "is-valid-hostname": "0.0.1", "knex": "0.95.15", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "pg": "^8.3.0", "snake-camel": "^1.0.6", "uuid": "8.3.2" diff --git a/packages/db/src/index.ts b/packages/db/src/index.ts index 1fc5e6a3693..af7c05c2734 100644 --- a/packages/db/src/index.ts +++ b/packages/db/src/index.ts @@ -69,7 +69,10 @@ export { translateApiCollectionToPostgresCollection } from './translate/collecti export { translateApiProviderToPostgresProvider, } from './translate/providers'; -export { translateApiRuleToPostgresRule } from './translate/rules'; +export { + translateApiRuleToPostgresRule, + translateApiRuleToPostgresRuleRaw, +} from './translate/rules'; export { translateApiExecutionToPostgresExecution, translatePostgresExecutionToApiExecution, diff --git a/packages/db/src/models/base.ts b/packages/db/src/models/base.ts index c936a0196d9..da67e4cb369 100644 --- a/packages/db/src/models/base.ts +++ b/packages/db/src/models/base.ts @@ -142,15 +142,18 @@ class BasePgModel { * * @param {Knex | Knex.Transaction} knexOrTransaction - DB client or transaction * @param {ItemType} item - A record to insert into the DB + * @param {string | Array} 
returningFields - A string or array of strings + * of columns to return. Defaults to 'cumulus_id'. * @returns {Promise} List of IDs of the inserted records */ async create( knexOrTransaction: Knex | Knex.Transaction, - item: ItemType + item: ItemType, + returningFields: string | string[] = 'cumulus_id' ): Promise { return await knexOrTransaction(this.tableName) .insert(item) - .returning('cumulus_id'); + .returning(returningFields); } /** diff --git a/packages/db/src/translate/rules.ts b/packages/db/src/translate/rules.ts index e303fb80c71..8e022523569 100644 --- a/packages/db/src/translate/rules.ts +++ b/packages/db/src/translate/rules.ts @@ -1,4 +1,5 @@ import { Knex } from 'knex'; +import { removeNilProperties } from '@cumulus/common/util'; import { RuleRecord } from '@cumulus/types/api/rules'; import { CollectionPgModel } from '../models/collection'; @@ -14,36 +15,56 @@ import { PostgresRule } from '../types/rule'; * @param {Object} providerPgModel - Instance of the provider database model * @returns {Object} A rule record */ +export const translateApiRuleToPostgresRuleRaw = async ( + record: RuleRecord, + dbClient: Knex, + collectionPgModel = new CollectionPgModel(), + providerPgModel = new ProviderPgModel() +): Promise => ({ + name: record.name, + workflow: record.workflow, + provider_cumulus_id: record.provider ? await providerPgModel.getRecordCumulusId( + dbClient, + { name: record.provider } + ) : undefined, + collection_cumulus_id: record.collection ? await collectionPgModel.getRecordCumulusId( + dbClient, + { name: record.collection.name, version: record.collection.version } + ) : undefined, + meta: record.meta, + payload: record.payload as any, + queue_url: record.queueUrl, + arn: record.rule.arn, + type: record.rule.type, + value: record.rule.value, + log_event_arn: record.rule.logEventArn, + enabled: (record.state === undefined) || (record.state === 'ENABLED'), + tags: (record.tags ? 
JSON.stringify(record.tags) : undefined), + execution_name_prefix: record.executionNamePrefix, + created_at: (record.createdAt ? new Date(record.createdAt) : undefined), + updated_at: (record.updatedAt ? new Date(record.updatedAt) : undefined), +}); + +/** + * Generate a Postgres rule record from a DynamoDB record and remove nil properties. + * + * @param {Object} record - A rule + * @param {Object} dbClient - Knex client for reading from RDS database + * @param {Object} collectionPgModel - Instance of the collection database model + * @param {Object} providerPgModel - Instance of the provider database model + * @returns {Object} A rule record + */ export const translateApiRuleToPostgresRule = async ( record: RuleRecord, dbClient: Knex, collectionPgModel = new CollectionPgModel(), providerPgModel = new ProviderPgModel() ): Promise => { - const ruleRecord: PostgresRule = { - name: record.name, - workflow: record.workflow, - provider_cumulus_id: record.provider ? await providerPgModel.getRecordCumulusId( - dbClient, - { name: record.provider } - ) : undefined, - collection_cumulus_id: record.collection ? await collectionPgModel.getRecordCumulusId( - dbClient, - { name: record.collection.name, version: record.collection.version } - ) : undefined, - meta: record.meta, - payload: record.payload as any, - queue_url: record.queueUrl, - arn: record.rule.arn, - type: record.rule.type, - value: record.rule.value, - log_event_arn: record.rule.logEventArn, - enabled: (record.state === undefined) || (record.state === 'ENABLED'), - tags: (record.tags ? JSON.stringify(record.tags) : undefined), - execution_name_prefix: record.executionNamePrefix, - created_at: (record.createdAt ? new Date(record.createdAt) : undefined), - updated_at: (record.updatedAt ? 
new Date(record.updatedAt) : undefined), - }; - - return ruleRecord; + const ruleRecord: PostgresRule = await translateApiRuleToPostgresRuleRaw( + record, + dbClient, + collectionPgModel, + providerPgModel + ); + return removeNilProperties(ruleRecord); }; diff --git a/packages/db/tests/models/test-rule-model.js b/packages/db/tests/models/test-rule-model.js index 65b7dffbaea..2383ab6c637 100644 --- a/packages/db/tests/models/test-rule-model.js +++ b/packages/db/tests/models/test-rule-model.js @@ -71,3 +71,39 @@ test('RulePgModel.upsert() overwrites a rule record', async (t) => { updatedRule ); }); + +test('RulePgModel.upsert() overwrites a rule record and deletes fields', async (t) => { + const { + knex, + rulePgModel, + ruleRecord, + } = t.context; + + ruleRecord.queue_url = 'queue-url'; + + await rulePgModel.create(knex, ruleRecord); + const initialRule = await rulePgModel.get(knex, { + name: ruleRecord.name, + }); + t.is(initialRule.queue_url, 'queue-url'); + + const ruleUpdates = { + ...ruleRecord, + queue_url: undefined, + value: cryptoRandomString({ length: 5 }), + }; + + await rulePgModel.upsert(knex, ruleUpdates); + + const actualRule = await rulePgModel.get(knex, { + name: ruleRecord.name, + }); + + t.like( + actualRule, + { + ...ruleUpdates, + queue_url: null, + } + ); +}); diff --git a/packages/db/tests/translate/test-rules.js b/packages/db/tests/translate/test-rules.js index 0df4e5f21fa..20b2725fdb8 100644 --- a/packages/db/tests/translate/test-rules.js +++ b/packages/db/tests/translate/test-rules.js @@ -1,5 +1,62 @@ const test = require('ava'); -const { translateApiRuleToPostgresRule } = require('../../dist/translate/rules'); +const { + translateApiRuleToPostgresRule, + translateApiRuleToPostgresRuleRaw, +} = require('../../dist/translate/rules'); + +test('translateApiRuleToPostgresRuleRaw converts API rule to Postgres and keeps nil fields', async (t) => { + const record = { + name: 'name', + workflow: 'workflow_name', + provider: 'fake-provider', + 
state: 'ENABLED', + collection: { + name: 'fake-collection', + version: '0.0.0', + }, + rule: { type: 'onetime', value: 'value' }, + createdAt: Date.now(), + updatedAt: Date.now(), + }; + + const fakeDbClient = {}; + const fakeCollectionPgModel = { + getRecordCumulusId: () => Promise.resolve(1), + }; + const fakeProviderPgModel = { + getRecordCumulusId: () => Promise.resolve(2), + }; + + const expectedPostgresRule = { + name: record.name, + workflow: record.workflow, + meta: undefined, + payload: undefined, + queue_url: undefined, + arn: undefined, + type: record.rule.type, + value: record.rule.value, + log_event_arn: undefined, + enabled: true, + tags: undefined, + execution_name_prefix: undefined, + created_at: new Date(record.createdAt), + updated_at: new Date(record.updatedAt), + collection_cumulus_id: 1, + provider_cumulus_id: 2, + }; + + const result = await translateApiRuleToPostgresRuleRaw( + record, + fakeDbClient, + fakeCollectionPgModel, + fakeProviderPgModel + ); + t.deepEqual( + result, + expectedPostgresRule + ); +}); test('translateApiRuleToPostgresRule converts API rule to Postgres', async (t) => { const record = { @@ -14,7 +71,7 @@ test('translateApiRuleToPostgresRule converts API rule to Postgres', async (t) = rule: { type: 'onetime', value: 'value', arn: 'arn', logEventArn: 'event_arn' }, executionNamePrefix: 'prefix', meta: { key: 'value' }, - queueName: 'queue_url', + queueUrl: 'queue_url', payload: { result: { key: 'value' } }, tags: ['tag1', 'tag2'], createdAt: Date.now(), diff --git a/packages/distribution-utils/package.json b/packages/distribution-utils/package.json index fd9d1767564..d080a73c448 100644 --- a/packages/distribution-utils/package.json +++ b/packages/distribution-utils/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/distribution-utils", - "version": "10.1.0", + "version": "10.1.1", "description": "Cumulus Distribution utilities", "keywords": [ "CUMULUS" @@ -33,9 +33,9 @@ "author": "Cumulus Authors", "license": "Apache-2.0", 
"dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/errors": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/errors": "10.1.1", "url-join": "^1.1.0" }, "devDependencies": { diff --git a/packages/errors/package.json b/packages/errors/package.json index 6463aa8e45f..9380f1c11bb 100644 --- a/packages/errors/package.json +++ b/packages/errors/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/errors", - "version": "10.1.0", + "version": "10.1.1", "description": "Provides error classes for Cumulus", "keywords": [ "GIBS", diff --git a/packages/es-client/package.json b/packages/es-client/package.json index bf0f1e9f9fb..fb31d74f424 100644 --- a/packages/es-client/package.json +++ b/packages/es-client/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/es-client", - "version": "10.1.0", + "version": "10.1.1", "description": "Utilities for working with Elasticsearch", "keywords": [ "CUMULUS", @@ -30,20 +30,20 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/common": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/message": "10.1.0", + "@cumulus/common": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/message": "10.1.1", "@elastic/elasticsearch": "^5.6.20", "aws-elasticsearch-connector": "8.2.0", "aws-sdk": "^2.585.0", - "lodash": "~4.17.20", + "lodash": "~4.17.21", "moment": "2.29.1", "p-limit": "^1.2.0" }, "devDependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/test-data": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/test-data": "10.1.1", "p-each-series": "^2.1.0" } } diff --git a/packages/es-client/tests/test-collections.js b/packages/es-client/tests/test-collections.js index 38d70ec5c80..ee1b2a8cf71 100644 --- a/packages/es-client/tests/test-collections.js +++ b/packages/es-client/tests/test-collections.js @@ -16,7 +16,7 @@ const Collection = 
require('../collections'); const { Search } = require('../search'); const { bootstrapElasticSearch } = require('../bootstrap'); -process.env.system_bucket = randomId('systemBucket'); +process.env.system_bucket = randomId('system-bucket'); process.env.stackName = randomId('stackName'); let esClient; diff --git a/packages/es-client/tests/test-stats.js b/packages/es-client/tests/test-stats.js index c78e69dc82b..a690276320c 100644 --- a/packages/es-client/tests/test-stats.js +++ b/packages/es-client/tests/test-stats.js @@ -14,7 +14,7 @@ const { Search } = require('../search'); const { bootstrapElasticSearch } = require('../bootstrap'); const Stats = require('../stats'); -process.env.system_bucket = randomId('systemBucket'); +process.env.system_bucket = randomId('system-bucket'); process.env.stackName = randomId('stackName'); let esClient; diff --git a/packages/ingest/package.json b/packages/ingest/package.json index eeb1bb4e7fe..84c20c9e7d9 100644 --- a/packages/ingest/package.json +++ b/packages/ingest/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/ingest", - "version": "10.1.0", + "version": "10.1.1", "description": "Ingest utilities", "engines": { "node": ">=12.18.0" @@ -38,13 +38,13 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/db": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/message": "10.1.0", - "@cumulus/sftp-client": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/db": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/message": "10.1.1", + "@cumulus/sftp-client": "10.1.1", "aws-sdk": "^2.585.0", "cksum": "^1.3.0", "delay": "^4.3.0", @@ -54,16 +54,16 @@ "is-ip": "^2.0.0", "is-valid-hostname": "^0.1.1", "jsftp": "https://github.com/jkovarik/jsftp.git#add_288", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "mime-types": "^2.1.22", "moment": 
"2.29.1", "simplecrawler": "^1.1.9", "tough-cookie": "^4.0.0" }, "devDependencies": { - "@cumulus/checksum": "10.1.0", - "@cumulus/cmrjs": "10.1.0", - "@cumulus/test-data": "10.1.0", - "@cumulus/types": "10.1.0" + "@cumulus/checksum": "10.1.1", + "@cumulus/cmrjs": "10.1.1", + "@cumulus/test-data": "10.1.1", + "@cumulus/types": "10.1.1" } } diff --git a/packages/integration-tests/index.js b/packages/integration-tests/index.js index 49159749a1c..90e39850f78 100644 --- a/packages/integration-tests/index.js +++ b/packages/integration-tests/index.js @@ -22,14 +22,13 @@ const { getWorkflowFileKey, } = require('@cumulus/common/workflows'); const { readJsonFile } = require('@cumulus/common/FileUtils'); -const RulesModel = require('@cumulus/api/models/rules'); const collectionsApi = require('@cumulus/api-client/collections'); const providersApi = require('@cumulus/api-client/providers'); +const rulesApi = require('@cumulus/api-client/rules'); const asyncOperationsApi = require('@cumulus/api-client/asyncOperations'); const { pullStepFunctionEvent } = require('@cumulus/message/StepFunctions'); const { addCollections, addCustomUrlPathToCollectionFiles, buildCollection } = require('./Collections.js'); -const rulesApi = require('./api/rules'); const executionsApi = require('./api/executions'); const granulesApi = require('./api/granules'); const api = require('./api/api'); @@ -432,7 +431,7 @@ async function addRulesWithPostfix(config, dataDirectory, overrides, postfix) { // race condition return await pMap( rules, - (rule) => { + async (rule) => { if (postfix) { rule.name += replace(postfix, /-/g, '_'); rule.collection.name += postfix; @@ -447,9 +446,14 @@ async function addRulesWithPostfix(config, dataDirectory, overrides, postfix) { ...config, })); - const rulesmodel = new RulesModel(); console.log(`adding rule ${JSON.stringify(templatedRule)}`); - return rulesmodel.create(templatedRule); + + const response = await rulesApi.postRule({ + prefix: stackName, + rule: 
templatedRule, + }); + const { record } = JSON.parse(response.body); + return record; }, { concurrency: 1 } ); @@ -467,17 +471,6 @@ function addRules(config, dataDirectory, overrides) { return addRulesWithPostfix(config, dataDirectory, overrides); } -/** - * deletes a rule by name - * - * @param {string} name - name of the rule to delete. - * @returns {Promise.} - superclass delete promise - */ -async function _deleteOneRule(name) { - const rulesModel = new RulesModel(); - return await rulesModel.get({ name }).then((item) => rulesModel.delete(item)); -} - /** * Remove params added to the rule when it is saved into dynamo * and comes back from the db @@ -523,7 +516,10 @@ async function deleteRules(stackName, bucketName, rules, postfix) { await pMap( rules, - (rule) => _deleteOneRule(postfix ? `${rule.name}${postfix}` : rule.name), + (rule) => rulesApi.deleteRule({ + prefix: stackName, + ruleName: postfix ? `${rule.name}${postfix}` : rule.name, + }), { concurrency: process.env.CONCURRENCY || 3 } ); diff --git a/packages/integration-tests/package.json b/packages/integration-tests/package.json index 4f3bce7d091..4c16835cf60 100644 --- a/packages/integration-tests/package.json +++ b/packages/integration-tests/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/integration-tests", - "version": "10.1.0", + "version": "10.1.1", "description": "Integration tests", "bin": { "cumulus-test": "./bin/cli.js" @@ -28,16 +28,16 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/api": "10.1.0", - "@cumulus/api-client": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/cmr-client": "10.1.0", - "@cumulus/cmrjs": "10.1.0", - "@cumulus/common": "10.1.0", - "@cumulus/launchpad-auth": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/message": "10.1.0", - "@cumulus/oauth-client": "10.1.0", + "@cumulus/api": "10.1.1", + "@cumulus/api-client": "10.1.1", + "@cumulus/aws-client": "10.1.1", + "@cumulus/cmr-client": "10.1.1", + "@cumulus/cmrjs": 
"10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/launchpad-auth": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/message": "10.1.1", + "@cumulus/oauth-client": "10.1.1", "aws-sdk": "^2.585.0", "base-64": "^0.1.0", "commander": "^2.15.0", @@ -48,7 +48,7 @@ "handlebars": "^4.0.11", "js-yaml": "^3.13.1", "jsonwebtoken": "^8.5.1", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "moment": "2.29.1", "p-map": "^2.1.0", "p-retry": "^2.0.0", diff --git a/packages/launchpad-auth/package.json b/packages/launchpad-auth/package.json index 6b17f3ee4dc..1c6daccecf8 100644 --- a/packages/launchpad-auth/package.json +++ b/packages/launchpad-auth/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/launchpad-auth", - "version": "10.1.0", + "version": "10.1.1", "description": "Utilities for authentication with Launchpad", "keywords": [ "CUMULUS", @@ -37,10 +37,10 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/logger": "10.1.1", "got": "^11.7.0", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "uuid": "^3.2.1" } } diff --git a/packages/logger/package.json b/packages/logger/package.json index ec92bdfea9c..78c42e583b6 100644 --- a/packages/logger/package.json +++ b/packages/logger/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/logger", - "version": "10.1.0", + "version": "10.1.1", "description": "A log library for use on Cumulus", "keywords": [ "GIBS", diff --git a/packages/message/package.json b/packages/message/package.json index d36410b3057..9c5ca6c9979 100644 --- a/packages/message/package.json +++ b/packages/message/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/message", - "version": "10.1.0", + "version": "10.1.1", "description": "Utilities for building and parsing Cumulus messages", "keywords": [ "GIBS", @@ -38,13 +38,13 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - 
"@cumulus/common": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/types": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/types": "10.1.1", "jsonpath-plus": "^3.0.0", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "uuid": "^8.2.0" }, "devDependencies": { diff --git a/packages/oauth-client/package.json b/packages/oauth-client/package.json index 93f33e1c720..a1838382af8 100644 --- a/packages/oauth-client/package.json +++ b/packages/oauth-client/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/oauth-client", - "version": "10.1.0", + "version": "10.1.1", "description": "A generic auth client", "homepage": "https://github.com/nasa/cumulus/tree/master/packages/oauth-client#readme", "repository": { diff --git a/packages/object-store/package.json b/packages/object-store/package.json index a61b97493f9..fbb27d0821c 100644 --- a/packages/object-store/package.json +++ b/packages/object-store/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/object-store", - "version": "10.1.0", + "version": "10.1.1", "description": "Utilities for managing object stores", "keywords": [ "GIBS", @@ -39,6 +39,6 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0" + "@cumulus/aws-client": "10.1.1" } } diff --git a/packages/pvl/package.json b/packages/pvl/package.json index 82f630a7b27..6437e0a001e 100644 --- a/packages/pvl/package.json +++ b/packages/pvl/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/pvl", - "version": "10.1.0", + "version": "10.1.1", "description": "Parse and serialize Parameter Value Language, a data markup language used by NASA", "main": "index.js", "engine": { @@ -31,6 +31,6 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "lodash": "^4.17.20" + "lodash": "^4.17.21" } } diff --git a/packages/s3-credentials-endpoint/package.json 
b/packages/s3-credentials-endpoint/package.json index f558ae1d9b8..aaa6a23ef61 100644 --- a/packages/s3-credentials-endpoint/package.json +++ b/packages/s3-credentials-endpoint/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/s3-credentials-endpoint", - "version": "10.1.0", + "version": "10.1.1", "description": "An API Gateway Lambda to return AWS credentials for fetching objects from S3", "license": "Apache-2.0", "engines": { @@ -17,12 +17,12 @@ "timeout": "15m" }, "dependencies": { - "@cumulus/api": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/cmrjs": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/oauth-client": "10.1.0", + "@cumulus/api": "10.1.1", + "@cumulus/aws-client": "10.1.1", + "@cumulus/cmrjs": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/oauth-client": "10.1.1", "aws-serverless-express": "^3.3.6", "body-parser": "^1.19.0", "cookie-parser": "^1.4.4", @@ -32,7 +32,7 @@ "express-promise-router": "^3.0.3", "got": "^11.7.0", "hsts": "^2.2.0", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "morgan": "^1.9.1", "url-join": "^4.0.0" }, diff --git a/packages/schemas/package.json b/packages/schemas/package.json index 4dd700cdee2..fe57be36f1a 100644 --- a/packages/schemas/package.json +++ b/packages/schemas/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/schemas", - "version": "10.1.0", + "version": "10.1.1", "description": "Helpers for managing Cumulus task schemas", "homepage": "https://github.com/nasa/cumulus/tree/master/packages/schemas", "repository": { diff --git a/packages/sftp-client/package.json b/packages/sftp-client/package.json index 3185e2540e4..758dc1195bc 100644 --- a/packages/sftp-client/package.json +++ b/packages/sftp-client/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/sftp-client", - "version": "10.1.0", + "version": "10.1.1", "description": "A Promise-based SFTP client", "keywords": [ "GIBS", @@ -36,16 +36,16 @@ "author": "Cumulus Authors", "license": 
"Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", - "lodash": "^4.17.20", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", + "lodash": "^4.17.21", "mime-types": "^2.1.27", "ssh2": "^1.0.0", "ssh2-sftp-client": "^7.0.4" }, "devDependencies": { - "@cumulus/checksum": "10.1.0", - "@cumulus/test-data": "10.1.0", + "@cumulus/checksum": "10.1.1", + "@cumulus/test-data": "10.1.1", "@types/ssh2-sftp-client": "^7.0.0" } } diff --git a/packages/tea-map-cache/package.json b/packages/tea-map-cache/package.json index 1aa31b9bb4f..75dc100ebe1 100644 --- a/packages/tea-map-cache/package.json +++ b/packages/tea-map-cache/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/tea-map-cache", - "version": "10.1.0", + "version": "10.1.1", "description": "Tea Bucket Map Cache Writer", "main": "index.js", "engines": { @@ -27,8 +27,8 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/logger": "10.1.1", "got": "^11.7.0", "p-retry": "^4.2.0" }, diff --git a/packages/test-data/fake-lambdas/hello.zip b/packages/test-data/fake-lambdas/hello.zip new file mode 100644 index 00000000000..dc0a8557b17 Binary files /dev/null and b/packages/test-data/fake-lambdas/hello.zip differ diff --git a/packages/test-data/fake-lambdas/index.js b/packages/test-data/fake-lambdas/index.js new file mode 100644 index 00000000000..02aa9f1ad0b --- /dev/null +++ b/packages/test-data/fake-lambdas/index.js @@ -0,0 +1,3 @@ +exports.handler = () => { + console.log('hello'); +}; diff --git a/packages/test-data/package.json b/packages/test-data/package.json index dc7da08597b..52412424898 100644 --- a/packages/test-data/package.json +++ b/packages/test-data/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/test-data", - "version": "10.1.0", + "version": "10.1.1", "description": "Includes the test data for various packages", 
"keywords": [ "GIBS", diff --git a/packages/tf-inventory/package.json b/packages/tf-inventory/package.json index b2b379a6edb..05424f26fe0 100644 --- a/packages/tf-inventory/package.json +++ b/packages/tf-inventory/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/tf-inventory", - "version": "10.1.0", + "version": "10.1.1", "description": "Package to help keep track of what resources are managed by Terraform in the AWS account", "main": "index.js", "engines": { @@ -30,11 +30,11 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", + "@cumulus/aws-client": "10.1.1", "commander": "^4.1.0", - "lodash": "^4.17.20" + "lodash": "^4.17.21" }, "devDependencies": { - "@cumulus/common": "10.1.0" + "@cumulus/common": "10.1.1" } } diff --git a/packages/types/package.json b/packages/types/package.json index 3019cba062e..c6759a9c77f 100644 --- a/packages/types/package.json +++ b/packages/types/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/types", - "version": "10.1.0", + "version": "10.1.1", "description": "TypeScript definitions for working with Cumulus data structures", "keywords": [ "GIBS", diff --git a/tasks/add-missing-file-checksums/package.json b/tasks/add-missing-file-checksums/package.json index b7296c0eed9..4aa5a6b90be 100644 --- a/tasks/add-missing-file-checksums/package.json +++ b/tasks/add-missing-file-checksums/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/add-missing-file-checksums", - "version": "10.1.0", + "version": "10.1.1", "description": "Add checksums to files in S3 which don't have one", "author": "Cumulus Authors", "license": "Apache-2.0", @@ -42,12 +42,12 @@ } }, "dependencies": { - "@cumulus/aws-client": "10.1.0", + "@cumulus/aws-client": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4" }, "devDependencies": { - "@cumulus/schemas": "10.1.0", - "@cumulus/types": "10.1.0", + "@cumulus/schemas": "10.1.1", + "@cumulus/types": "10.1.1", "@types/aws-lambda": "^8.10.58" } } diff --git 
a/tasks/discover-granules/package.json b/tasks/discover-granules/package.json index c30fc65352a..dc441a520e9 100644 --- a/tasks/discover-granules/package.json +++ b/tasks/discover-granules/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/discover-granules", - "version": "10.1.0", + "version": "10.1.1", "description": "Discover Granules in FTP/HTTP/HTTPS/SFTP/S3 endpoints", "main": "index.js", "directories": { @@ -35,16 +35,16 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/api-client": "10.1.0", + "@cumulus/api-client": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/ingest": "10.1.0", - "@cumulus/logger": "10.1.0", + "@cumulus/ingest": "10.1.1", + "@cumulus/logger": "10.1.1", "got": "^9.2.1", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "p-map": "^4.0.0" }, "devDependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0" + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1" } } diff --git a/tasks/discover-pdrs/package.json b/tasks/discover-pdrs/package.json index 67dabe8e786..a2ea37e0f3a 100644 --- a/tasks/discover-pdrs/package.json +++ b/tasks/discover-pdrs/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/discover-pdrs", - "version": "10.1.0", + "version": "10.1.1", "description": "Discover PDRs in FTP and HTTP endpoints", "main": "index.js", "directories": { @@ -34,14 +34,14 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", + "@cumulus/aws-client": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/ingest": "10.1.0", - "lodash": "^4.17.20", + "@cumulus/ingest": "10.1.1", + "lodash": "^4.17.21", "p-filter": "^2.1.0" }, "devDependencies": { - "@cumulus/common": "10.1.0", - "@cumulus/errors": "10.1.0" + "@cumulus/common": "10.1.1", + "@cumulus/errors": "10.1.1" } } diff --git a/tasks/files-to-granules/package.json b/tasks/files-to-granules/package.json index d4854bb7d8e..9f8742cee8c 100644 --- 
a/tasks/files-to-granules/package.json +++ b/tasks/files-to-granules/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/files-to-granules", - "version": "10.1.0", + "version": "10.1.1", "description": "Converts array-of-files input into a granules object by extracting granuleId from filename", "main": "index.js", "directories": { @@ -32,12 +32,12 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", + "@cumulus/aws-client": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "lodash": "^4.17.20" + "lodash": "^4.17.21" }, "devDependencies": { - "@cumulus/common": "10.1.0", - "@cumulus/schemas": "10.1.0" + "@cumulus/common": "10.1.1", + "@cumulus/schemas": "10.1.1" } } diff --git a/tasks/hello-world/package.json b/tasks/hello-world/package.json index 6f071a69303..0fc591a3e89 100644 --- a/tasks/hello-world/package.json +++ b/tasks/hello-world/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/hello-world", - "version": "10.1.0", + "version": "10.1.1", "description": "Example task", "main": "index.js", "directories": { @@ -32,8 +32,8 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4" } } diff --git a/tasks/hyrax-metadata-updates/package.json b/tasks/hyrax-metadata-updates/package.json index c7b9a1f13aa..5aebdab282e 100644 --- a/tasks/hyrax-metadata-updates/package.json +++ b/tasks/hyrax-metadata-updates/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/hyrax-metadata-updates", - "version": "10.1.0", + "version": "10.1.1", "description": "Update granule metadata with hooks to OPeNDAP URL", "main": "index.js", "directories": { @@ -38,18 +38,18 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/cmr-client": "10.1.0", - "@cumulus/cmrjs": 
"10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/cmr-client": "10.1.1", + "@cumulus/cmrjs": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/errors": "10.1.0", + "@cumulus/errors": "10.1.1", "libxmljs": "^0.19.7", - "lodash": "^4.17.20", + "lodash": "^4.17.21", "xml2js": "^0.4.23" }, "devDependencies": { - "@cumulus/schemas": "10.1.0", + "@cumulus/schemas": "10.1.1", "nock": "^12.0.1", "rewire": "^6.0.0" }, diff --git a/tasks/lzards-backup/package.json b/tasks/lzards-backup/package.json index 9724392901a..cdf755cab62 100644 --- a/tasks/lzards-backup/package.json +++ b/tasks/lzards-backup/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/lzards-backup", - "version": "10.1.0", + "version": "10.1.1", "description": "Run LZARDS backup", "author": "Cumulus Authors", "license": "Apache-2.0", @@ -42,18 +42,18 @@ } }, "dependencies": { - "@cumulus/api-client": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/api-client": "10.1.1", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/distribution-utils": "10.1.0", - "@cumulus/launchpad-auth": "10.1.0", - "@cumulus/logger": "10.1.0", - "@cumulus/message": "10.1.0", + "@cumulus/distribution-utils": "10.1.1", + "@cumulus/launchpad-auth": "10.1.1", + "@cumulus/logger": "10.1.1", + "@cumulus/message": "10.1.1", "got": "11.8.3" }, "devDependencies": { - "@cumulus/schemas": "10.1.0", - "@cumulus/types": "10.1.0" + "@cumulus/schemas": "10.1.1", + "@cumulus/types": "10.1.1" } } diff --git a/tasks/move-granules/package.json b/tasks/move-granules/package.json index 952abf52a8e..f4c083d5f2e 100644 --- a/tasks/move-granules/package.json +++ b/tasks/move-granules/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/move-granules", - "version": "10.1.0", + "version": "10.1.1", "description": "Move granule files from staging to final location", 
"main": "index.js", "directories": { @@ -38,16 +38,16 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/cmrjs": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/cmrjs": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/distribution-utils": "10.1.0", - "@cumulus/errors": "10.1.0", - "@cumulus/ingest": "10.1.0", - "lodash": "^4.17.20" + "@cumulus/distribution-utils": "10.1.1", + "@cumulus/errors": "10.1.1", + "@cumulus/ingest": "10.1.1", + "lodash": "^4.17.21" }, "devDependencies": { - "@cumulus/schemas": "10.1.0" + "@cumulus/schemas": "10.1.1" } } diff --git a/tasks/move-granules/tests/test-index.js b/tasks/move-granules/tests/test-index.js index f39d6fd2f63..3536c4c3347 100644 --- a/tasks/move-granules/tests/test-index.js +++ b/tasks/move-granules/tests/test-index.js @@ -333,7 +333,7 @@ test.serial('Should overwrite files.', async (t) => { const existingModified = new Date(existingFile.LastModified).getTime(); const itemModified = new Date(item.LastModified).getTime(); - t.true(itemModified > existingModified); + t.true(itemModified >= existingModified); t.is(updatedFile.ContentLength, content.length); t.true( @@ -583,7 +583,7 @@ async function granuleFilesOverwrittenTest(t, newPayload) { // check timestamps are updated currentFilesMetadata.forEach((f) => { const existingFileMeta = existingFilesMetadata.filter((ef) => ef.key === f.key)[0]; - t.true(new Date(f.LastModified).getTime() > new Date(existingFileMeta.LastModified).getTime()); + t.true(new Date(f.LastModified).getTime() >= new Date(existingFileMeta.LastModified).getTime()); }); output.granules[0].files.forEach((f) => { diff --git a/tasks/parse-pdr/package.json b/tasks/parse-pdr/package.json index fc6a0e0f4fc..25300be754d 100644 --- a/tasks/parse-pdr/package.json +++ b/tasks/parse-pdr/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/parse-pdr", - 
"version": "10.1.0", + "version": "10.1.1", "description": "Download and Parse a given PDR", "license": "Apache-2.0", "main": "index.js", @@ -30,17 +30,17 @@ "timeout": "15m" }, "dependencies": { - "@cumulus/api-client": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/collection-config-store": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/api-client": "10.1.1", + "@cumulus/aws-client": "10.1.1", + "@cumulus/collection-config-store": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/errors": "10.1.0", - "@cumulus/ingest": "10.1.0", - "@cumulus/pvl": "10.1.0", - "lodash": "^4.17.20" + "@cumulus/errors": "10.1.1", + "@cumulus/ingest": "10.1.1", + "@cumulus/pvl": "10.1.1", + "lodash": "^4.17.21" }, "devDependencies": { - "@cumulus/test-data": "10.1.0" + "@cumulus/test-data": "10.1.1" } } diff --git a/tasks/pdr-status-check/package.json b/tasks/pdr-status-check/package.json index 6ef7de597fd..ffb8f203088 100644 --- a/tasks/pdr-status-check/package.json +++ b/tasks/pdr-status-check/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/pdr-status-check", - "version": "10.1.0", + "version": "10.1.1", "description": "Checks execution status of granules in a PDR", "main": "index.js", "directories": { @@ -32,9 +32,9 @@ "timeout": "15m" }, "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/errors": "10.1.0" + "@cumulus/errors": "10.1.1" } } diff --git a/tasks/post-to-cmr/package.json b/tasks/post-to-cmr/package.json index 5a1b6387bb5..90b2cead1f2 100644 --- a/tasks/post-to-cmr/package.json +++ b/tasks/post-to-cmr/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/post-to-cmr", - "version": "10.1.0", + "version": "10.1.1", "description": "Post a given granule to CMR", "main": "index.js", "directories": { @@ -33,16 +33,16 @@ "author": "Cumulus Authors", "license": 
"Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/cmrjs": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/cmrjs": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/errors": "10.1.0", - "@cumulus/launchpad-auth": "10.1.0", - "lodash": "^4.17.20" + "@cumulus/errors": "10.1.1", + "@cumulus/launchpad-auth": "10.1.1", + "lodash": "^4.17.21" }, "devDependencies": { - "@cumulus/cmr-client": "10.1.0", - "@cumulus/schemas": "10.1.0" + "@cumulus/cmr-client": "10.1.1", + "@cumulus/schemas": "10.1.1" } } diff --git a/tasks/queue-granules/index.js b/tasks/queue-granules/index.js index 6472f95b172..9a20d349f1a 100644 --- a/tasks/queue-granules/index.js +++ b/tasks/queue-granules/index.js @@ -48,6 +48,20 @@ function groupAndBatchGranules(granules, batchSize) { ), []); } +/** +* Updates each granule in the 'batch' to the passed in createdAt value if one does not already exist +* @param {Array} granuleBatch - Array of Cumulus Granule objects +* @param {number} createdAt - 'Date.now()' to apply to the granules if there is no +* existing createdAt value +* @returns {Array} updated array of Cumulus Granule objects +*/ +function updateGranuleBatchCreatedAt(granuleBatch, createdAt) { + return granuleBatch.map((granule) => ({ + ...granule, + createdAt: granule.createdAt ? 
granule.createdAt : createdAt, + })); +} + /** * See schemas/input.json and schemas/config.json for detailed event description * @@ -80,13 +94,14 @@ async function queueGranules(event, testMocks = {}) { const pMapConcurrency = get(event, 'config.concurrency', 3); const executionArns = await pMap( groupedAndBatchedGranules, - async (granuleBatch) => { + async (granuleBatchIn) => { const collectionConfig = await collectionConfigStore.get( - granuleBatch[0].dataType, - granuleBatch[0].version + granuleBatchIn[0].dataType, + granuleBatchIn[0].version ); - // include createdAt to ensure write logic passes + const createdAt = Date.now(); + const granuleBatch = updateGranuleBatchCreatedAt(granuleBatchIn, createdAt); await pMap( granuleBatch, (queuedGranule) => updateGranule({ @@ -98,7 +113,7 @@ async function queueGranules(event, testMocks = {}) { ), granuleId: queuedGranule.granuleId, status: 'queued', - createdAt, + createdAt: queuedGranule.createdAt, }, }), { concurrency: pMapConcurrency } @@ -147,4 +162,5 @@ module.exports = { groupAndBatchGranules, handler, queueGranules, + updateGranuleBatchCreatedAt, }; diff --git a/tasks/queue-granules/package.json b/tasks/queue-granules/package.json index 07e3d2a3a70..6aa7cffc304 100644 --- a/tasks/queue-granules/package.json +++ b/tasks/queue-granules/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/queue-granules", - "version": "10.1.0", + "version": "10.1.1", "description": "Add discovered granules to the queue", "main": "index.js", "directories": { @@ -31,14 +31,14 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/api-client": "10.1.0", - "@cumulus/aws-client": "10.1.0", - "@cumulus/collection-config-store": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/api-client": "10.1.1", + "@cumulus/aws-client": "10.1.1", + "@cumulus/collection-config-store": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/ingest": "10.1.0", - 
"@cumulus/message": "10.1.0", - "lodash": "^4.17.20", + "@cumulus/ingest": "10.1.1", + "@cumulus/message": "10.1.1", + "lodash": "^4.17.21", "p-map": "^4.0.0" } } diff --git a/tasks/queue-granules/tests/index.js b/tasks/queue-granules/tests/index.js index 617f7311521..a3c81367f94 100644 --- a/tasks/queue-granules/tests/index.js +++ b/tasks/queue-granules/tests/index.js @@ -23,7 +23,7 @@ const noop = require('lodash/noop'); const pMapSpy = sinon.spy(pMap); const fakeProvidersApi = {}; -const { groupAndBatchGranules } = require('..'); +const { groupAndBatchGranules, updateGranuleBatchCreatedAt } = require('..'); const fakeGranulesApi = { updateGranule: noop, }; @@ -109,7 +109,7 @@ test('groupAndBatchGranules uses default if batchSize is NaN', (t) => { { granuleId: '3', dataType: 'XYZ', version: '001' }, ]; const expectedBatchGranules = granules.map((g) => [g]); - const actualGroupedAndBatchedGranules = groupAndBatchGranules(granules, null); + const actualGroupedAndBatchedGranules = groupAndBatchGranules(granules, undefined); t.deepEqual(actualGroupedAndBatchedGranules, expectedBatchGranules); }); @@ -274,19 +274,23 @@ test.serial('The correct message is enqueued without a PDR', async (t) => { workflow, } = t.context; + const createdAt = Date.now(); + const granule1 = { + createdAt, dataType: `data-type-${randomString().slice(0, 6)}`, - version: '6', - granuleId: `granule-${randomString().slice(0, 6)}`, files: [{ name: `file-${randomString().slice(0, 6)}` }], + granuleId: `granule-${randomString().slice(0, 6)}`, + version: '6', }; const collectionConfig1 = { name: `collection-config-${randomString().slice(0, 6)}` }; const granule2 = { + createdAt, dataType: `data-type-${randomString().slice(0, 6)}`, - version: '6', - granuleId: `granule-${randomString().slice(0, 6)}`, files: [{ name: `file-${randomString().slice(0, 6)}` }], + granuleId: `granule-${randomString().slice(0, 6)}`, + version: '6', }; const collectionConfig2 = { name: 
`collection-config-${randomString().slice(0, 6)}` }; @@ -336,9 +340,10 @@ test.serial('The correct message is enqueued without a PDR', async (t) => { payload: { granules: [ { + createdAt, dataType: granule1.dataType, - granuleId: granule1.granuleId, files: granule1.files, + granuleId: granule1.granuleId, version: granule1.version, }, ], @@ -367,9 +372,10 @@ test.serial('The correct message is enqueued without a PDR', async (t) => { payload: { granules: [ { + createdAt, dataType: granule2.dataType, - granuleId: granule2.granuleId, files: granule2.files, + granuleId: granule2.granuleId, version: granule2.version, }, ], @@ -378,6 +384,64 @@ test.serial('The correct message is enqueued without a PDR', async (t) => { ); }); +test.serial('granules are enqueued with createdAt values added to granules that are missing them', async (t) => { + const { + collectionConfigStore, + event, + } = t.context; + + const createdAt = Date.now(); + + const granule1 = { + dataType: `data-type-${randomString().slice(0, 6)}`, + files: [{ name: `file-${randomString().slice(0, 6)}` }], + granuleId: `granule-${randomString().slice(0, 6)}`, + version: '6', + }; + const collectionConfig1 = { name: `collection-config-${randomString().slice(0, 6)}` }; + + const granule2 = { + createdAt, + dataType: `data-type-${randomString().slice(0, 6)}`, + files: [{ name: `file-${randomString().slice(0, 6)}` }], + granuleId: `granule-${randomString().slice(0, 6)}`, + version: '6', + }; + const collectionConfig2 = { name: `collection-config-${randomString().slice(0, 6)}` }; + + event.input.granules = [granule1, granule2]; + + await Promise.all([ + collectionConfigStore.put(granule1.dataType, granule1.version, collectionConfig1), + collectionConfigStore.put(granule2.dataType, granule2.version, collectionConfig2), + ]); + + await validateConfig(t, event.config); + await validateInput(t, event.input); + + const output = await queueGranules(event); + + await validateOutput(t, output); + + // Get messages from the 
queue + const receiveMessageResponse = await sqs().receiveMessage({ + QueueUrl: event.config.queueUrl, + MaxNumberOfMessages: 10, + WaitTimeSeconds: 1, + }).promise(); + const messages = receiveMessageResponse.Messages.map((message) => JSON.parse(message.Body)); + + t.is(messages.length, 2); + + const message1 = messages.find((message) => + message.payload.granules[0].granuleId === granule1.granuleId); + const message2 = messages.find((message) => + message.payload.granules[0].granuleId === granule2.granuleId); + + t.true(createdAt < message1.payload.granules[0].createdAt); + t.is(createdAt, message2.payload.granules[0].createdAt); +}); + test.serial('The correct message is enqueued with a PDR', async (t) => { const { collectionConfigStore, @@ -388,6 +452,8 @@ test.serial('The correct message is enqueued with a PDR', async (t) => { workflow, } = t.context; + const createdAt = Date.now(); + // if the event.cumulus_config has 'state_machine' and 'execution_name', the enqueued message // will have 'parentExecutionArn' event.cumulus_config = { state_machine: randomString(), execution_name: randomString() }; @@ -405,6 +471,7 @@ test.serial('The correct message is enqueued with a PDR', async (t) => { version: '6', granuleId: `granule-${randomString().slice(0, 6)}`, files: [{ name: `file-${randomString().slice(0, 6)}` }], + createdAt, }; const collectionConfig1 = { name: `collection-config-${randomString().slice(0, 6)}` }; @@ -413,6 +480,7 @@ test.serial('The correct message is enqueued with a PDR', async (t) => { version: '6', granuleId: `granule-${randomString().slice(0, 6)}`, files: [{ name: `file-${randomString().slice(0, 6)}` }], + createdAt, }; const collectionConfig2 = { name: `collection-config-${randomString().slice(0, 6)}` }; @@ -468,6 +536,7 @@ test.serial('The correct message is enqueued with a PDR', async (t) => { granuleId: granule1.granuleId, files: granule1.files, version: granule1.version, + createdAt, }, ], }, @@ -501,6 +570,7 @@ test.serial('The correct 
message is enqueued with a PDR', async (t) => { granuleId: granule2.granuleId, files: granule2.files, version: granule2.version, + createdAt, }, ], }, @@ -710,7 +780,7 @@ test.serial('If a childWorkflowMeta is provided, it is passed through to the mes ); }); -test('createdAt for queued granule is older than enqueueGranuleIngestMessage date', async (t) => { +test.serial('createdAt for queued granule is equal to enqueueGranuleIngestMessage date when granules do not have createdAt set', async (t) => { const { event } = t.context; const dataType = `data-type-${randomString().slice(0, 6)}`; const version = '6'; @@ -726,7 +796,7 @@ test('createdAt for queued granule is older than enqueueGranuleIngestMessage dat ]; const updateGranuleMock = sinon.spy(({ body }) => body.createdAt); - const enqueueGranuleIngestMessageMock = sinon.spy(() => new Date(Date.now() + 1).valueOf()); + const enqueueGranuleIngestMessageMock = sinon.spy((params) => params); const testMocks = { updateGranuleMock, @@ -734,5 +804,29 @@ test('createdAt for queued granule is older than enqueueGranuleIngestMessage dat }; await queueGranules(event, testMocks); - t.assert(updateGranuleMock.returnValues[0] < enqueueGranuleIngestMessageMock.returnValues[0]); + const expectedCreatedAt = enqueueGranuleIngestMessageMock.returnValues[0].granules[0].createdAt; + t.assert(updateGranuleMock.returnValues[0] === expectedCreatedAt); +}); + +test('updatedGranuleBatchCreatedAt updates batch granule object with correct createdAt values', (t) => { + const testGranuleBatch = [ + { + granuleId: 1, + collectionId: 'fakeCollection', + status: 'complete', + }, + { + granuleId: 1, + collectionId: 'fakeCollection', + status: 'complete', + createdAt: Date.now(), + updatedAt: Date.now(), + }, + ]; + const createdAtTestDate = Date.now(); + + const expected = [{ ...testGranuleBatch[0], createdAt: createdAtTestDate }, testGranuleBatch[1]]; + + const actual = updateGranuleBatchCreatedAt(testGranuleBatch, createdAtTestDate); + 
t.deepEqual(actual, expected); }); diff --git a/tasks/queue-pdrs/package.json b/tasks/queue-pdrs/package.json index b8d1962c5f7..89130cc69b3 100644 --- a/tasks/queue-pdrs/package.json +++ b/tasks/queue-pdrs/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/queue-pdrs", - "version": "10.1.0", + "version": "10.1.1", "description": "Add discovered PDRs to a queue", "main": "index.js", "directories": { @@ -31,11 +31,11 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/ingest": "10.1.0", - "@cumulus/message": "10.1.0", - "lodash": "^4.17.20" + "@cumulus/ingest": "10.1.1", + "@cumulus/message": "10.1.1", + "lodash": "^4.17.21" } } diff --git a/tasks/queue-workflow/package.json b/tasks/queue-workflow/package.json index b89cdc7586a..4ac33e211ee 100644 --- a/tasks/queue-workflow/package.json +++ b/tasks/queue-workflow/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/queue-workflow", - "version": "10.1.0", + "version": "10.1.1", "description": "Add workflow to the queue", "main": "index.js", "directories": { @@ -31,11 +31,11 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/ingest": "10.1.0", - "@cumulus/message": "10.1.0", - "lodash": "^4.17.20" + "@cumulus/ingest": "10.1.1", + "@cumulus/message": "10.1.1", + "lodash": "^4.17.21" } } diff --git a/tasks/sf-sqs-report/package.json b/tasks/sf-sqs-report/package.json index 82862a31bcf..24fdf860849 100644 --- a/tasks/sf-sqs-report/package.json +++ b/tasks/sf-sqs-report/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/sf-sqs-report", - "version": "10.1.0", + "version": "10.1.1", "description": "Sends an 
incoming Cumulus message to SQS", "main": "index.js", "directories": { @@ -32,11 +32,11 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", + "@cumulus/aws-client": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "lodash": "^4.17.20" + "lodash": "^4.17.21" }, "devDependencies": { - "@cumulus/common": "10.1.0" + "@cumulus/common": "10.1.1" } } diff --git a/tasks/sync-granule/package.json b/tasks/sync-granule/package.json index cdf531740a3..6cac7a226c5 100644 --- a/tasks/sync-granule/package.json +++ b/tasks/sync-granule/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/sync-granule", - "version": "10.1.0", + "version": "10.1.1", "description": "Download a given granule", "main": "index.js", "directories": { @@ -37,19 +37,19 @@ "timeout": "15m" }, "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/collection-config-store": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/collection-config-store": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/errors": "10.1.0", - "@cumulus/ingest": "10.1.0", - "@cumulus/message": "10.1.0", - "lodash": "^4.17.20", + "@cumulus/errors": "10.1.1", + "@cumulus/ingest": "10.1.1", + "@cumulus/message": "10.1.1", + "lodash": "^4.17.21", "p-map": "^2.1.0", "uuid": "^3.4.0" }, "devDependencies": { - "@cumulus/schemas": "10.1.0", - "@cumulus/test-data": "10.1.0" + "@cumulus/schemas": "10.1.1", + "@cumulus/test-data": "10.1.1" } } diff --git a/tasks/sync-granule/tests/sync_granule_test.js b/tasks/sync-granule/tests/sync_granule_test.js index b3135d0e565..f7768ca783b 100644 --- a/tasks/sync-granule/tests/sync_granule_test.js +++ b/tasks/sync-granule/tests/sync_granule_test.js @@ -973,7 +973,7 @@ async function granuleFilesOverwrittenTest(t) { const currentFileInfo = (await getFilesMetadata(output.granules[0].files))[0]; t.is(currentFileInfo.size, randomString().length); - 
t.true(currentFileInfo.LastModified > existingFileInfo.LastModified); + t.true(currentFileInfo.LastModified >= existingFileInfo.LastModified); } finally { recursivelyDeleteS3Bucket(t.context.event.config.provider.host); } diff --git a/tasks/test-processing/package.json b/tasks/test-processing/package.json index cb936ff0e9e..7e4ecb5b90c 100644 --- a/tasks/test-processing/package.json +++ b/tasks/test-processing/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/test-processing", - "version": "10.1.0", + "version": "10.1.1", "description": "Fake processing task used for integration tests", "main": "index.js", "homepage": "https://github.com/nasa/cumulus/tree/master/tasks/test-processing", @@ -21,8 +21,8 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/aws-client": "10.1.0", + "@cumulus/aws-client": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/integration-tests": "10.1.0" + "@cumulus/integration-tests": "10.1.1" } } diff --git a/tasks/update-cmr-access-constraints/package.json b/tasks/update-cmr-access-constraints/package.json index 15cf359caba..7f62cf8d607 100644 --- a/tasks/update-cmr-access-constraints/package.json +++ b/tasks/update-cmr-access-constraints/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/update-cmr-access-constraints", - "version": "10.1.0", + "version": "10.1.1", "description": "Updates CMR metadata to set access constraints", "author": "Cumulus Authors", "license": "Apache-2.0", @@ -34,13 +34,13 @@ "verbose": true }, "dependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/cmrjs": "10.1.0", + "@cumulus/aws-client": "10.1.1", + "@cumulus/cmrjs": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", "lodash": "^4.17.5" }, "devDependencies": { - "@cumulus/common": "10.1.0", - "@cumulus/schemas": "10.1.0" + "@cumulus/common": "10.1.1", + "@cumulus/schemas": "10.1.1" } } diff --git a/tasks/update-granules-cmr-metadata-file-links/package.json 
b/tasks/update-granules-cmr-metadata-file-links/package.json index 87853a5d500..0002c3c4b04 100644 --- a/tasks/update-granules-cmr-metadata-file-links/package.json +++ b/tasks/update-granules-cmr-metadata-file-links/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/update-granules-cmr-metadata-file-links", - "version": "10.1.0", + "version": "10.1.1", "description": "Update CMR metadata files with correct online access urls and etags and transfer etag info to granules' CMR files", "main": "index.js", "directories": { @@ -38,14 +38,14 @@ "author": "Cumulus Authors", "license": "Apache-2.0", "dependencies": { - "@cumulus/cmrjs": "10.1.0", - "@cumulus/common": "10.1.0", + "@cumulus/cmrjs": "10.1.1", + "@cumulus/common": "10.1.1", "@cumulus/cumulus-message-adapter-js": "2.0.4", - "@cumulus/distribution-utils": "10.1.0", + "@cumulus/distribution-utils": "10.1.1", "lodash": "^4.17.15" }, "devDependencies": { - "@cumulus/aws-client": "10.1.0", - "@cumulus/schemas": "10.1.0" + "@cumulus/aws-client": "10.1.1", + "@cumulus/schemas": "10.1.1" } } diff --git a/tf-modules/cumulus/variables.tf b/tf-modules/cumulus/variables.tf index 4932a6e9f4f..0ae1a5ffe20 100644 --- a/tf-modules/cumulus/variables.tf +++ b/tf-modules/cumulus/variables.tf @@ -3,7 +3,7 @@ variable "async_operation_image" { description = "docker image to use for Cumulus async operations tasks" type = string - default = "cumuluss/async-operation:36" + default = "cumuluss/async-operation:39" } variable "cmr_client_id" { diff --git a/tf-modules/ingest/package.json b/tf-modules/ingest/package.json index a04cfb70a2c..6a517b00047 100644 --- a/tf-modules/ingest/package.json +++ b/tf-modules/ingest/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/ingest-module", - "version": "10.1.0", + "version": "10.1.1", "description": "Terraform module for data ingest related functionality", "engines": { "node": ">=12.18.0" diff --git a/tf-modules/s3-replicator/package.json b/tf-modules/s3-replicator/package.json index 
eb311c7286d..73e4b86d074 100644 --- a/tf-modules/s3-replicator/package.json +++ b/tf-modules/s3-replicator/package.json @@ -1,6 +1,6 @@ { "name": "@cumulus/s3-replicator", - "version": "10.1.0", + "version": "10.1.1", "description": "Replicate S3 Events to alternate bucket. Solves same-region replication.", "main": "index.js", "engines": { diff --git a/website/versions.json b/website/versions.json index a4f688d962d..e26f9fec57a 100644 --- a/website/versions.json +++ b/website/versions.json @@ -1,4 +1,5 @@ [ + "v10.1.1", "v10.1.0", "v10.0.0", "v9.9.0",