Commit

Upgrade to Cumulus 16.1.1, ORCA 8.0.1
Fixes #265
chuckwondo committed Nov 3, 2023
1 parent 3f2f4e3 commit 86d95ea
Showing 11 changed files with 1,059 additions and 2,346 deletions.
18 changes: 9 additions & 9 deletions .github/workflows/main.yml
@@ -55,12 +55,12 @@ jobs:
      deploy: true
    secrets: inherit

  deploy-prod:
    # Run only on main branch (on push, including on merged PRs)
    if: github.event.ref == 'refs/heads/main'
    needs: deploy-uat
    uses: ./.github/workflows/terraspace.yml
    with:
      TS_ENV: prod
      deploy: true
    secrets: inherit
  # deploy-prod:
  #   # Run only on main branch (on push, including on merged PRs)
  #   if: github.event.ref == 'refs/heads/main'
  #   needs: deploy-uat
  #   uses: ./.github/workflows/terraspace.yml
  #   with:
  #     TS_ENV: prod
  #     deploy: true
  #   secrets: inherit
2 changes: 1 addition & 1 deletion .nvmrc
@@ -1 +1 @@
14.19.3
16.20.2
77 changes: 74 additions & 3 deletions app/stacks/cumulus/iam.tf
@@ -1,3 +1,21 @@
locals {
  # See https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy
  # At the above link, expand the collapsed section titled "Regions available
  # before August 2022" to see the list of Elastic Load Balancing account IDs by
  # region. The following map is constructed from that list, but may not be
  # complete, as we don't necessarily need to cover all regions.
  elb_account_ids = {
    "us-east-1" = "127311923021"
    "us-east-2" = "033677994240"
    "us-west-1" = "027434742980"
    "us-west-2" = "797873946194"
  }
  # 797873946194 is the Elastic Load Balancing account ID for us-west-2, as
  # shown in the elb_account_ids map above. It serves as the default value for
  # the lookup call farther below, as a matter of best practice.
  default_elb_account_id = "797873946194"
}
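
# For illustration: given the map above, the lookup expression used farther
# below resolves the Elastic Load Balancing account ID for the current region,
# falling back to the us-west-2 value when the region is not in the map:
#
#   lookup(local.elb_account_ids, data.aws_region.current.name, local.default_elb_account_id)
#
# e.g. it yields "127311923021" in us-east-1 and "797873946194" in any
# unmapped region.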

# <% if !in_sandbox? then %>
data "aws_iam_policy_document" "allow_s3_access_logging" {
statement {
@@ -16,9 +34,15 @@ data "aws_iam_policy_document" "allow_s3_access_logging" {
}
# <% end %>

#-------------------------------------------------------------------------------
# Additional permissions required in order to allow Step Functions to include
# Distributed Map states.
# See https://docs.aws.amazon.com/step-functions/latest/dg/use-dist-map-orchestrate-large-scale-parallel-workloads.html#dist-map-permissions
# Distributed Map states. This is what allows us to sidestep the 25,000 event-
# transition quota for Step Functions.
#
# See also:
# - https://docs.aws.amazon.com/step-functions/latest/dg/use-dist-map-orchestrate-large-scale-parallel-workloads.html#dist-map-permissions
#-------------------------------------------------------------------------------
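
# For orientation (a rough sketch, not taken from this repository): a workflow
# opts into Distributed Map mode in its Amazon States Language definition by
# giving a Map state an ItemProcessor configured for distributed execution,
# roughly like this (state and step names here are hypothetical):
#
#   "MyDistributedMap": {
#     "Type": "Map",
#     "ItemProcessor": {
#       "ProcessorConfig": { "Mode": "DISTRIBUTED", "ExecutionType": "STANDARD" },
#       "StartAt": "ProcessItem",
#       "States": { ... }
#     },
#     "End": true
#   }
#
# Running such a state is what requires the extra IAM permissions granted below.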

data "aws_iam_policy_document" "allow_sfn_distributed_maps" {
statement {
effect = "Allow"
@@ -50,7 +74,54 @@ resource "aws_iam_role_policy_attachment" "allow_sfn_distributed_maps" {
policy_arn = aws_iam_policy.allow_sfn_distributed_maps.arn
}

# temporary workaround for dashboard permissions issue
#-------------------------------------------------------------------------------
# Additional policy required on the system bucket as per ORCA v8.0.0.
#
# See also:
# - https://github.com/nasa/cumulus-orca/releases/tag/v8.0.0
# - https://nasa.github.io/cumulus-orca/docs/developer/deployment-guide/deployment-s3-bucket#bucket-policy-for-load-balancer-server-access-logging
#-------------------------------------------------------------------------------

data "aws_iam_policy_document" "allow_load_balancer_s3_write_access" {
statement {
effect = "Allow"
actions = ["s3:PutObject"]
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${lookup(local.elb_account_ids, data.aws_region.current.name, local.default_elb_account_id)}:root"
]
}
resources = [
"arn:aws:s3:::${var.system_bucket}/${var.prefix}-lb-gql-a-logs/AWSLogs/${local.aws_account_id}/*"
]
}
}

# Attach policy above to the system bucket
resource "null_resource" "allow_load_balancer_s3_write_access" {
triggers = {
buckets = var.system_bucket
}

# Since we do not have Terraform configured to manage our buckets, we cannot
# ask Terraform to put any policies on the buckets, so we're calling out to
# the AWS CLI to put the desired policy on our "system" (internal) bucket to
# allow load balancer logs to be written to it, as required by ORCA.
provisioner "local-exec" {
interpreter = ["bash", "-c"]
command = <<-COMMAND
aws s3api put-bucket-policy \
--bucket ${var.system_bucket} \
--policy '${data.aws_iam_policy_document.allow_load_balancer_s3_write_access.json}'
COMMAND
}
}
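
# If our buckets were managed by Terraform, the same policy could instead be
# attached natively (a sketch under that assumption, not what this stack does):
#
#   resource "aws_s3_bucket_policy" "allow_load_balancer_s3_write_access" {
#     bucket = var.system_bucket
#     policy = data.aws_iam_policy_document.allow_load_balancer_s3_write_access.json
#   }
#
# We use the local-exec workaround above because the buckets are created
# outside of this Terraform configuration.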

#-------------------------------------------------------------------------------
# Temporary workaround for dashboard permissions issue
#-------------------------------------------------------------------------------

data "aws_iam_role" "api_gateway_role" {
depends_on = [module.cumulus]

7 changes: 6 additions & 1 deletion app/stacks/cumulus/main.tf
@@ -483,7 +483,7 @@ module "ingest_and_publish_granule_workflow" {
files_to_granules_task_arn : module.cumulus.files_to_granules_task.task_arn,
move_granules_task_arn : module.cumulus.move_granules_task.task_arn,
update_granules_cmr_metadata_file_links_task_arn : module.cumulus.update_granules_cmr_metadata_file_links_task.task_arn,
copy_to_archive_task_arn : module.orca.orca_lambda_copy_to_glacier_arn,
copy_to_archive_adapter_task_arn : module.cumulus.orca_copy_to_archive_adapter_task.task_arn,
post_to_cmr_task_arn : module.cumulus.post_to_cmr_task.task_arn
})
}
@@ -495,6 +495,7 @@ module "cumulus" {
deploy_to_ngap = true

cumulus_message_adapter_lambda_layer_version_arn = module.cma.lambda_layer_version_arn
async_operation_image = "cumuluss/async-operation:47"

vpc_id = module.vpc.vpc_id
lambda_subnet_ids = module.vpc.subnets.ids
@@ -508,6 +509,10 @@
ecs_cluster_max_size = 2
key_name = var.key_name

orca_api_uri = module.orca.orca_api_deployment_invoke_url
orca_lambda_copy_to_archive_arn = module.orca.orca_lambda_copy_to_archive_arn
orca_sfn_recovery_workflow_arn = module.orca.orca_sfn_recovery_workflow_arn

rds_security_group = local.rds_security_group
rds_user_access_secret_arn = local.rds_user_access_secret_arn

4 changes: 2 additions & 2 deletions app/stacks/cumulus/orca.tf
@@ -15,7 +15,7 @@ data "aws_secretsmanager_secret_version" "rds_cluster_user_credentials_secret_ve
}

module "orca" {
source = "https://github.com/nasa/cumulus-orca/releases/download/v6.0.3/cumulus-orca-terraform.zip"
source = "https://github.com/nasa/cumulus-orca/releases/download/v8.0.1/cumulus-orca-terraform.zip"
#--------------------------
# Cumulus variables
#--------------------------
@@ -53,7 +53,7 @@ module "orca" {
# default_multipart_chunksize_mb = 250
# metadata_queue_message_retention_time = 777600
# orca_default_recovery_type = "Standard"
orca_default_storage_class = "DEEP_ARCHIVE"
orca_default_storage_class = "DEEP_ARCHIVE"
# orca_delete_old_reconcile_jobs_frequency_cron = "cron(0 0 ? * SUN *)"
# orca_ingest_lambda_memory_size = 2240
# orca_ingest_lambda_timeout = 600
@@ -233,8 +233,8 @@
"cma": {
"event.$": "$",
"task_config": {
"excludedFileExtensions": "{$.meta.collection.meta.orca.excludedFileExtensions}",
"s3MultipartChunksizeMb": "{$.meta.collection.meta.s3MultipartChunksizeMb}",
"excludedFileExtensions": "{$.meta.collection.meta.orca.excludedFileExtensions}",
"providerId": "{$.meta.provider.id}",
"providerName": "{$.meta.provider.name}",
"executionId": "{$.cumulus_meta.execution_name}",
@@ -245,7 +245,7 @@
}
},
"Type": "Task",
"Resource": "${copy_to_archive_task_arn}",
"Resource": "${copy_to_archive_adapter_task_arn}",
"Retry": [
{
"ErrorEquals": [
28 changes: 21 additions & 7 deletions app/stacks/rds-cluster/main.tf
@@ -4,6 +4,14 @@ resource "random_password" "db_password" {
  special = false
}

resource "random_password" "db_user_password" {
  length  = 50
  upper   = true
  lower   = true
  number  = true
  special = false
}

module "vpc" {
  source = "../../modules/vpc"
}
@@ -15,15 +23,21 @@ module "rds_cluster" {
  db_admin_password        = random_password.db_password.result
  db_admin_username        = "postgres"
  deletion_protection      = true
  engine_version           = "11.13"
  engine_version           = "11.18"
  parameter_group_family   = "aurora-postgresql11"
  permissions_boundary_arn = local.permissions_boundary_arn
  prefix                   = var.prefix
  provision_user_database  = true
  rds_user_password        = random_password.db_password.result
  region                   = data.aws_region.current.name
  snapshot_identifier      = null
  subnets                  = module.vpc.subnets.ids
  tags                     = { Deployment = var.prefix }
  vpc_id                   = module.vpc.vpc_id
  # ORCA requires the password to contain a special character, but a Cumulus
  # constraint allows only an underscore (in addition to alphanumeric
  # characters) and no other special characters. We therefore generate a
  # password with no special characters at all and then append an underscore
  # (we chose to add it to the end) to satisfy the ORCA requirement of at
  # least one special character.
  rds_user_password        = "${random_password.db_user_password.result}_"
  region                   = data.aws_region.current.name
  snapshot_identifier      = null
  subnets                  = module.vpc.subnets.ids
  tags                     = { Deployment = var.prefix }
  vpc_id                   = module.vpc.vpc_id
}
2 changes: 1 addition & 1 deletion config/helpers/cumulus_version_helper.rb
@@ -1,5 +1,5 @@
module Terraspace::Project::CumulusVersionHelper
  def cumulus_version
    "v13.4.0"
    "v16.1.1"
  end
end
23 changes: 14 additions & 9 deletions package.json
@@ -52,30 +52,35 @@
"tf:lambda:archive-exploded": "yarn lambda:archive-exploded >&2 && echo { '\"'dir'\"': '\"'${PWD}/${npm_package_config_lambda_archive_dir}'\"' }"
},
"engines": {
"node": ">=14"
"node": ">=16"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.370.0",
"@cumulus/aws-client": "13.4.0",
"@cumulus/cmrjs": "13.4.0",
"@cumulus/common": "13.4.0",
"@aws-sdk/client-sts": "^3.370.0",
"@aws-sdk/lib-dynamodb": "^3.370.0",
"@aws-sdk/lib-storage": "^3.370.0",
"@aws-sdk/types": "^3.370.0",
"@cumulus/aws-client": "16.1.1",
"@cumulus/cmrjs": "16.1.1",
"@cumulus/common": "16.1.1",
"@cumulus/cumulus-message-adapter-js": "2.0.4",
"@smithy/util-stream": "^2.0.17",
"date-fns": "^2.29.3",
"duration-fns": "^3.0.1",
"fp-ts": "^2.11.5",
"fp-ts-contrib": "^0.1.29",
"io-ts": "^2.2.16",
"io-ts-types": "^0.5.16",
"monocle-ts": "^2.3.13",
"newtype-ts": "^0.3.5"
"newtype-ts": "^0.3.5",
"xml2js": "^0.6.0"
},
"devDependencies": {
"@ava/typescript": "^3.0.1",
"@aws-sdk/client-dynamodb": "^3.370.0",
"@aws-sdk/util-stream-node": "^3.370.0",
"@cumulus/types": "13.4.0",
"@cumulus/types": "16.1.1",
"@istanbuljs/nyc-config-typescript": "^1.0.1",
"@tsconfig/node14": "^1.0.1",
"@tsconfig/node16": "^16.1.1",
"@types/aws-lambda": "^8.10.85",
"@types/lodash": "^4.14.177",
"@types/node": "^16.11.1",
@@ -97,7 +102,7 @@
"source-map-support": "^0.5.19",
"standard-version": "^9.0.0",
"ts-node": "^10.3.0",
"typedoc": "^0.22.6",
"typedoc": "^0.25.3",
"typescript": "^4.4.4"
},
"resolutions": {
2 changes: 1 addition & 1 deletion src/lib/aws/s3.fixture.ts
@@ -1,7 +1,7 @@
import { Readable } from 'stream';

import { GetObjectCommandInput } from '@aws-sdk/client-s3';
import { sdkStreamMixin } from '@aws-sdk/util-stream-node';
import { sdkStreamMixin } from '@smithy/util-stream';
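// sdkStreamMixin (from @smithy/util-stream, which supersedes the
// @aws-sdk/util-stream-node import above) wraps a plain Node Readable so it
// exposes the SDK's streaming-body helpers such as transformToString, letting
// this fixture stand in for real GetObject responses.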

const store: { readonly [Bucket: string]: { readonly [Key: string]: string } } = {
'my-bucket': {