Fix/66 new lambdas iac #68

Merged
merged 11 commits on Jan 10, 2024
1 change: 1 addition & 0 deletions api/src/functions/cpfValidation/cpfValidation.ts
@@ -6,6 +6,7 @@ import { logger } from 'src/lib/logger'

const apiEndpoint = 'https://example.com'

/* eslint-disable @typescript-eslint/no-unused-vars */
export const handler: S3Handler = async (event: S3Event): Promise<void> => {
try {
const bucket = event.Records[0].s3.bucket.name
12 changes: 6 additions & 6 deletions api/src/lib/aws.ts
@@ -15,7 +15,7 @@ import { getSignedUrl as awsGetSignedUrl } from '@aws-sdk/s3-request-presigner'
import { StreamingBlobPayloadInputTypes } from '@smithy/types'
import { QueryResolvers, CreateUploadInput } from 'types/graphql'

const CPF_REPORTER_BUCKET_NAME = `cpf-reporter-${process.env.environment}`
const REPORTING_DATA_BUCKET_NAME = `${process.env.REPORTING_DATA_BUCKET_NAME}`
Member Author:

I'm wondering if this actually needs to be a global constant – the only case I can think of where it would be necessary to reference a bucket name like this is when generating a pre-signed URL. For any Lambda function that's invoked via a bucket notification, it seems better to refer to the invocation event payload instead.
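For illustration, a minimal sketch of the event-payload approach (a hypothetical handler; the key-decoding step is an assumption and not part of this diff):

import type { S3Event, S3Handler } from 'aws-lambda'

// Sketch: derive the bucket and key from the S3 notification payload instead of
// a module-level constant.
export const handler: S3Handler = async (event: S3Event): Promise<void> => {
  const record = event.Records[0]
  const bucket = record.s3.bucket.name
  // S3 notification keys are URL-encoded; decoding is assumed here, not taken from this PR.
  const key = decodeURIComponent(record.s3.object.key.replace(/\+/g, ' '))
  // ...fetch and process the object identified by (bucket, key)...
}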


function getS3Client() {
let s3: S3Client
@@ -64,8 +64,8 @@ export function uploadWorkbook(
uploadId: number,
body: StreamingBlobPayloadInputTypes
) {
const folderName = `${upload.organizationId}/${upload.agencyId}/${upload.reportingPeriodId}/uploads/${upload.expenditureCategoryId}/${uploadId}/${upload.filename}`
return sendPutObjectToS3Bucket(CPF_REPORTER_BUCKET_NAME, folderName, body)
const folderName = `uploads/${upload.organizationId}/${upload.agencyId}/${upload.reportingPeriodId}/${upload.expenditureCategoryId}/${uploadId}/${upload.filename}`
Member Author:

Given that this path structure is duplicated below, it might be useful to add a helper function that returns the string S3 key for a given Upload object, and perhaps another that returns the S3 key for the JSON file resulting from the XLSM-to-JSON conversion.
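One possible shape for those helpers (a sketch only; the helper names and the UploadLike shape are assumptions inferred from the fields used above, not part of this change):

// Shape assumed from the fields referenced in this file; the real code may use a generated type.
interface UploadLike {
  organizationId: number
  agencyId: number
  reportingPeriodId: number
  expenditureCategoryId: number
  filename: string
}

// Hypothetical helper: S3 key for an uploaded workbook.
function workbookS3Key(upload: UploadLike, uploadId: number): string {
  return `uploads/${upload.organizationId}/${upload.agencyId}/${upload.reportingPeriodId}/${upload.expenditureCategoryId}/${uploadId}/${upload.filename}`
}

// Hypothetical helper: S3 key of the JSON produced by the XLSM-to-JSON conversion
// (appends ".json" to the workbook key, e.g. "<filename>.xlsm.json").
function workbookJsonS3Key(upload: UploadLike, uploadId: number): string {
  return `${workbookS3Key(upload, uploadId)}.json`
}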

return sendPutObjectToS3Bucket(REPORTING_DATA_BUCKET_NAME, folderName, body)
}

async function sendPutObjectToS3Bucket(
@@ -85,7 +85,7 @@ async function sendPutObjectToS3Bucket(

export function getTemplateRules(inputTemplateId: number) {
return sendHeadObjectToS3Bucket(
CPF_REPORTER_BUCKET_NAME,
REPORTING_DATA_BUCKET_NAME,
`templates/input_templates/${inputTemplateId}/rules/`
)
}
@@ -104,9 +104,9 @@ export async function s3PutSignedUrl(
uploadId: number
): Promise<string> {
const s3 = getS3Client()
const key = `${upload.organizationId}/${upload.agencyId}/${upload.reportingPeriodId}/uploads/${upload.expenditureCategoryId}/${uploadId}/${upload.filename}`
const key = `uploads/${upload.organizationId}/${upload.agencyId}/${upload.reportingPeriodId}/${upload.expenditureCategoryId}/${uploadId}/${upload.filename}`
const baseParams: PutObjectCommandInput = {
Bucket: CPF_REPORTER_BUCKET_NAME,
Bucket: REPORTING_DATA_BUCKET_NAME,
Key: key,
ContentType:
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
193 changes: 138 additions & 55 deletions terraform/functions.tf
@@ -47,8 +47,9 @@ locals {
var.datadog_default_environment_variables,
),
{
LOG_LEVEL = var.lambda_log_level
TZ = "UTC"
LOG_LEVEL = var.lambda_log_level
REPORTING_DATA_BUCKET_NAME = module.reporting_data_bucket.bucket_id
TZ = "UTC"
},
)
lambda_default_execution_policies = compact([
@@ -117,11 +118,11 @@ module "lambda_artifacts_bucket" {
]
}

module "cpf_uploads_bucket" {
module "reporting_data_bucket" {
source = "cloudposse/s3-bucket/aws"
version = "4.0.1"
context = module.s3_label.context
name = "cpf-reporter-${var.environment}"
name = "reporting_data"

acl = "private"
versioning_enabled = true
@@ -178,43 +179,45 @@ resource "aws_s3_object" "lambda_artifact-cpfValidation" {
server_side_encryption = "AES256"
}

resource "aws_s3_bucket_notification" "json_notification" {
bucket = module.cpf_uploads_bucket.bucket_id
resource "aws_s3_bucket_notification" "reporting_data" {
bucket = module.reporting_data_bucket.bucket_id

lambda_function {
lambda_function_arn = module.lambda_function-cpfValidation.lambda_function_arn
lambda_function_arn = module.lambda_function-excelToJson.lambda_function_arn
events = ["s3:ObjectCreated:*"]
filter_suffix = ".json"
filter_prefix = "uploads/"
filter_suffix = ".xlsm"
}
}

resource "aws_s3_bucket_notification" "excel_notification" {
bucket = module.cpf_uploads_bucket.bucket_id

lambda_function {
lambda_function_arn = module.lambda_function-excelToJson.lambda_function_arn
lambda_function_arn = module.lambda_function-cpfValidation.lambda_function_arn
events = ["s3:ObjectCreated:*"]
filter_suffix = ".xlsm"
filter_prefix = "uploads/"
filter_suffix = ".xlsm.json"
}
}

module "lambda_function-graphql" {
source = "terraform-aws-modules/lambda/aws"
version = "6.5.0"

// Metadata
function_name = "${var.namespace}-graphql"
description = "GraphQL API server for the CPF Reporter service."

vpc_subnet_ids = local.private_subnet_ids
// Networking
attach_network_policy = true
vpc_subnet_ids = local.private_subnet_ids
vpc_security_group_ids = [
module.lambda_security_group.id,
module.postgres.security_group_id,
]
attach_network_policy = true

// Permissions
role_permissions_boundary = local.permissions_boundary_arn
attach_cloudwatch_logs_policy = true
cloudwatch_logs_retention_in_days = var.log_retention_in_days
attach_policy_jsons = true
attach_policy_jsons = length(local.lambda_default_execution_policies) > 0
number_of_policy_jsons = length(local.lambda_default_execution_policies)
policy_jsons = local.lambda_default_execution_policies
attach_policy_statements = true
@@ -241,20 +244,21 @@ module "lambda_function-graphql" {
}
}

handler = var.datadog_enabled ? local.datadog_lambda_handler : "graphql.handler"
runtime = var.lambda_runtime
architectures = [var.lambda_arch]
publish = true
layers = local.lambda_layer_arns

// Artifacts
publish = true
create_package = false
s3_existing_package = {
bucket = aws_s3_object.lambda_artifact-graphql.bucket
key = aws_s3_object.lambda_artifact-graphql.key
}

timeout = 25 # seconds (API Gateway limit is 30 seconds)
memory_size = 512 # MB
// Runtime
handler = var.datadog_enabled ? local.datadog_lambda_handler : "graphql.handler"
runtime = var.lambda_runtime
architectures = [var.lambda_arch]
layers = local.lambda_layer_arns
timeout = 25 # seconds (API Gateway limit is 30 seconds)
memory_size = 512 # MB
environment_variables = merge(local.lambda_default_environment_variables, {
// Function-specific environment variables go here:
DATABASE_URL = format(
@@ -274,6 +278,7 @@ module "lambda_function-graphql" {
PASSAGE_API_KEY_SECRET_ARN = data.aws_ssm_parameter.passage_api_key_secret_arn.value
})

// Triggers
allowed_triggers = {
APIGateway = {
service = "apigateway"
@@ -286,54 +291,132 @@ module "lambda_function-excelToJson" {
source = "terraform-aws-modules/lambda/aws"
version = "6.5.0"

function_name = "excel-to-json"
// Metadata
function_name = "${var.namespace}-excelToJson"
description = "Reacts to S3 events and converts Excel files to JSON."

vpc_subnet_ids = local.private_subnet_ids
vpc_security_group_ids = [
module.lambda_security_group.id,
module.postgres.security_group_id,
]
Comment on lines -292 to -296 (Member Author):

Since the excelToJson handler doesn't seem to require DB access at the moment, we can avoid adding the Lambda function to the VPC.

handler = "index.handler"
architectures = [var.lambda_arch]
runtime = var.lambda_runtime
publish = true
layers = local.lambda_layer_arns
// Networking
attach_network_policy = false
vpc_subnet_ids = null
vpc_security_group_ids = null

// Permissions
role_permissions_boundary = local.permissions_boundary_arn
attach_cloudwatch_logs_policy = true
cloudwatch_logs_retention_in_days = var.log_retention_in_days
attach_policy_jsons = length(local.lambda_default_execution_policies) > 0
number_of_policy_jsons = length(local.lambda_default_execution_policies)
policy_jsons = local.lambda_default_execution_policies
attach_policy_statements = true
policy_statements = {
AllowDownloadExcelObjects = {
effect = "Allow"
actions = [
"s3:GetObject",
"s3:HeadObject",
]
resources = [
# Path: uploads/{organization_id}/{agency_id}/{reporting_period_id}/{expenditure_category_code}/{upload_id}/{filename}.xlsm
"${module.reporting_data_bucket.bucket_arn}/uploads/*/*/*/*/*/*.xlsm",
]
}
AllowUploadJsonObjects = {
effect = "Allow"
actions = ["s3:PutObject"]
resources = [
# Path: uploads/{organization_id}/{agency_id}/{reporting_period_id}/{expenditure_category_code}/{upload_id}/{filename}.xlsm.json
"${module.reporting_data_bucket.bucket_arn}/uploads/*/*/*/*/*/*.xlsm.json",
]
}
}

// Artifacts
create_package = false
s3_existing_package = {
bucket = aws_s3_object.lambda_artifact-excelToJson.bucket
key = aws_s3_object.lambda_artifact-excelToJson.key
}

role_name = "lambda-role-excelToJson"
attach_policy = true
policy = "arn:aws:iam::aws:policy/AmazonS3FullAccess"
// Runtime
handler = var.datadog_enabled ? local.datadog_lambda_handler : "excelToJson.handler"
runtime = var.lambda_runtime
architectures = [var.lambda_arch]
publish = true
layers = local.lambda_layer_arns
timeout = 300 # 5 minutes, in seconds
memory_size = 512 # MB
environment_variables = merge(local.lambda_default_environment_variables, {
DD_LAMBDA_HANDLER = "excelToJson.handler"
})

// Triggers
allowed_triggers = {
S3BucketNotification = {
principal = "s3.amazonaws.com"
source_arn = module.reporting_data_bucket.bucket_arn
}
}
}

module "lambda_function-cpfValidation" {
source = "terraform-aws-modules/lambda/aws"
version = "6.5.0"
function_name = "cpf-validation"
source = "terraform-aws-modules/lambda/aws"
version = "6.5.0"

// Metadata
function_name = "${var.namespace}-cpfValidation"
description = "Reacts to S3 events and validates CPF JSON files."

vpc_subnet_ids = local.private_subnet_ids
vpc_security_group_ids = [
module.lambda_security_group.id,
module.postgres.security_group_id,
]
Comment on lines -319 to -323 (Member Author):

Since the cpfValidation handler doesn't seem to require DB access at the moment, we can avoid adding the Lambda function to the VPC.

handler = "index.handler"
architectures = [var.lambda_arch]
runtime = var.lambda_runtime
// Networking
vpc_subnet_ids = null
vpc_security_group_ids = null
attach_network_policy = false

// Permissions
role_permissions_boundary = local.permissions_boundary_arn
attach_cloudwatch_logs_policy = true
cloudwatch_logs_retention_in_days = var.log_retention_in_days
attach_policy_jsons = length(local.lambda_default_execution_policies) > 0
number_of_policy_jsons = length(local.lambda_default_execution_policies)
policy_jsons = local.lambda_default_execution_policies
attach_policy_statements = true
policy_statements = {
AllowDownloadJSONObjects = {
effect = "Allow"
actions = [
"s3:GetObject",
"s3:HeadObject",
]
resources = [
# Path: uploads/{organization_id}/{agency_id}/{reporting_period_id}/{expenditure_category_code}/{upload_id}/{filename}.xlsm.json
"${module.reporting_data_bucket.bucket_arn}/uploads/*/*/*/*/*/*.xlsm.json",
]
}
}

// Artifacts
publish = true
layers = local.lambda_layer_arns
create_package = false
s3_existing_package = {
bucket = aws_s3_object.lambda_artifact-cpfValidation.bucket
key = aws_s3_object.lambda_artifact-cpfValidation.key
}

role_name = "lambda-role-cpfValidation"
attach_policy = true
policy = "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
# TODO: we need a policy for calling an API endpoint on the application for validation
// Runtime
handler = var.datadog_enabled ? local.datadog_lambda_handler : "cpfValidation.handler"
runtime = var.lambda_runtime
architectures = [var.lambda_arch]
layers = local.lambda_layer_arns
timeout = 60 # 1 minute, in seconds
memory_size = 512
environment_variables = merge(local.lambda_default_environment_variables, {
DD_LAMBDA_HANDLER = "cpfValidation.handler"
})

// Triggers
allowed_triggers = {
S3BucketNotification = {
principal = "s3.amazonaws.com"
source_arn = module.reporting_data_bucket.bucket_arn
}
}
}