diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d4c5790..c2668c6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -34,7 +34,7 @@ following tools: 1. Clone and open this repository: ```bash - git clone https://github.com/sgtoj/terraform-aws-teleport-cluster.git + git clone https://github.com/cruxstack/terraform-aws-teleport-cluster.git code terraform-aws-teleport-cluster ``` diff --git a/README.md b/README.md index e3d82e7..0570be5 100755 --- a/README.md +++ b/README.md @@ -1,43 +1,78 @@ -# Terraform Module: AWS Teleport Cluster +# Terraform Module: Teleport Cluster -This project is under development. See `dev` branch for latest activity. +This Terraform module deploys a Teleport cluster in high availability (HA) +configuration. [Teleport](https://github.com/gravitational/teleport) is a modern +zero-trust solution by Gravitational. -## Prerequisites +### Features -- Terraform v0.13.0 or newer -- An AWS account +- **High Availability**: Deploys Teleport in a highly available configuration to + ensure uninterrupted access. +- **Managed Upgrades**: Supports controlled upgrades to new versions of + Teleport. +- **Secure**: Uses AWS Key Management Service (KMS) to secure sensitive data. +- **Scalable**: Can handle growth in your user base and infrastructure without a + corresponding increase in complexity. +- **Integrated**: Works well with your existing infrastructure by following + CloudPosse's context and labeling patterns. ## Usage +Deploy it using the block below. For first-time deployments, it may take 10 +minutes before the web portal is available. 
+ + + ```hcl module "teleport_cluster" { source = "cruxstack/teleport-cluster/aws" version = "x.x.x" - # TBD + teleport_letsencrypt_email = "letsencrypt@example.com" + teleport_runtime_version = "10.3.15" + dns_parent_zone_id = "Z0000000000000000000" + dns_parent_zone_name = "demo.example.com" + vpc_id = "vpc-00000000000000" + vpc_private_subnet_ids = ["subnet-00000000000000", "subnet-11111111111111111", "subnet-22222222222222222"] + vpc_public_subnet_ids = ["subnet-33333333333333", "subnet-44444444444444444", "subnet-55555555555555555"] + teleport_setup_mode = false } ``` -## Requirements - -- Terraform 0.13.0 or later -- AWS provider - ## Inputs In addition to the variables documented below, this module includes several other optional variables (e.g., `name`, `tags`, etc.) provided by the `cloudposse/label/null` module. Please refer to the [`cloudposse/label` documentation](https://registry.terraform.io/modules/cloudposse/label/null/latest) for more details on these variables. -| Name | Description | Type | Default | Required | -|-------------|-------------|:------:|:-------:|:--------:| -| `placehold` | N/A | string | null | No | +| Name | Description | Type | Default | Required | +|------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------|---------|:--------:| +| `teleport_runtime_version` | The runtime version of Teleport. | `string` | n/a | yes | +| `teleport_letsencrypt_email` | The email address to use for Let's Encrypt. | `string` | n/a | yes | +| `teleport_setup_mode` | Toggle Teleport setup mode. | `bool` | `true` | no | +| `teleport_experimental_mode` | Toggle Teleport experimental mode. | `bool` | `false` | no | +| `instance_config` | Configuration for the instances. Each type (`auth`, `node`, `proxy`) contains an object with `count` and `sizes`. | `object` | `{}` | no | +| `artifacts_bucket_name` | The name of the S3 bucket for artifacts. 
| `string` | `""` | no | +| `logs_bucket_name` | The name of the S3 bucket for logs. | `string` | `""` | no | +| `dns_parent_zone_id` | The ID of the parent DNS zone. | `string` | n/a | yes | +| `dns_parent_zone_name` | The name of the parent DNS zone. | `string` | n/a | yes | +| `vpc_id` | The ID of the VPC to deploy resources into. | `string` | n/a | yes | +| `vpc_private_subnet_ids` | The IDs of the private subnets in the VPC to deploy resources into. | `list(string)` | n/a | yes | +| `vpc_public_subnet_ids` | The IDs of the public subnets in the VPC to deploy resources into. | `list(string)` | n/a | yes | +| `aws_region_name` | The name of the AWS region. | `string` | `""` | no | +| `aws_account_id` | The ID of the AWS account. | `string` | `""` | no | +| `aws_kv_namespace` | The namespace or prefix for AWS SSM parameters and similar resources. | `string` | `""` | no | -## Outputs +### Outputs -| Name | Description | -|-------------|-------------| -| `placehold` | N/A | +| Name | Description | +|-------------------------|------------------------------------------------------------------| +| `teleport_dns_name` | The DNS name of the Teleport service. | +| `teleport_auth_config` | The configuration details for the Teleport auth service. | +| `teleport_node_config` | The configuration details for the Teleport node service. | +| `teleport_proxy_config` | The configuration details for the Teleport proxy service. | +| `security_group_id` | The ID of the security group created for the Teleport service. | +| `security_group_name` | The name of the security group created for the Teleport service. 
| ## Contributing diff --git a/assets/image-files/teleport-all-pre-start b/assets/image-files/teleport-all-pre-start new file mode 100644 index 0000000..e172249 --- /dev/null +++ b/assets/image-files/teleport-all-pre-start @@ -0,0 +1,14 @@ +#!/bin/bash +# shellcheck disable=SC1091 +set -e -x + +source "/etc/teleport.d/conf" + +# copy certificates into place +/bin/aws s3 sync "s3://${TELEPORT_S3_BUCKET}/live/${TELEPORT_DOMAIN_NAME}" /var/lib/teleport + +# disable influxdb +systemctl stop telegraf +systemctl stop influxdb +systemctl disable telegraf +systemctl disable influxdb diff --git a/assets/image-files/teleport-generate-config b/assets/image-files/teleport-generate-config new file mode 100644 index 0000000..daa5ee0 --- /dev/null +++ b/assets/image-files/teleport-generate-config @@ -0,0 +1,77 @@ +#!/bin/bash +# shellcheck disable=SC1091,SC2002 +set -e -o pipefail + +TELEPORT_CONFIG_TEMPLATE_PATH=/etc/teleport.d/teleport.tmpl.yaml +TELEPORT_CONFIG_PATH=/etc/teleport.yaml + +# ================================================================= function === + +get_aws_metadata() { + REQUEST_PATH="${1}" + IMDS_TOKEN=$(curl -m5 -sS -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 300") + curl -m5 -sS -H "X-aws-ec2-metadata-token: ${IMDS_TOKEN}" "http://169.254.169.254/latest/${REQUEST_PATH}" 2>/dev/null +} + +# =================================================================== script === + +# ------------------------------------------------------------------- config --- + +if getent passwd teleport >/dev/null 2>&1 && getent group adm >/dev/null 2>&1; then + if [ ! 
-d /var/lib/teleport ]; then + mkdir -p /var/lib/teleport + fi + chown -R teleport:adm /var/lib/teleport +fi + +source "/etc/teleport.d/conf" + +echo "${TELEPORT_ROLE}" >> "/etc/teleport.d/role.${TELEPORT_ROLE}" + +INSTANCE_HOSTNAME=$(get_aws_metadata "meta-data/local-hostname") +INSTANCE_PRIVATE_IP=$(get_aws_metadata "meta-data/local-ipv4") + +export TELEPORT_NODENAME=${INSTANCE_HOSTNAME} +export TELEPORT_ADVERTISE_IP=${INSTANCE_PRIVATE_IP} + +cat "$TELEPORT_CONFIG_TEMPLATE_PATH" | envsubst > "${TELEPORT_CONFIG_PATH}" + +chmod 664 "${TELEPORT_CONFIG_PATH}" +if getent passwd teleport >/dev/null 2>&1 && getent group adm >/dev/null 2>&1; then + chown teleport:adm ${TELEPORT_CONFIG_PATH} +fi + +# ----------------------------------------------------------------- services --- + +if [[ "${TELEPORT_ROLE}" == "auth" ]]; then + + systemctl enable teleport-ssm-publish-tokens.service teleport-ssm-publish-tokens.timer + systemctl start teleport-ssm-publish-tokens.timer + + systemctl enable teleport-get-cert.service teleport-get-cert.timer + systemctl enable teleport-renew-cert.service teleport-renew-cert.timer + systemctl start --no-block teleport-get-cert.timer + systemctl start --no-block teleport-renew-cert.timer + + systemctl disable teleport.service + systemctl enable teleport-auth.service + systemctl start --no-block teleport-auth.service + +elif [[ "${TELEPORT_ROLE}" == "proxy" ]]; then + + systemctl enable teleport-check-cert.service teleport-check-cert.timer + systemctl start --no-block teleport-check-cert.timer + + systemctl disable teleport.service + systemctl enable teleport-proxy.service + systemctl start --no-block teleport-proxy.service + +elif [[ "${TELEPORT_ROLE}" == "node" ]]; then + + systemctl disable teleport.service + systemctl enable teleport-node.service + systemctl start --no-block teleport-node.service + +fi + + diff --git a/assets/image-files/teleport-ssm-publish-tokens b/assets/image-files/teleport-ssm-publish-tokens new file mode 100644 index 
0000000..d35f551 --- /dev/null +++ b/assets/image-files/teleport-ssm-publish-tokens @@ -0,0 +1,30 @@ +#!/bin/bash +# shellcheck disable=SC1091,SC2002 +set -e -o pipefail + +source /etc/teleport.d/conf + +TCTL=/usr/local/bin/tctl + +PROXY_TOKEN=$(uuid -v4) +${TCTL} nodes add --roles=proxy --ttl=4h --token="${PROXY_TOKEN}" +aws ssm put-parameter --name "/teleport/${TELEPORT_CLUSTER_NAME}/tokens/proxy" --region "${EC2_REGION}" --type="SecureString" --value="${PROXY_TOKEN}" --overwrite + +NODE_TOKEN=$(uuid -v4) +${TCTL} nodes add --roles=node,app,db --ttl=4h --token="${NODE_TOKEN}" +aws ssm put-parameter --name "/teleport/${TELEPORT_CLUSTER_NAME}/tokens/node" --region "${EC2_REGION}" --type="SecureString" --value="${NODE_TOKEN}" --overwrite + +KUBE_TOKEN=$(uuid -v4) +${TCTL} nodes add --roles=kube --ttl=4h --token="${KUBE_TOKEN}" +aws ssm put-parameter --name "/teleport/${TELEPORT_CLUSTER_NAME}/tokens/kube" --region "${EC2_REGION}" --type="SecureString" --value="${KUBE_TOKEN}" --overwrite + +APP_TOKEN=$(uuid -v4) +${TCTL} nodes add --roles=app --ttl=4h --token="${APP_TOKEN}" +aws ssm put-parameter --name "/teleport/${TELEPORT_CLUSTER_NAME}/tokens/app" --region "${EC2_REGION}" --type="SecureString" --value="${APP_TOKEN}" --overwrite + +DATABASE_TOKEN=$(uuid -v4) +${TCTL} nodes add --roles=db --ttl=4h --token="${DATABASE_TOKEN}" +aws ssm put-parameter --name "/teleport/${TELEPORT_CLUSTER_NAME}/tokens/db" --region "${EC2_REGION}" --type="SecureString" --value="${DATABASE_TOKEN}" --overwrite + +CA_PIN_HASH=$(tctl status | grep "CA pin" | awk '{print $3}') +aws ssm put-parameter --name "/teleport/${TELEPORT_CLUSTER_NAME}/ca-pin-hash" --region "${EC2_REGION}" --type="String" --value="${CA_PIN_HASH}" --overwrite diff --git a/examples/complete/README.md b/examples/complete/README.md index 1ac91ed..4d15956 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -1 +1,45 @@ # Example: Complete + +This directory contains a complete example of how to use the 
Teleport Cluster +Terraform module in a real-world scenario. + +## Overview + +This example deploys a Teleport cluster with the following configuration: + +- Teleport auth, node, and proxy services deployed in a high-availability (HA) + configuration. +- Deployment into a specified AWS VPC and subnets. + +## Usage + +To run this example, provide your own values for the following variables in a +`.terraform.tfvars` file: + +```hcl +teleport_letsencrypt_email = "your-email@example.com" +dns_parent_zone_id = "your-dns-zone-id" +dns_parent_zone_name = "your-dns-zone-name" +vpc_id = "your-vpc-id" +vpc_private_subnet_ids = ["your-private-subnet-id"] +vpc_public_subnet_ids = ["your-public-subnet-id"] +``` + +## Inputs + +| Name | Description | Type | Default | Required | +|----------------------------|---------------------------------------------------------------------|----------------|---------|:--------:| +| teleport_letsencrypt_email | The email address to use for Let's Encrypt. | `string` | n/a | yes | +| dns_parent_zone_id | The ID of the parent DNS zone. | `string` | n/a | yes | +| dns_parent_zone_name | The name of the parent DNS zone. | `string` | n/a | yes | +| vpc_id | The ID of the VPC to deploy resources into. | `string` | n/a | yes | +| vpc_private_subnet_ids | The IDs of the private subnets in the VPC to deploy resources into. | `list(string)` | n/a | yes | +| vpc_public_subnet_ids | The IDs of the public subnets in the VPC to deploy resources into. | `list(string)` | n/a | yes | + +## Outputs + +| Name | Description | +|-------------------------|---------------------------------------| +| teleport_dns_name | The DNS name of the Teleport service. | +| teleport_web_portal_url | The URL of the Teleport web portal. 
| +``` diff --git a/examples/complete/main.tf b/examples/complete/main.tf index e69de29..1ef196b 100755 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -0,0 +1,35 @@ +locals {} + +# ================================================================== example === + +module "teleport_cluster" { + source = "../.." + + teleport_experimental_mode = true + teleport_letsencrypt_email = var.teleport_letsencrypt_email + teleport_runtime_version = var.teleport_runtime_version + dns_parent_zone_id = var.dns_parent_zone_id + dns_parent_zone_name = var.dns_parent_zone_name + vpc_id = var.vpc_id + vpc_private_subnet_ids = var.vpc_private_subnet_ids + vpc_public_subnet_ids = var.vpc_public_subnet_ids + teleport_setup_mode = false + + context = module.example_label.context # not required +} + +# ===================================================== supporting-resources === + +module "example_label" { + source = "cloudposse/label/null" + version = "0.25.0" + + name = "tf-example-complete-${random_string.example_random_suffix.result}" + environment = "use1" # us-east-1 +} + +resource "random_string" "example_random_suffix" { + length = 6 + special = false + upper = false +} diff --git a/examples/complete/output.tf b/examples/complete/output.tf index e69de29..c9810bd 100644 --- a/examples/complete/output.tf +++ b/examples/complete/output.tf @@ -0,0 +1,9 @@ +output "teleport_dns_name" { + value = module.teleport_cluster.teleport_dns_name + description = "The DNS name of the Teleport service." +} + +output "teleport_web_portal_url" { + value = "https://${module.teleport_cluster.teleport_dns_name}/web" + description = "The URL of the Teleport web portal." +} diff --git a/examples/complete/variables.tf b/examples/complete/variables.tf index e69de29..cfbaaae 100644 --- a/examples/complete/variables.tf +++ b/examples/complete/variables.tf @@ -0,0 +1,35 @@ +variable "teleport_runtime_version" { + type = string + description = "The runtime version of Teleport." 
+ default = "10.3.15" +} + +variable "teleport_letsencrypt_email" { + type = string + description = "The email address to use for Let's Encrypt." +} + +variable "dns_parent_zone_id" { + type = string + description = "The ID of the parent DNS zone." +} + +variable "dns_parent_zone_name" { + type = string + description = "The name of the parent DNS zone." +} + +variable "vpc_id" { + type = string + description = "The ID of the VPC to deploy resources into." +} + +variable "vpc_private_subnet_ids" { + type = list(string) + description = "The IDs of the private subnets in the VPC to deploy resources into." +} + +variable "vpc_public_subnet_ids" { + type = list(string) + description = "The IDs of the public subnets in the VPC to deploy resources into." +} diff --git a/main.tf b/main.tf index e69de29..a71969c 100755 --- a/main.tf +++ b/main.tf @@ -0,0 +1,445 @@ +locals { + enabled = coalesce(var.enabled, module.this.enabled, true) + name = coalesce(var.name, module.this.name, "teleport-cluster-${random_string.teleport_cluster_random_suffix.result}") + + aws_account_id = try(coalesce(var.aws_account_id, data.aws_caller_identity.current[0].account_id), "") + aws_region_name = try(coalesce(var.aws_region_name, data.aws_region.current[0].name), "") + aws_kv_namespace = trim(coalesce(var.aws_kv_namespace, "teleport-cluster/${module.teleport_cluster_label.id}"), "/") + + teleport_cluster_name = join("-", [module.teleport_cluster_label.name, module.teleport_cluster_label.stage, module.teleport_cluster_label.environment]) + teleport_image_name = "gravitational-teleport-ami-oss-${var.teleport_runtime_version}" + teleport_image_id = try(data.aws_ami.official_image[0].id, "") + teleport_letsencrypt_email = var.teleport_letsencrypt_email + teleport_setup_mode = var.teleport_setup_mode + teleport_experimental_mode = var.teleport_experimental_mode + teleport_aws_account_id = "126027368216" # gravitational teleport's aws account id for ami filtering + + artifacts_bucket_name = 
coalesce(var.artifacts_bucket_name, local.teleport_bucket_name) + logs_bucket_name = coalesce(var.logs_bucket_name, local.teleport_bucket_name) + teleport_bucket_name = module.s3_bucket.bucket_id + + is_teleport_and_logs_bucket_same = local.artifacts_bucket_name == local.logs_bucket_name + + instance_config = { + auth = merge({ sizes = ["t3.micro", "t3a.micro"], count = 1 }, lookup(var.instance_config, "auth", {})) + node = merge({ sizes = ["t3.micro", "t3a.micro"], count = 1 }, lookup(var.instance_config, "node", {})) + proxy = merge({ sizes = ["t3.micro", "t3a.micro"], count = 1 }, lookup(var.instance_config, "proxy", {})) + } +} + +data "aws_caller_identity" "current" { + count = module.this.enabled && var.aws_account_id == "" ? 1 : 0 +} + +data "aws_region" "current" { + count = module.this.enabled && var.aws_region_name == "" ? 1 : 0 +} + +# ================================================================= teleport === + +module "teleport_cluster_label" { + source = "cloudposse/label/null" + version = "0.25.0" + + enabled = local.enabled + name = local.name + context = module.this.context +} + +# only appliable if name variable was not set +resource "random_string" "teleport_cluster_random_suffix" { + length = 6 + special = false + upper = false +} + +# ================================================================== cluster === + +module "auth_servers" { + source = "./modules/teleport-node" + + instance_sizes = local.instance_config.auth.sizes + instance_count = local.instance_config.auth.count + + teleport_cluster_name = local.teleport_cluster_name + teleport_image_id = local.teleport_image_id + teleport_letsencrypt_email = local.teleport_letsencrypt_email + teleport_node_type = "auth" + teleport_setup_mode = local.teleport_setup_mode + + teleport_bucket_name = module.s3_bucket.bucket_id + teleport_ddb_table_events_name = aws_dynamodb_table.events[0].name + teleport_ddb_table_locks_name = aws_dynamodb_table.locks[0].name + teleport_ddb_table_state_name = 
aws_dynamodb_table.state[0].name + teleport_security_group_ids = compact([module.security_group.id]) + + experimental = local.teleport_experimental_mode + + dns_parent_zone_id = var.dns_parent_zone_id + dns_parent_zone_name = var.dns_parent_zone_name + artifacts_bucket_name = local.artifacts_bucket_name + logs_bucket_name = local.logs_bucket_name + vpc_id = var.vpc_id + vpc_private_subnet_ids = var.vpc_private_subnet_ids + vpc_public_subnet_ids = var.vpc_public_subnet_ids + aws_account_id = local.aws_account_id + aws_kv_namespace = local.aws_kv_namespace + aws_region_name = local.aws_region_name + + context = module.teleport_cluster_label.context +} + +module "proxy_servers" { + source = "./modules/teleport-node" + + instance_sizes = local.instance_config.proxy.sizes + instance_count = local.instance_config.proxy.count + + teleport_cluster_name = local.teleport_cluster_name + teleport_image_id = local.teleport_image_id + teleport_letsencrypt_email = local.teleport_letsencrypt_email + teleport_node_type = "proxy" + teleport_setup_mode = local.teleport_setup_mode + + teleport_auth_address = module.auth_servers.lb_dns_name + teleport_bucket_name = module.s3_bucket.bucket_id + teleport_ddb_table_events_name = aws_dynamodb_table.events[0].name + teleport_ddb_table_locks_name = aws_dynamodb_table.locks[0].name + teleport_ddb_table_state_name = aws_dynamodb_table.state[0].name + teleport_security_group_ids = compact([module.security_group.id]) + + experimental = local.teleport_experimental_mode + + dns_parent_zone_id = var.dns_parent_zone_id + dns_parent_zone_name = var.dns_parent_zone_name + artifacts_bucket_name = local.artifacts_bucket_name # todo - create bucket with module + logs_bucket_name = local.logs_bucket_name + vpc_id = var.vpc_id + vpc_private_subnet_ids = var.vpc_private_subnet_ids + vpc_public_subnet_ids = var.vpc_public_subnet_ids + aws_account_id = local.aws_account_id + aws_kv_namespace = local.aws_kv_namespace + aws_region_name = local.aws_region_name + 
+ context = module.teleport_cluster_label.context +} + +module "node_servers" { + source = "./modules/teleport-node" + + instance_sizes = local.instance_config.node.sizes + instance_count = local.instance_config.node.count + + teleport_cluster_name = local.teleport_cluster_name + teleport_image_id = local.teleport_image_id + teleport_letsencrypt_email = local.teleport_letsencrypt_email + teleport_node_type = "node" + teleport_setup_mode = local.teleport_setup_mode + + teleport_auth_address = module.auth_servers.lb_dns_name + teleport_bucket_name = module.s3_bucket.bucket_id + teleport_ddb_table_events_name = aws_dynamodb_table.events[0].name + teleport_ddb_table_locks_name = aws_dynamodb_table.locks[0].name + teleport_ddb_table_state_name = aws_dynamodb_table.state[0].name + teleport_security_group_ids = compact([module.security_group.id]) + + experimental = local.teleport_experimental_mode + + dns_parent_zone_id = var.dns_parent_zone_id + dns_parent_zone_name = var.dns_parent_zone_name + artifacts_bucket_name = local.artifacts_bucket_name # todo - create bucket with module + logs_bucket_name = local.logs_bucket_name + vpc_id = var.vpc_id + vpc_private_subnet_ids = var.vpc_private_subnet_ids + vpc_public_subnet_ids = var.vpc_public_subnet_ids + aws_account_id = local.aws_account_id + aws_kv_namespace = local.aws_kv_namespace + aws_region_name = local.aws_region_name + + context = module.teleport_cluster_label.context +} + +# ========================================================= cluster-resource === + +# ---------------------------------------------------------------------- ddb --- + +resource "aws_dynamodb_table" "state" { + count = local.enabled ? 
1 : 0 + + name = "${module.teleport_cluster_label.id}-state" + billing_mode = "PAY_PER_REQUEST" + hash_key = "HashKey" + range_key = "FullPath" + stream_enabled = "true" + stream_view_type = "NEW_IMAGE" + + server_side_encryption { + enabled = true + } + + lifecycle { + ignore_changes = [ + read_capacity, + write_capacity, + ] + } + + attribute { + name = "HashKey" + type = "S" + } + + attribute { + name = "FullPath" + type = "S" + } + + ttl { + attribute_name = "Expires" + enabled = true + } + + tags = module.teleport_cluster_label.tags +} + +resource "aws_dynamodb_table" "events" { + count = local.enabled ? 1 : 0 + + name = "${module.teleport_cluster_label.id}-events" + billing_mode = "PAY_PER_REQUEST" + hash_key = "SessionID" + range_key = "EventIndex" + + point_in_time_recovery { + enabled = true + } + + server_side_encryption { + enabled = true + } + + global_secondary_index { + name = "timesearchV2" + hash_key = "CreatedAtDate" + range_key = "CreatedAt" + write_capacity = 10 + read_capacity = 10 + projection_type = "ALL" + } + + lifecycle { + ignore_changes = all + } + + attribute { + name = "SessionID" + type = "S" + } + + attribute { + name = "EventIndex" + type = "N" + } + + attribute { + name = "CreatedAtDate" + type = "S" + } + + attribute { + name = "CreatedAt" + type = "N" + } + + ttl { + attribute_name = "Expires" + enabled = true + } + + tags = module.teleport_cluster_label.tags +} + +resource "aws_dynamodb_table" "locks" { + count = local.enabled ? 
1 : 0 + + name = "${module.teleport_cluster_label.id}-locks" + billing_mode = "PAY_PER_REQUEST" + hash_key = "Lock" + + + lifecycle { + ignore_changes = [ + read_capacity, + write_capacity, + ] + } + + attribute { + name = "Lock" + type = "S" + } + + ttl { + attribute_name = "Expires" + enabled = true + } + + tags = merge(module.teleport_cluster_label.tags, { + TeleportCluster = local.teleport_cluster_name + }) +} + +# ----------------------------------------------------------------------- s3 --- + +module "s3_bucket" { + source = "cloudposse/s3-bucket/aws" + version = "3.1.2" + + acl = "private" + block_public_policy = true + force_destroy = local.teleport_experimental_mode + sse_algorithm = "AES256" + allow_ssl_requests_only = true + source_policy_documents = [data.aws_iam_policy_document.bucket.json] + + logging = { + bucket_name = local.logs_bucket_name + prefix = "access/s3/${module.teleport_cluster_label.id}" + } + + lifecycle_configuration_rules = [{ + enabled = true + id = "transition-old-versions" + + abort_incomplete_multipart_upload_days = 5 + + filter_and = null + expiration = null + transition = [] + noncurrent_version_expiration = null + + noncurrent_version_transition = [ + { + newer_noncurrent_versions = 2 + noncurrent_days = 30 + storage_class = "STANDARD_IA" + }, + { + newer_noncurrent_versions = 2 + noncurrent_days = 60 + storage_class = "GLACIER" + } + ] + }] + + context = module.teleport_cluster_label.context +} + +data "aws_iam_policy_document" "bucket" { + dynamic "statement" { + for_each = local.is_teleport_and_logs_bucket_same ? 
[true] : [] + + content { + sid = "AWSLogDeliveryWrite" + effect = "Allow" + actions = ["s3:PutObject"] + resources = ["arn:aws:s3:::${local.logs_bucket_name}/AWSLogs/${local.aws_account_id}/*"] + + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + + condition { + test = "StringEquals" + variable = "s3:x-amz-acl" + values = ["bucket-owner-full-control"] + } + } + } + + dynamic "statement" { + for_each = local.is_teleport_and_logs_bucket_same ? [true] : [] + + content { + sid = "AWSLogDeliveryAclCheck" + effect = "Allow" + actions = ["s3:GetBucketAcl"] + resources = ["arn:aws:s3:::${local.logs_bucket_name}"] + + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + } + } +} + +# ----------------------------------------------------------- security-group --- + +module "security_group" { + source = "cloudposse/security-group/aws" + version = "2.2.0" + + vpc_id = var.vpc_id + + rules = [{ + key = "gropu-egress" + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + description = "allow all group egress" + cidr_blocks = [] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = true + }, { + key = "group-ingress" + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + description = "allow all group ingress" + cidr_blocks = [] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = true + }] + + tags = merge(module.teleport_cluster_label.tags, { Name = module.teleport_cluster_label.id }) + context = module.teleport_cluster_label.context +} + +# ================================================================== uploads === + +resource "aws_s3_object" "image_files" { + for_each = toset(local.enabled ? 
fileset("${path.module}/assets/image-files", "*") : []) + + bucket = local.artifacts_bucket_name + key = "${local.aws_kv_namespace}/image/files/bin/${each.key}" + source = "${path.module}/assets/image-files/${each.key}" + etag = filemd5("${path.module}/assets/image-files/${each.key}") + + tags = module.teleport_cluster_label.tags +} + +# =================================================================== lookup === + +data "aws_ssm_parameter" "amzn2_image_id" { + count = module.teleport_cluster_label.enabled ? 1 : 0 + name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-ebs" +} + +data "aws_ami" "official_image" { + count = module.teleport_cluster_label.enabled ? 1 : 0 + + most_recent = true + owners = [local.teleport_aws_account_id] + + filter { + name = "name" + values = [local.teleport_image_name] + } +} + diff --git a/modules/teleport-node/assets/cloud-init/cloud-config.yaml b/modules/teleport-node/assets/cloud-init/cloud-config.yaml new file mode 100644 index 0000000..f7c6aee --- /dev/null +++ b/modules/teleport-node/assets/cloud-init/cloud-config.yaml @@ -0,0 +1,25 @@ +#cloud-config +packages: + - amazon-cloudwatch-agent +package_update: true +package_upgrade: true +write_files: + - path: /opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.d/user.json + content: ${cloudwatch_agent_config_encoded} + encoding: base64 + permissions: "0644" + - path: /etc/teleport.d/teleport.tmpl.yaml + content: ${teleport_config_tmpl_encoded} + encoding: base64 + permissions: "0644" + - path: /etc/teleport.d/conf + content: ${teleport_envs_encoded} + encoding: base64 + permissions: "0644" + - path: /etc/teleport.d/use-letsencrypt + content: use-letsencrypt + permissions: "0644" +power_state: + delay: now + mode: reboot + timeout: 10 diff --git a/modules/teleport-node/assets/cloud-init/cloudwatch-agent-config.json b/modules/teleport-node/assets/cloud-init/cloudwatch-agent-config.json new file mode 100644 index 0000000..5077bfd --- /dev/null +++ 
b/modules/teleport-node/assets/cloud-init/cloudwatch-agent-config.json @@ -0,0 +1,44 @@ +{ + "agent": { + "run_as_user": "cwagent" + }, + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log", + "log_group_name": "${cluster_log_group_name}", + "log_stream_name": "/ec2/instance/{instance_id}/amazon-cloudwatch-agent.log", + "timestamp_format": "%Y-%m-%dT%H:%M:%SZ" + }, + { + "file_path": "/var/log/dmesg", + "log_group_name": "${cluster_log_group_name}", + "log_stream_name": "/ec2/instance/{instance_id}/dmesg" + }, + { + "file_path": "/var/log/messages", + "log_group_name": "${cluster_log_group_name}", + "log_stream_name": "/ec2/instance/{instance_id}/messages", + "timestamp_format": "%b %d %H:%M:%S" + }, + { + "file_path": "/var/log/cloud-init.log", + "log_group_name": "${cluster_log_group_name}", + "log_stream_name": "/ec2/instance/{instance_id}/cloud-init.log", + "multi_line_start_pattern": "\\w+ \\d{2} \\d{2}:\\d{2}:\\d{2} cloud-init\\[[\\w]+]:", + "timestamp_format": "%B %d %H:%M:%S", + "timezone": "UTC" + }, + { + "file_path": "/var/log/cloud-init-output.log", + "log_group_name": "${cluster_log_group_name}", + "log_stream_name": "/ec2/instance/{instance_id}/cloud-init-output.log", + "multi_line_start_pattern": "Cloud-init v. 
\\d+.\\d+-\\d+" + } + ] + } + } + } +} diff --git a/modules/teleport-node/assets/cloud-init/install_packages.sh b/modules/teleport-node/assets/cloud-init/install_packages.sh new file mode 100644 index 0000000..342083f --- /dev/null +++ b/modules/teleport-node/assets/cloud-init/install_packages.sh @@ -0,0 +1,12 @@ +#!/bin/bash -xe + +# --- script------------------------------------------------ + +yum install -y binutils +yum install -y yum-plugin-kernel-livepatch +yum kernel-livepatch enable -y +yum install -y kpatch-runtime + +systemctl enable kpatch.service +amazon-linux-extras enable livepatch +yum update -y diff --git a/modules/teleport-node/assets/cloud-init/pull_files.sh b/modules/teleport-node/assets/cloud-init/pull_files.sh new file mode 100644 index 0000000..a2ffe25 --- /dev/null +++ b/modules/teleport-node/assets/cloud-init/pull_files.sh @@ -0,0 +1,13 @@ +#!/bin/bash -xe +# shellcheck disable=SC2154,SC2686 + +# --- terraform inputs ------------------------------------- + +SRC_BUCKET_NAME=${src_bucket_name} +SRC_BUCKET_PATH=${src_bucket_path} +DST_PATH=${dst_path} + +# --- script------------------------------------------------ + +aws s3 cp "s3://$SRC_BUCKET_NAME/$SRC_BUCKET_PATH/" "$DST_PATH/" --recursive +chmod 755 "$DST_PATH"/teleport* diff --git a/modules/teleport-node/assets/cloud-init/start_core_services.sh b/modules/teleport-node/assets/cloud-init/start_core_services.sh new file mode 100644 index 0000000..ccca61d --- /dev/null +++ b/modules/teleport-node/assets/cloud-init/start_core_services.sh @@ -0,0 +1,12 @@ +#!/bin/bash -xe + +function cwagent_ctl { + /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl "$@" +} +export -f cwagent_ctl + +chmod 644 /var/log/cloud-init-output.log +chmod 644 /var/log/messages + +cwagent_ctl -a fetch-config -s -m ec2 \ + -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.d/user.json diff --git a/modules/teleport-node/assets/teleport/teleport.conf 
b/modules/teleport-node/assets/teleport/teleport.conf new file mode 100644 index 0000000..60c358c --- /dev/null +++ b/modules/teleport-node/assets/teleport/teleport.conf @@ -0,0 +1,8 @@ +TELEPORT_ROLE=${teleport_node_type} +TELEPORT_CLUSTER_NAME=${teleport_cluster_name} +TELEPORT_DOMAIN_NAME=${teleport_domain_name} +TELEPORT_DOMAIN_ADMIN_EMAIL=${teleport_domain_email} +TELEPORT_LOCKS_TABLE_NAME=${teleport_ddb_table_locks_name} +TELEPORT_S3_BUCKET=${teleport_bucket_name} +EC2_REGION=${aws_region_name} +USE_LETSENCRYPT=true diff --git a/modules/teleport-node/context.tf b/modules/teleport-node/context.tf new file mode 100755 index 0000000..873244c --- /dev/null +++ b/modules/teleport-node/context.tf @@ -0,0 +1,277 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. 
+ EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. 
+ EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. 
+ EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? 
true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} \ No newline at end of file diff --git a/modules/teleport-node/main.tf b/modules/teleport-node/main.tf new file mode 100755 index 0000000..c159510 --- /dev/null +++ b/modules/teleport-node/main.tf @@ -0,0 +1,967 @@ +locals { + teleport_auth_address = var.teleport_auth_address + teleport_bucket_name = var.teleport_bucket_name + teleport_cluster_name = var.teleport_cluster_name + teleport_image_id = var.teleport_image_id + teleport_letsencrypt_email = var.teleport_letsencrypt_email + teleport_node_type = var.teleport_node_type + teleport_setup_enabled = module.this.enabled && var.teleport_setup_mode + + teleport_ddb_table_events_name = var.teleport_ddb_table_events_name + teleport_ddb_table_locks_name = var.teleport_ddb_table_locks_name + teleport_ddb_table_state_name = var.teleport_ddb_table_state_name + teleport_security_group_ids = var.teleport_security_group_ids + + aws_account_id = var.aws_account_id + aws_kv_namespace = var.aws_kv_namespace + aws_region_name = var.aws_region_name + artifacts_bucket_name = var.artifacts_bucket_name + logs_bucket_name = var.logs_bucket_name + experimental = 
var.experimental + + desired_capacity = local.teleport_setup_enabled ? (local.teleport_node_type == "auth" ? 1 : 0) : var.instance_count + min_capacity = local.teleport_setup_enabled ? (local.teleport_node_type == "auth" ? 1 : 0) : var.instance_count + max_capacity = local.teleport_setup_enabled ? (local.teleport_node_type == "auth" ? 1 : 0) : var.instance_count + instance_sizes = var.instance_sizes + + dns_name = "${module.dns_label.id}.${var.dns_parent_zone_name}" + dns_parent_zone_id = var.dns_parent_zone_id + + vpc_associate_public_ips = var.vpc_associate_public_ips + vpc_id = var.vpc_id + vpc_security_group_ids = var.vpc_security_group_ids + vpc_private_subnet_ids = var.vpc_private_subnet_ids + vpc_public_subnet_ids = var.vpc_public_subnet_ids +} + +module "dns_label" { + source = "cloudposse/label/null" + version = "0.25.0" + + label_order = ["environment", "name", "attributes"] + tags = { TeleportCluster = local.teleport_cluster_name, TeleportRole = local.teleport_node_type } + context = module.this.context +} + +module "node_type_label" { + source = "cloudposse/label/null" + version = "0.25.0" + + attributes = [local.teleport_node_type] + tags = { TeleportCluster = local.teleport_cluster_name, TeleportRole = local.teleport_node_type } + context = module.this.context +} + +# ================================================================= teleport === + +locals { + teleport_config = { + auth = { + teleport = { + nodename = "$TELEPORT_NODENAME" + advertise_ip = "$TELEPORT_ADVERTISE_IP" + log = { + output = "stderr" + severity = "INFO" + } + data_dir = "/var/lib/teleport" + storage = { + type = "dynamodb" + region = "us-east-1" + table_name = local.teleport_ddb_table_state_name + audit_events_uri = "dynamodb://${local.teleport_ddb_table_events_name}" + audit_sessions_uri = "s3://${local.teleport_bucket_name}/records" + } + } + auth_service = { + enabled = "yes" + cluster_name = local.dns_name + public_addr = tobool(local.teleport_node_type == "auth") ? 
"${aws_lb.this[0].dns_name}:3025" : "" + keep_alive_interval = "1m" + keep_alive_count_max = 3 + listen_addr = "0.0.0.0:3025" + authentication = { + second_factor = "otp" + } + session_recording = "node-sync" + } + proxy_service = { + enabled = "no" + } + ssh_service = { + enabled = "no" + } + } + node = { + teleport = { + auth_token = "/var/lib/teleport/token" + ca_pin = "CA_PIN_HASH_PLACEHOLDER" + nodename = "$TELEPORT_NODENAME" + advertise_ip = "$TELEPORT_ADVERTISE_IP" + log = { + output = "stderr" + severity = "INFO" + } + data_dir = "/var/lib/teleport" + storage = { + type = "dir" + path = "/var/lib/teleport/backend" + } + auth_servers = [ + "${local.dns_name}:443", + "${local.teleport_auth_address}:3025", + ] + } + app_service = { + enabled = "yes" + debug_app = true + resources = [{ + labels = { + "*" : "*" + } + }] + } + auth_service = { + enabled = "no" + } + db_service = { + enabled = "yes" + aws = [{ + types = ["rds", "redshift"] + regions = ["us-east-1"] + tags = { + "*" : "*" + } + }] + resources = [{ + labels = { + "*" : "*" + } + }] + } + proxy_service = { + enabled = "no" + } + ssh_service = { + enabled = "yes" + listen_addr = "0.0.0.0:3022" + enhanced_recording = { + enabled = false # todo enable w/ amazon-linux 2022; minimum supported kernel is 5.8.0 + command_buffer_size = 8 + disk_buffer_size = 128 + network_buffer_size = 8 + cgroup_path = "/cgroup2" + } + labels = module.this.tags + } + } + proxy = { + teleport = { + auth_token = "/var/lib/teleport/token" + ca_pin = "CA_PIN_HASH_PLACEHOLDER" + nodename = "$TELEPORT_NODENAME" + advertise_ip = "$TELEPORT_ADVERTISE_IP" + cache = { + type = "in-memory" + } + connection_limits = { + max_connections = 1000 + max_users = 100 + } + log = { + output = "stderr" + severity = "INFO" + } + data_dir = "/var/lib/teleport" + storage = { + type = "dir" + path = "/var/lib/teleport/backend" + } + auth_servers = [ + "${local.teleport_auth_address}:3025", + ] + } + auth_service = { + enabled = "no" + } + 
proxy_service = { + enabled = "yes" + listen_addr = "0.0.0.0:3023" + tunnel_listen_addr = "0.0.0.0:3080" + web_listen_addr = "0.0.0.0:3080" + public_addr = "${local.dns_name}:443" + ssh_public_addr = "${local.dns_name}:3023" + tunnel_public_addr = "${local.dns_name}:443" + https_keypairs = [{ + cert_file = "/var/lib/teleport/fullchain.pem" + key_file = "/var/lib/teleport/privkey.pem" + }] + kubernetes = { + enabled = "yes" + listen_addr = "0.0.0.0:3026" + public_addr = ["${local.dns_name}:3026"] + } + } + ssh_service = { + enabled = "no" + } + } + } +} + +# ---------------------------------------------------------------------- iam --- + +data "template_cloudinit_config" "this" { + count = module.this.enabled ? 1 : 0 + + gzip = true + base64_encode = true + + part { + content_type = "text/cloud-config" + content = templatefile("${path.module}/assets/cloud-init/cloud-config.yaml", { + cloudwatch_agent_config_encoded = base64encode( + templatefile("${path.module}/assets/cloud-init/cloudwatch-agent-config.json", { + cluster_log_group_name = aws_cloudwatch_log_group.this[0].name + }) + ) + teleport_envs_encoded = base64encode( + templatefile("${path.module}/assets/teleport/teleport.conf", { + aws_region_name = local.aws_region_name + teleport_node_type = local.teleport_node_type + teleport_cluster_name = local.teleport_cluster_name + teleport_ddb_table_locks_name = local.teleport_ddb_table_locks_name + teleport_domain_email = local.teleport_letsencrypt_email + teleport_domain_name = local.dns_name + teleport_bucket_name = local.teleport_bucket_name + }) + ) + teleport_config_tmpl_encoded = base64encode( + yamlencode(local.teleport_config[local.teleport_node_type]) + ) + }) + } + + part { + content_type = "text/x-shellscript" + content = file("${path.module}/assets/cloud-init/install_packages.sh") + } + + part { + content_type = "text/x-shellscript" + content = file("${path.module}/assets/cloud-init/start_core_services.sh") + } + + part { + content_type = 
"text/x-shellscript" + content = templatefile("${path.module}/assets/cloud-init/pull_files.sh", { + src_bucket_name = local.artifacts_bucket_name + src_bucket_path = "${local.aws_kv_namespace}/image/files/bin" + dst_path = "/usr/local/bin" + }) + } +} + +resource "aws_autoscaling_group" "this" { + count = module.this.enabled ? 1 : 0 + + name = module.node_type_label.id + vpc_zone_identifier = local.vpc_private_subnet_ids + max_instance_lifetime = 86400 + metrics_granularity = "1Minute" + termination_policies = ["OldestLaunchTemplate", "AllocationStrategy", "Default"] + health_check_grace_period = 300 + health_check_type = "EC2" + + desired_capacity = local.desired_capacity + min_size = local.min_capacity + max_size = local.max_capacity + + target_group_arns = flatten([ + contains(["auth"], local.teleport_node_type) ? [ + aws_lb_target_group.auth_ssh[0].arn + ] : [], + contains(["proxy"], local.teleport_node_type) ? [ + aws_lb_target_group.proxy_ssh[0].arn, + aws_lb_target_group.proxy_web[0].arn, + ] : [], + ]) + + enabled_metrics = [ + "GroupMinSize", + "GroupMaxSize", + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances", + "GroupInServiceCapacity", + "GroupPendingCapacity", + "GroupStandbyCapacity", + "GroupTerminatingCapacity", + "GroupTotalCapacity", + ] + + mixed_instances_policy { + instances_distribution { + on_demand_base_capacity = 0 + on_demand_percentage_above_base_capacity = 0 + spot_allocation_strategy = "capacity-optimized" + spot_instance_pools = 0 + } + + launch_template { + launch_template_specification { + launch_template_id = aws_launch_template.this[0].id + version = aws_launch_template.this[0].latest_version + } + + dynamic "override" { + for_each = local.instance_sizes + + content { + instance_type = override.value + weighted_capacity = "1" + } + } + } + } + + instance_refresh { + strategy = "Rolling" + triggers = ["tag"] + + 
preferences { + min_healthy_percentage = 0 + } + } + + dynamic "tag" { + for_each = merge(module.node_type_label.tags, { Name = module.node_type_label.id }) + + content { + key = tag.key + value = tag.value + propagate_at_launch = true + } + } +} + +resource "aws_launch_template" "this" { + count = module.this.enabled ? 1 : 0 + + name = module.node_type_label.id + image_id = local.teleport_image_id + user_data = data.template_cloudinit_config.this[0].rendered + update_default_version = true + + block_device_mappings { + device_name = "/dev/xvda" + + ebs { + delete_on_termination = true + encrypted = true + iops = null + kms_key_id = null + snapshot_id = null + throughput = null + volume_size = 100 + volume_type = "gp3" + } + } + + iam_instance_profile { + name = resource.aws_iam_instance_profile.this[0].id + } + + monitoring { + enabled = true + } + + metadata_options { + http_endpoint = "enabled" + http_put_response_hop_limit = 1 + http_tokens = "required" + instance_metadata_tags = "enabled" + } + + network_interfaces { + associate_public_ip_address = false + security_groups = distinct(concat([module.security_group.id], local.teleport_security_group_ids)) + } +} + +# ======================================================== instance-resource === + +resource "aws_cloudwatch_log_group" "this" { + count = module.this.enabled ? 1 : 0 + + name = module.node_type_label.id + retention_in_days = local.experimental ? 
90 : 180 + tags = module.node_type_label.tags +} + +module "security_group" { + source = "cloudposse/security-group/aws" + version = "2.2.0" + + vpc_id = local.vpc_id + allow_all_egress = true + + rules = [{ + key = "group" + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "all" + description = "allow all group ingress" + cidr_blocks = [] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = true + }, { + key = "auth" + type = "ingress" + from_port = 3025 + to_port = 3025 + protocol = "tcp" + description = "allow auth traffic" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = null + }, { + key = "node-ssh" + type = "ingress" + from_port = 3022 + to_port = 3022 + protocol = "tcp" + description = "allow teleport node ssh" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = null + }, { + key = "proxy-ssh" + type = "ingress" + from_port = 3023 + to_port = 3023 + protocol = "tcp" + description = "allow teleport proxy ssh" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = null + }, { + key = "proxy-reverse-ssh" + type = "ingress" + from_port = 3024 + to_port = 3024 + protocol = "tcp" + description = "allow teleport proxy reverse-ssh" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = null + }, { + key = "proxy-https" + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "tcp" + description = "allow teleport proxy https" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = null + }, { + key = "proxy-web" + type = "ingress" + from_port = 3080 + to_port = 3080 + protocol = "tcp" + description = "allow teleport proxy web" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = null + }, { + key = "node-mysql" + type = "ingress" + from_port = 3036 + to_port = 3036 + 
protocol = "tcp" + description = "allow teleport node mysql" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = [] + source_security_group_id = null + self = null + }] + + tags = merge(module.node_type_label.tags, { Name = module.node_type_label.id }) + context = module.node_type_label.context +} + +# ---------------------------------------------------------------------- iam --- + +resource "aws_iam_instance_profile" "this" { + count = module.this.enabled ? 1 : 0 + + name = module.node_type_label.id + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role" "this" { + count = module.this.enabled ? 1 : 0 + + name = module.node_type_label.id + description = "" + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [{ + Effect = "Allow" + Principal = { "Service" : "ec2.amazonaws.com" } + Action = ["sts:AssumeRole", "sts:TagSession"] + }] + }) + + managed_policy_arns = [ + "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore", + ] + + inline_policy { + name = "ec2-management-access" + policy = data.aws_iam_policy_document.ec2_management[0].json + } + + inline_policy { + name = "teleport-base-access" + policy = data.aws_iam_policy_document.base_access[0].json + } + + dynamic "inline_policy" { + for_each = contains(["auth"], local.teleport_node_type) ? [true] : [] + + content { + name = "teleport-auth-access" + policy = data.aws_iam_policy_document.auth_access[0].json + } + } + + dynamic "inline_policy" { + for_each = contains(["node"], local.teleport_node_type) ? [true] : [] + + content { + name = "teleport-node-access" + policy = data.aws_iam_policy_document.node_access[0].json + } + } + + dynamic "inline_policy" { + for_each = contains(["proxy"], local.teleport_node_type) ? [true] : [] + + content { + name = "teleport-proxy-access" + policy = data.aws_iam_policy_document.proxy_access[0].json + } + } + + tags = module.node_type_label.tags +} + +data "aws_iam_policy_document" "ec2_management" { + count = module.this.enabled ? 
1 : 0 + + statement { + sid = "AllowSsmSessionLogging" + effect = "Allow" + actions = [ + "s3:PutObject", + "s3:PutObjectAcl", + "s3:PutObjectTagging", + "s3:GetEncryptionConfiguration", + "s3:GetBucketLocation", + ] + resources = [ + "arn:aws:s3:::${local.logs_bucket_name}", + "arn:aws:s3:::${local.logs_bucket_name}/*" + ] + } + + statement { + sid = "AllowArtifactsBucketRead" + effect = "Allow" + actions = [ + "s3:GetObject", + "s3:ListBucket*", + ] + resources = [ + "arn:aws:s3:::${local.artifacts_bucket_name}", + "arn:aws:s3:::${local.artifacts_bucket_name}/*" + ] + } +} + +data "aws_iam_policy_document" "base_access" { + count = contains(["auth", "node", "proxy"], local.teleport_node_type) ? 1 : 0 + + statement { + sid = "AllowSecretsKmsKeyAccess" + effect = "Allow" + actions = [ + "kms:Decrypt", + ] + resources = [ + "*", + ] + + condition { + test = "StringEquals" + variable = "kms:ViaService" + values = [ + "ssm.${local.aws_region_name}.amazonaws.com", + ] + } + } + + statement { + sid = "AllowTeleportSsmParameterAccess" + effect = "Allow" + actions = [ + "ssm:GetParameters", + "ssm:GetParametersByPath", + "ssm:GetParameter" + ] + resources = [ + "arn:aws:ssm:${local.aws_region_name}:${local.aws_account_id}:parameter/${local.aws_kv_namespace}/*/tokens/proxy", + "arn:aws:ssm:${local.aws_region_name}:${local.aws_account_id}:parameter/${local.aws_kv_namespace}/*/ca-pin-hash", + "arn:aws:ssm:${local.aws_region_name}:${local.aws_account_id}:parameter/teleport/${local.teleport_cluster_name}/tokens/proxy", + "arn:aws:ssm:${local.aws_region_name}:${local.aws_account_id}:parameter/teleport/${local.teleport_cluster_name}/ca-pin-hash", + ] + } + + statement { + sid = "AllowCloudWatchLogging" + effect = "Allow" + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + resources = [ + "${aws_cloudwatch_log_group.this[0].arn}*" + ] + } +} + +data "aws_iam_policy_document" "auth_access" { + count = module.this.enabled ? 
1 : 0 + + statement { + sid = "AllowSecretsKmsKeyAccess" + effect = "Allow" + actions = [ + "kms:Decrypt", + ] + resources = [ + "*", + ] + + condition { + test = "StringEquals" + variable = "kms:ViaService" + values = [ + "ssm.${local.aws_region_name}.amazonaws.com", + ] + } + } + + statement { + sid = "AllowR53ReadAccess" + effect = "Allow" + actions = [ + "route53:ListHostedZones", + "route53:GetChange", + ] + resources = [ + "*", + ] + } + + statement { + sid = "AllowR53WriteAccess" + effect = "Allow" + actions = [ + "route53:ChangeResourceRecordSets", + ] + resources = [ + "arn:aws:route53:::hostedzone/${local.dns_parent_zone_id}" + ] + } + + statement { + sid = "AllowTeleportDdbTableFullAccess" + effect = "Allow" + actions = [ + "dynamodb:*", + ] + resources = [ + "arn:aws:dynamodb:${local.aws_region_name}:${local.aws_account_id}:table/${local.teleport_ddb_table_events_name}", + "arn:aws:dynamodb:${local.aws_region_name}:${local.aws_account_id}:table/${local.teleport_ddb_table_events_name}/index/*", + "arn:aws:dynamodb:${local.aws_region_name}:${local.aws_account_id}:table/${local.teleport_ddb_table_locks_name}", + "arn:aws:dynamodb:${local.aws_region_name}:${local.aws_account_id}:table/${local.teleport_ddb_table_state_name}", + "arn:aws:dynamodb:${local.aws_region_name}:${local.aws_account_id}:table/${local.teleport_ddb_table_state_name}/stream/*", + ] + } + + statement { + sid = "AllowTeleportS3BucketAccess" + effect = "Allow" + actions = [ + "s3:GetObject", + "s3:GetObjectVersion", + "s3:ListBucket", + "s3:ListBucketVersions", + "s3:ListBucketMultipartUploads", + "s3:PutObject", + ] + resources = [ + "arn:aws:s3:::${local.teleport_bucket_name}", + "arn:aws:s3:::${local.teleport_bucket_name}/*", + ] + } + + statement { + sid = "AllowTeleportSsmParameterAccess" + effect = "Allow" + actions = [ + "ssm:DescribeParameters", + "ssm:GetParameters", + "ssm:GetParametersByPath", + "ssm:GetParameter", + "ssm:PutParameter", + "ssm:DeleteParameter", + ] + resources = 
[ + "arn:aws:ssm:${local.aws_region_name}:${local.aws_account_id}:parameter/${local.aws_kv_namespace}/*", + "arn:aws:ssm:${local.aws_region_name}:${local.aws_account_id}:parameter/teleport/${local.teleport_cluster_name}/*", + ] + } +} + +data "aws_iam_policy_document" "node_access" { + count = contains(["node"], local.teleport_node_type) ? 1 : 0 + + statement { + sid = "AllowDatabaseClusterAccess" + effect = "Allow" + actions = [ + "redshift:DescribeClusters", + "redshift:GetClusterCredentials", + "rds:DescribeDBInstances", + "rds:ModifyDBInstance", + "rds:DescribeDBClusters", + "rds:ModifyDBCluster", + "rds-db:connect", + ] + resources = [ + "*", + ] + } + + statement { + sid = "AllowDatabaseIamAccess" + effect = "Allow" + actions = [ + "iam:GetRolePolicy", + "iam:PutRolePolicy", + "iam:DeleteRolePolicy", + ] + resources = [ + "*", # todo limit which resources + ] + } +} + +data "aws_iam_policy_document" "proxy_access" { + count = module.this.enabled ? 1 : 0 + + statement { + sid = "AllowTeleportS3BucketAccess" + effect = "Allow" + actions = [ + "s3:GetObject", + "s3:ListBucket", + ] + resources = [ + "arn:aws:s3:::${local.teleport_bucket_name}", + "arn:aws:s3:::${local.teleport_bucket_name}/*", + ] + } +} + +# ====================================================================== nlb === + +module "node_lb_label" { + source = "cloudposse/label/null" + version = "0.25.0" + + id_length_limit = 32 + label_order = ["name", "attributes"] + context = module.node_type_label.context +} + +resource "aws_lb" "this" { + count = contains(["auth", "proxy"], local.teleport_node_type) ? 1 : 0 + + name = module.node_lb_label.id + internal = contains(["auth"], local.teleport_node_type) + subnets = contains(["auth"], local.teleport_node_type) ? local.vpc_private_subnet_ids : local.vpc_public_subnet_ids + load_balancer_type = "network" + idle_timeout = 3600 + enable_cross_zone_load_balancing = true + enable_deletion_protection = local.experimental ? 
false : true + + access_logs { + bucket = local.logs_bucket_name + enabled = true + } + + tags = module.node_type_label.tags +} + +resource "aws_route53_record" "proxy" { + count = contains(["proxy"], local.teleport_node_type) ? 1 : 0 + + zone_id = local.dns_parent_zone_id + name = local.dns_name + type = "A" + allow_overwrite = true + + alias { + name = aws_lb.this[0].dns_name + zone_id = aws_lb.this[0].zone_id + evaluate_target_health = true + } +} + +resource "aws_route53_record" "proxy_wildcard" { + count = contains(["proxy"], local.teleport_node_type) ? 1 : 0 + + zone_id = local.dns_parent_zone_id + name = "*.${local.dns_name}" + type = "A" + allow_overwrite = true + + alias { + name = aws_lb.this[0].dns_name + zone_id = aws_lb.this[0].zone_id + evaluate_target_health = true + } +} + +# ---------------------------------------------------------------- auth: ssh --- + +module "auth_ssh_lb_label" { + source = "cloudposse/label/null" + version = "0.25.0" + + # ensure there are room for the 6 unique chars plus dash for actual label + attributes = ["auth", "ssh"] + id_length_limit = 32 + label_order = ["name", "attributes"] + context = module.node_lb_label.context +} + +resource "aws_lb_target_group" "auth_ssh" { + count = contains(["auth"], local.teleport_node_type) ? 1 : 0 + + name = module.auth_ssh_lb_label.id + port = 3025 + vpc_id = local.vpc_id + protocol = "TCP" +} + +resource "aws_lb_listener" "auth_ssh" { + count = contains(["auth"], local.teleport_node_type) ? 
1 : 0
+
+  load_balancer_arn = aws_lb.this[0].arn
+  port              = 3025
+  protocol          = "TCP"
+
+  default_action {
+    target_group_arn = aws_lb_target_group.auth_ssh[0].arn
+    type             = "forward"
+  }
+}
+
+# --------------------------------------------------------------- proxy: ssh ---
+
+module "proxy_ssh_lb_label" {
+  source  = "cloudposse/label/null"
+  version = "0.25.0"
+
+  # ensure there is room for the 6 unique chars plus dash for actual label
+  attributes      = ["proxy", "ssh"]
+  id_length_limit = 32
+  label_order     = ["name", "attributes"]
+  context         = module.node_lb_label.context
+}
+
+resource "aws_lb_target_group" "proxy_ssh" {
+  count = contains(["proxy"], local.teleport_node_type) ? 1 : 0
+
+  name     = module.proxy_ssh_lb_label.id
+  port     = 3023
+  vpc_id   = local.vpc_id
+  protocol = "TCP"
+}
+
+resource "aws_lb_listener" "proxy_ssh" {
+  count = contains(["proxy"], local.teleport_node_type) ? 1 : 0
+
+  load_balancer_arn = aws_lb.this[0].arn
+  port              = 3023
+  protocol          = "TCP"
+
+  default_action {
+    target_group_arn = aws_lb_target_group.proxy_ssh[0].arn
+    type             = "forward"
+  }
+}
+
+# --------------------------------------------------------------- proxy: web ---
+
+module "proxy_web_lb_label" {
+  source  = "cloudposse/label/null"
+  version = "0.25.0"
+
+  attributes      = ["proxy", "web"]
+  id_length_limit = 32
+  label_order     = ["name", "attributes"]
+  context         = module.node_lb_label.context
+}
+
+resource "aws_lb_target_group" "proxy_web" {
+  count = contains(["proxy"], local.teleport_node_type) ? 1 : 0
+
+  name     = module.proxy_web_lb_label.id
+  port     = 3080
+  vpc_id   = local.vpc_id
+  protocol = "TCP"
+}
+
+resource "aws_lb_listener" "proxy_web" {
+  count = contains(["proxy"], local.teleport_node_type) ? 
1 : 0 + + load_balancer_arn = aws_lb.this[0].arn + port = 443 + protocol = "TCP" + + default_action { + target_group_arn = aws_lb_target_group.proxy_web[0].arn + type = "forward" + } +} + diff --git a/modules/teleport-node/output.tf b/modules/teleport-node/output.tf new file mode 100644 index 0000000..9aeb1fe --- /dev/null +++ b/modules/teleport-node/output.tf @@ -0,0 +1,11 @@ +output "lb_dns_name" { + value = contains(["auth", "proxy"], local.teleport_node_type) ? aws_lb.this[0].dns_name : "" +} + +output "teleport_dns_name" { + value = local.dns_name +} + +output "teleport_config" { + value = local.teleport_config[local.teleport_node_type] +} diff --git a/modules/teleport-node/variables.tf b/modules/teleport-node/variables.tf new file mode 100644 index 0000000..9cb3899 --- /dev/null +++ b/modules/teleport-node/variables.tf @@ -0,0 +1,120 @@ + +variable "teleport_auth_address" { + type = string + default = "" +} + +variable "teleport_bucket_name" { + type = string +} + +variable "teleport_cluster_name" { + type = string +} + +variable "teleport_ddb_table_events_name" { + type = string +} + +variable "teleport_ddb_table_locks_name" { + type = string +} + +variable "teleport_ddb_table_state_name" { + type = string +} + +variable "teleport_image_id" { + type = string +} + +variable "teleport_letsencrypt_email" { + type = string +} + +variable "teleport_node_type" { + type = string +} + +variable "teleport_security_group_ids" { + type = list(string) +} + +variable "teleport_setup_mode" { + type = bool + default = true +} + +variable "experimental" { + type = bool + default = false +} + +# ----------------------------------------------------------------- instance --- + + +variable "instance_count" { + type = number + default = 1 +} + +variable "instance_sizes" { + type = list(string) + default = ["t3.medium", "t3a.medium"] +} + +# ----------------------------------------------------------- infrastructure --- + +variable "artifacts_bucket_name" { + type = string +} + 
+variable "logs_bucket_name" { + type = string +} + +variable "dns_parent_zone_id" { + type = string +} + +variable "dns_parent_zone_name" { + type = string +} + +variable "vpc_associate_public_ips" { + type = bool + default = false +} + +variable "vpc_id" { + type = string +} + +variable "vpc_security_group_ids" { + type = list(string) + default = [] +} + +variable "vpc_private_subnet_ids" { + type = list(string) + default = [] +} + +variable "vpc_public_subnet_ids" { + type = list(string) + default = [] +} + +# ---------------------------------------------------------------- component --- + +variable "aws_account_id" { + type = string +} + +variable "aws_kv_namespace" { + type = string +} + +variable "aws_region_name" { + type = string +} diff --git a/outputs.tf b/outputs.tf index e69de29..08599a9 100755 --- a/outputs.tf +++ b/outputs.tf @@ -0,0 +1,33 @@ +# ================================================================= teleport === + +output "teleport_dns_name" { + value = module.auth_servers.teleport_dns_name + description = "The DNS name of the Teleport service." +} + +output "teleport_auth_config" { + value = module.auth_servers.teleport_config + description = "The configuration details for the Teleport auth service." +} + +output "teleport_node_config" { + value = module.node_servers.teleport_config + description = "The configuration details for the Teleport node service." +} + +output "teleport_proxy_config" { + value = module.proxy_servers.teleport_config + description = "The configuration details for the Teleport proxy service." +} + +# ================================================================ resources === + +output "security_group_id" { + value = module.security_group.id + description = "The ID of the security group created for the Teleport service." +} + +output "security_group_name" { + value = module.security_group.name + description = "The name of the security group created for the Teleport service." 
+} diff --git a/variables.tf b/variables.tf index e69de29..43c87ac 100755 --- a/variables.tf +++ b/variables.tf @@ -0,0 +1,107 @@ +# ================================================================= teleport === + +variable "teleport_runtime_version" { + type = string + description = "The runtime version of Teleport." +} + +variable "teleport_letsencrypt_email" { + type = string + description = "The email address to use for Let's Encrypt." +} + +variable "teleport_setup_mode" { + type = bool + description = "Toggle Teleport setup mode." + default = true +} + +variable "teleport_experimental_mode" { + type = bool + description = "Toggle Teleport experimental mode." + default = false +} + +# ----------------------------------------------------------------- instance --- + +variable "instance_config" { + type = object({ + auth = optional(object({ + count = optional(number, 1) + sizes = optional(list(string), ["t3.micro", "t3a.micro"]) + }), {}) + node = optional(object({ + count = optional(number, 1) + sizes = optional(list(string), ["t3.micro", "t3a.micro"]) + }), {}) + proxy = optional(object({ + count = optional(number, 1) + sizes = optional(list(string), ["t3.micro", "t3a.micro"]) + }), {}) + }) + description = "Configuration for the instances. Each type (`auth`, `node`, `proxy`) contains an object with `count` and `sizes`." + default = {} +} + +# ------------------------------------------------------------------ buckets --- + +variable "artifacts_bucket_name" { + type = string + description = "The name of the S3 bucket for artifacts." + default = "" +} + +variable "logs_bucket_name" { + type = string + description = "The name of the S3 bucket for logs." + default = "" +} + +# ---------------------------------------------------------------------- dns --- + +variable "dns_parent_zone_id" { + type = string + description = "The ID of the parent DNS zone." +} + +variable "dns_parent_zone_name" { + type = string + description = "The name of the parent DNS zone." 
+} + +# ------------------------------------------------------------------ network --- + +variable "vpc_id" { + type = string + description = "The ID of the VPC to deploy resources into." +} + +variable "vpc_private_subnet_ids" { + type = list(string) + description = "The IDs of the private subnets in the VPC to deploy resources into." +} + +variable "vpc_public_subnet_ids" { + type = list(string) + description = "The IDs of the public subnets in the VPC to deploy resources into." +} + +# ================================================================== context === + +variable "aws_region_name" { + type = string + description = "The name of the AWS region." + default = "" +} + +variable "aws_account_id" { + type = string + description = "The ID of the AWS account." + default = "" +} + +variable "aws_kv_namespace" { + type = string + description = "The namespace or prefix for AWS SSM parameters and similar resources." + default = "" +} diff --git a/version.tf b/version.tf index 8a7a14c..b5df4ee 100755 --- a/version.tf +++ b/version.tf @@ -1,11 +1,10 @@ terraform { - required_version = ">= 0.13.0" + required_version = ">= 1.3.0" required_providers { - # v5 blocked until https://github.com/cloudposse/terraform-aws-cloudfront-s3-cdn/pull/280 aws = { source = "hashicorp/aws" - version = ">= 4.9.0, < 6.0.0" + version = ">= 5.0.0, < 6.0.0" } } }